1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/IntrinsicsRISCV.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/KnownBits.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "riscv-lower"
44 
45 STATISTIC(NumTailCalls, "Number of tail calls");
46 
47 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
48                                          const RISCVSubtarget &STI)
49     : TargetLowering(TM), Subtarget(STI) {
50 
51   if (Subtarget.isRV32E())
52     report_fatal_error("Codegen not yet implemented for RV32E");
53 
54   RISCVABI::ABI ABI = Subtarget.getTargetABI();
55   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
56 
57   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
58       !Subtarget.hasStdExtF()) {
59     errs() << "Hard-float 'f' ABI can't be used for a target that "
60                 "doesn't support the F instruction set extension (ignoring "
61                           "target-abi)\n";
62     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
63   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
64              !Subtarget.hasStdExtD()) {
65     errs() << "Hard-float 'd' ABI can't be used for a target that "
66               "doesn't support the D instruction set extension (ignoring "
67               "target-abi)\n";
68     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
69   }
70 
71   switch (ABI) {
72   default:
73     report_fatal_error("Don't know how to lower this ABI");
74   case RISCVABI::ABI_ILP32:
75   case RISCVABI::ABI_ILP32F:
76   case RISCVABI::ABI_ILP32D:
77   case RISCVABI::ABI_LP64:
78   case RISCVABI::ABI_LP64F:
79   case RISCVABI::ABI_LP64D:
80     break;
81   }
82 
83   MVT XLenVT = Subtarget.getXLenVT();
84 
85   // Set up the register classes.
86   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
87 
88   if (Subtarget.hasStdExtZfh())
89     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
90   if (Subtarget.hasStdExtF())
91     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
92   if (Subtarget.hasStdExtD())
93     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
94 
95   static const MVT::SimpleValueType BoolVecVTs[] = {
96       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
97       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
98   static const MVT::SimpleValueType IntVecVTs[] = {
99       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
100       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
101       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
102       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
103       MVT::nxv4i64, MVT::nxv8i64};
104   static const MVT::SimpleValueType F16VecVTs[] = {
105       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
106       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
107   static const MVT::SimpleValueType F32VecVTs[] = {
108       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
109   static const MVT::SimpleValueType F64VecVTs[] = {
110       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
111 
112   if (Subtarget.hasVInstructions()) {
113     auto addRegClassForRVV = [this](MVT VT) {
114       unsigned Size = VT.getSizeInBits().getKnownMinValue();
115       assert(Size <= 512 && isPowerOf2_32(Size));
116       const TargetRegisterClass *RC;
117       if (Size <= 64)
118         RC = &RISCV::VRRegClass;
119       else if (Size == 128)
120         RC = &RISCV::VRM2RegClass;
121       else if (Size == 256)
122         RC = &RISCV::VRM4RegClass;
123       else
124         RC = &RISCV::VRM8RegClass;
125 
126       addRegisterClass(VT, RC);
127     };
128 
129     for (MVT VT : BoolVecVTs)
130       addRegClassForRVV(VT);
131     for (MVT VT : IntVecVTs) {
132       if (VT.getVectorElementType() == MVT::i64 &&
133           !Subtarget.hasVInstructionsI64())
134         continue;
135       addRegClassForRVV(VT);
136     }
137 
138     if (Subtarget.hasVInstructionsF16())
139       for (MVT VT : F16VecVTs)
140         addRegClassForRVV(VT);
141 
142     if (Subtarget.hasVInstructionsF32())
143       for (MVT VT : F32VecVTs)
144         addRegClassForRVV(VT);
145 
146     if (Subtarget.hasVInstructionsF64())
147       for (MVT VT : F64VecVTs)
148         addRegClassForRVV(VT);
149 
150     if (Subtarget.useRVVForFixedLengthVectors()) {
151       auto addRegClassForFixedVectors = [this](MVT VT) {
152         MVT ContainerVT = getContainerForFixedLengthVector(VT);
153         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
154         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
155         addRegisterClass(VT, TRI.getRegClass(RCID));
156       };
157       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
158         if (useRVVForFixedLengthVectorVT(VT))
159           addRegClassForFixedVectors(VT);
160 
161       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164     }
165   }
166 
167   // Compute derived properties from the register classes.
168   computeRegisterProperties(STI.getRegisterInfo());
169 
170   setStackPointerRegisterToSaveRestore(RISCV::X2);
171 
172   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
173     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
174 
175   // TODO: add all necessary setOperationAction calls.
176   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
177 
178   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
179   setOperationAction(ISD::BR_CC, XLenVT, Expand);
180   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
181   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
182 
183   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
184   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction(ISD::VAARG, MVT::Other, Expand);
188   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
189   setOperationAction(ISD::VAEND, MVT::Other, Expand);
190 
191   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
192   if (!Subtarget.hasStdExtZbb()) {
193     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
195   }
196 
197   if (Subtarget.is64Bit()) {
198     setOperationAction(ISD::ADD, MVT::i32, Custom);
199     setOperationAction(ISD::SUB, MVT::i32, Custom);
200     setOperationAction(ISD::SHL, MVT::i32, Custom);
201     setOperationAction(ISD::SRA, MVT::i32, Custom);
202     setOperationAction(ISD::SRL, MVT::i32, Custom);
203 
204     setOperationAction(ISD::UADDO, MVT::i32, Custom);
205     setOperationAction(ISD::USUBO, MVT::i32, Custom);
206     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
207     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
208   } else {
209     setLibcallName(RTLIB::SHL_I128, nullptr);
210     setLibcallName(RTLIB::SRL_I128, nullptr);
211     setLibcallName(RTLIB::SRA_I128, nullptr);
212     setLibcallName(RTLIB::MUL_I128, nullptr);
213     setLibcallName(RTLIB::MULO_I64, nullptr);
214   }
215 
216   if (!Subtarget.hasStdExtM()) {
217     setOperationAction(ISD::MUL, XLenVT, Expand);
218     setOperationAction(ISD::MULHS, XLenVT, Expand);
219     setOperationAction(ISD::MULHU, XLenVT, Expand);
220     setOperationAction(ISD::SDIV, XLenVT, Expand);
221     setOperationAction(ISD::UDIV, XLenVT, Expand);
222     setOperationAction(ISD::SREM, XLenVT, Expand);
223     setOperationAction(ISD::UREM, XLenVT, Expand);
224   } else {
225     if (Subtarget.is64Bit()) {
226       setOperationAction(ISD::MUL, MVT::i32, Custom);
227       setOperationAction(ISD::MUL, MVT::i128, Custom);
228 
229       setOperationAction(ISD::SDIV, MVT::i8, Custom);
230       setOperationAction(ISD::UDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UREM, MVT::i8, Custom);
232       setOperationAction(ISD::SDIV, MVT::i16, Custom);
233       setOperationAction(ISD::UDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UREM, MVT::i16, Custom);
235       setOperationAction(ISD::SDIV, MVT::i32, Custom);
236       setOperationAction(ISD::UDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UREM, MVT::i32, Custom);
238     } else {
239       setOperationAction(ISD::MUL, MVT::i64, Custom);
240     }
241   }
242 
243   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
244   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
246   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
247 
248   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
249   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
251 
252   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
253     if (Subtarget.is64Bit()) {
254       setOperationAction(ISD::ROTL, MVT::i32, Custom);
255       setOperationAction(ISD::ROTR, MVT::i32, Custom);
256     }
257   } else {
258     setOperationAction(ISD::ROTL, XLenVT, Expand);
259     setOperationAction(ISD::ROTR, XLenVT, Expand);
260   }
261 
262   if (Subtarget.hasStdExtZbp()) {
263     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
264     // more combining.
265     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
266     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
267     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
268     // BSWAP i8 doesn't exist.
269     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
270     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
271 
272     if (Subtarget.is64Bit()) {
273       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
274       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
275     }
276   } else {
277     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
278     // pattern match it directly in isel.
279     setOperationAction(ISD::BSWAP, XLenVT,
280                        Subtarget.hasStdExtZbb() ? Legal : Expand);
281   }
282 
283   if (Subtarget.hasStdExtZbb()) {
284     setOperationAction(ISD::SMIN, XLenVT, Legal);
285     setOperationAction(ISD::SMAX, XLenVT, Legal);
286     setOperationAction(ISD::UMIN, XLenVT, Legal);
287     setOperationAction(ISD::UMAX, XLenVT, Legal);
288 
289     if (Subtarget.is64Bit()) {
290       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
291       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
292       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
293       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
294     }
295   } else {
296     setOperationAction(ISD::CTTZ, XLenVT, Expand);
297     setOperationAction(ISD::CTLZ, XLenVT, Expand);
298     setOperationAction(ISD::CTPOP, XLenVT, Expand);
299   }
300 
301   if (Subtarget.hasStdExtZbt()) {
302     setOperationAction(ISD::FSHL, XLenVT, Custom);
303     setOperationAction(ISD::FSHR, XLenVT, Custom);
304     setOperationAction(ISD::SELECT, XLenVT, Legal);
305 
306     if (Subtarget.is64Bit()) {
307       setOperationAction(ISD::FSHL, MVT::i32, Custom);
308       setOperationAction(ISD::FSHR, MVT::i32, Custom);
309     }
310   } else {
311     setOperationAction(ISD::SELECT, XLenVT, Custom);
312   }
313 
314   static const ISD::CondCode FPCCToExpand[] = {
315       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
316       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
317       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
318 
319   static const ISD::NodeType FPOpToExpand[] = {
320       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
321       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
322 
323   if (Subtarget.hasStdExtZfh())
324     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
325 
326   if (Subtarget.hasStdExtZfh()) {
327     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
328     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
329     setOperationAction(ISD::LRINT, MVT::f16, Legal);
330     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
331     setOperationAction(ISD::LROUND, MVT::f16, Legal);
332     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
333     for (auto CC : FPCCToExpand)
334       setCondCodeAction(CC, MVT::f16, Expand);
335     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
336     setOperationAction(ISD::SELECT, MVT::f16, Custom);
337     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
338 
339     setOperationAction(ISD::FREM,       MVT::f16, Promote);
340     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
341     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
342     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
343     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
344     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
345     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
346     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
347     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
348     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
349     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
350     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
351     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
352     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
353     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
354     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
355     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
356     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
357 
358     // We need to custom promote this.
359     if (Subtarget.is64Bit())
360       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
361   }
362 
363   if (Subtarget.hasStdExtF()) {
364     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
365     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
366     setOperationAction(ISD::LRINT, MVT::f32, Legal);
367     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
368     setOperationAction(ISD::LROUND, MVT::f32, Legal);
369     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
370     for (auto CC : FPCCToExpand)
371       setCondCodeAction(CC, MVT::f32, Expand);
372     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
373     setOperationAction(ISD::SELECT, MVT::f32, Custom);
374     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
375     for (auto Op : FPOpToExpand)
376       setOperationAction(Op, MVT::f32, Expand);
377     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
378     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
379   }
380 
381   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
382     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
383 
384   if (Subtarget.hasStdExtD()) {
385     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
386     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
387     setOperationAction(ISD::LRINT, MVT::f64, Legal);
388     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
389     setOperationAction(ISD::LROUND, MVT::f64, Legal);
390     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
391     for (auto CC : FPCCToExpand)
392       setCondCodeAction(CC, MVT::f64, Expand);
393     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
394     setOperationAction(ISD::SELECT, MVT::f64, Custom);
395     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
396     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
397     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
398     for (auto Op : FPOpToExpand)
399       setOperationAction(Op, MVT::f64, Expand);
400     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
401     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
402   }
403 
404   if (Subtarget.is64Bit()) {
405     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
406     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
407     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
408     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
409   }
410 
411   if (Subtarget.hasStdExtF()) {
412     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
413     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
414 
415     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
416     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
417   }
418 
419   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
420   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
421   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
422   setOperationAction(ISD::JumpTable, XLenVT, Custom);
423 
424   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
425 
426   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
427   // Unfortunately this can't be determined just from the ISA naming string.
428   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
429                      Subtarget.is64Bit() ? Legal : Custom);
430 
431   setOperationAction(ISD::TRAP, MVT::Other, Legal);
432   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
433   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
434   if (Subtarget.is64Bit())
435     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
436 
437   if (Subtarget.hasStdExtA()) {
438     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
439     setMinCmpXchgSizeInBits(32);
440   } else {
441     setMaxAtomicSizeInBitsSupported(0);
442   }
443 
444   setBooleanContents(ZeroOrOneBooleanContent);
445 
446   if (Subtarget.hasVInstructions()) {
447     setBooleanVectorContents(ZeroOrOneBooleanContent);
448 
449     setOperationAction(ISD::VSCALE, XLenVT, Custom);
450 
451     // RVV intrinsics may have illegal operands.
452     // We also need to custom legalize vmv.x.s.
453     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
454     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
455     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
456     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
457     if (Subtarget.is64Bit()) {
458       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
459     } else {
460       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
461       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
462     }
463 
464     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
465     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
466 
467     static const unsigned IntegerVPOps[] = {
468         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
469         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
470         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
471         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
472         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
473         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
474         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN};
475 
476     static const unsigned FloatingPointVPOps[] = {
477         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
478         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
479         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX};
480 
481     if (!Subtarget.is64Bit()) {
482       // We must custom-lower certain vXi64 operations on RV32 due to the vector
483       // element type being illegal.
484       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
485       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
486 
487       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
488       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
489       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
490       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
491       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
492       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
493       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
494       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
495 
496       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
497       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
498       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
499       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
500       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
501       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
502       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
503       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
504     }
505 
506     for (MVT VT : BoolVecVTs) {
507       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
508 
509       // Mask VTs are custom-expanded into a series of standard nodes
510       setOperationAction(ISD::TRUNCATE, VT, Custom);
511       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
512       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
513       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
514 
515       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
516       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
517 
518       setOperationAction(ISD::SELECT, VT, Custom);
519       setOperationAction(ISD::SELECT_CC, VT, Expand);
520       setOperationAction(ISD::VSELECT, VT, Expand);
521 
522       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
523       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
524       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
525 
526       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
527       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
528       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
529 
530       // RVV has native int->float & float->int conversions where the
531       // element type sizes are within one power-of-two of each other. Any
532       // wider distances between type sizes have to be lowered as sequences
533       // which progressively narrow the gap in stages.
534       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
535       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
536       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
537       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
538 
539       // Expand all extending loads to types larger than this, and truncating
540       // stores from types larger than this.
541       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
542         setTruncStoreAction(OtherVT, VT, Expand);
543         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
544         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
545         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
546       }
547     }
548 
549     for (MVT VT : IntVecVTs) {
550       if (VT.getVectorElementType() == MVT::i64 &&
551           !Subtarget.hasVInstructionsI64())
552         continue;
553 
554       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
555       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
556 
557       // Vectors implement MULHS/MULHU.
558       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
559       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
560 
561       setOperationAction(ISD::SMIN, VT, Legal);
562       setOperationAction(ISD::SMAX, VT, Legal);
563       setOperationAction(ISD::UMIN, VT, Legal);
564       setOperationAction(ISD::UMAX, VT, Legal);
565 
566       setOperationAction(ISD::ROTL, VT, Expand);
567       setOperationAction(ISD::ROTR, VT, Expand);
568 
569       setOperationAction(ISD::CTTZ, VT, Expand);
570       setOperationAction(ISD::CTLZ, VT, Expand);
571       setOperationAction(ISD::CTPOP, VT, Expand);
572 
573       setOperationAction(ISD::BSWAP, VT, Expand);
574 
575       // Custom-lower extensions and truncations from/to mask types.
576       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
577       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
578       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
579 
580       // RVV has native int->float & float->int conversions where the
581       // element type sizes are within one power-of-two of each other. Any
582       // wider distances between type sizes have to be lowered as sequences
583       // which progressively narrow the gap in stages.
584       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
585       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
586       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
587       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
588 
589       setOperationAction(ISD::SADDSAT, VT, Legal);
590       setOperationAction(ISD::UADDSAT, VT, Legal);
591       setOperationAction(ISD::SSUBSAT, VT, Legal);
592       setOperationAction(ISD::USUBSAT, VT, Legal);
593 
594       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
595       // nodes which truncate by one power of two at a time.
596       setOperationAction(ISD::TRUNCATE, VT, Custom);
597 
598       // Custom-lower insert/extract operations to simplify patterns.
599       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
600       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
601 
602       // Custom-lower reduction operations to set up the corresponding custom
603       // nodes' operands.
604       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
605       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
606       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
607       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
608       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
609       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
610       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
611       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
612 
613       for (unsigned VPOpc : IntegerVPOps)
614         setOperationAction(VPOpc, VT, Custom);
615 
616       setOperationAction(ISD::LOAD, VT, Custom);
617       setOperationAction(ISD::STORE, VT, Custom);
618 
619       setOperationAction(ISD::MLOAD, VT, Custom);
620       setOperationAction(ISD::MSTORE, VT, Custom);
621       setOperationAction(ISD::MGATHER, VT, Custom);
622       setOperationAction(ISD::MSCATTER, VT, Custom);
623 
624       setOperationAction(ISD::VP_LOAD, VT, Custom);
625       setOperationAction(ISD::VP_STORE, VT, Custom);
626       setOperationAction(ISD::VP_GATHER, VT, Custom);
627       setOperationAction(ISD::VP_SCATTER, VT, Custom);
628 
629       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
630       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
631       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
632 
633       setOperationAction(ISD::SELECT, VT, Custom);
634       setOperationAction(ISD::SELECT_CC, VT, Expand);
635 
636       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
637       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
638 
639       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
640         setTruncStoreAction(VT, OtherVT, Expand);
641         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
642         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
643         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
644       }
645 
646       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
647       // type that can represent the value exactly.
648       if (VT.getVectorElementType() != MVT::i64) {
649         MVT FloatEltVT =
650             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
651         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
652         if (isTypeLegal(FloatVT)) {
653           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
654           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
655         }
656       }
657     }
658 
659     // Expand various CCs to best match the RVV ISA, which natively supports UNE
660     // but no other unordered comparisons, and supports all ordered comparisons
661     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
662     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
663     // and we pattern-match those back to the "original", swapping operands once
664     // more. This way we catch both operations and both "vf" and "fv" forms with
665     // fewer patterns.
666     static const ISD::CondCode VFPCCToExpand[] = {
667         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
668         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
669         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
670     };
671 
672     // Sets common operation actions on RVV floating-point vector types.
673     const auto SetCommonVFPActions = [&](MVT VT) {
674       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
675       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
676       // sizes are within one power-of-two of each other. Therefore conversions
677       // between vXf16 and vXf64 must be lowered as sequences which convert via
678       // vXf32.
679       setOperationAction(ISD::FP_ROUND, VT, Custom);
680       setOperationAction(ISD::FP_EXTEND, VT, Custom);
681       // Custom-lower insert/extract operations to simplify patterns.
682       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
683       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
684       // Expand various condition codes (explained above).
685       for (auto CC : VFPCCToExpand)
686         setCondCodeAction(CC, VT, Expand);
687 
688       setOperationAction(ISD::FMINNUM, VT, Legal);
689       setOperationAction(ISD::FMAXNUM, VT, Legal);
690 
691       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
692       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
693       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
694       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
695 
696       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
697 
698       setOperationAction(ISD::LOAD, VT, Custom);
699       setOperationAction(ISD::STORE, VT, Custom);
700 
701       setOperationAction(ISD::MLOAD, VT, Custom);
702       setOperationAction(ISD::MSTORE, VT, Custom);
703       setOperationAction(ISD::MGATHER, VT, Custom);
704       setOperationAction(ISD::MSCATTER, VT, Custom);
705 
706       setOperationAction(ISD::VP_LOAD, VT, Custom);
707       setOperationAction(ISD::VP_STORE, VT, Custom);
708       setOperationAction(ISD::VP_GATHER, VT, Custom);
709       setOperationAction(ISD::VP_SCATTER, VT, Custom);
710 
711       setOperationAction(ISD::SELECT, VT, Custom);
712       setOperationAction(ISD::SELECT_CC, VT, Expand);
713 
714       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
715       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
716       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
717 
718       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
719 
720       for (unsigned VPOpc : FloatingPointVPOps)
721         setOperationAction(VPOpc, VT, Custom);
722     };
723 
724     // Sets common extload/truncstore actions on RVV floating-point vector
725     // types.
726     const auto SetCommonVFPExtLoadTruncStoreActions =
727         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
728           for (auto SmallVT : SmallerVTs) {
729             setTruncStoreAction(VT, SmallVT, Expand);
730             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
731           }
732         };
733 
734     if (Subtarget.hasVInstructionsF16())
735       for (MVT VT : F16VecVTs)
736         SetCommonVFPActions(VT);
737 
738     for (MVT VT : F32VecVTs) {
739       if (Subtarget.hasVInstructionsF32())
740         SetCommonVFPActions(VT);
741       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
742     }
743 
744     for (MVT VT : F64VecVTs) {
745       if (Subtarget.hasVInstructionsF64())
746         SetCommonVFPActions(VT);
747       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
748       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
749     }
750 
751     if (Subtarget.useRVVForFixedLengthVectors()) {
752       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
753         if (!useRVVForFixedLengthVectorVT(VT))
754           continue;
755 
756         // By default everything must be expanded.
757         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
758           setOperationAction(Op, VT, Expand);
759         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
760           setTruncStoreAction(VT, OtherVT, Expand);
761           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
762           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
763           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
764         }
765 
766         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
767         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
768         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
769 
770         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
771         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
772 
773         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
774         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
775 
776         setOperationAction(ISD::LOAD, VT, Custom);
777         setOperationAction(ISD::STORE, VT, Custom);
778 
779         setOperationAction(ISD::SETCC, VT, Custom);
780 
781         setOperationAction(ISD::SELECT, VT, Custom);
782 
783         setOperationAction(ISD::TRUNCATE, VT, Custom);
784 
785         setOperationAction(ISD::BITCAST, VT, Custom);
786 
787         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
788         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
789         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
790 
791         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
792         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
793         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
794 
795         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
796         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
797         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
798         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
799 
800         // Operations below are different for between masks and other vectors.
801         if (VT.getVectorElementType() == MVT::i1) {
802           setOperationAction(ISD::AND, VT, Custom);
803           setOperationAction(ISD::OR, VT, Custom);
804           setOperationAction(ISD::XOR, VT, Custom);
805           continue;
806         }
807 
808         // Use SPLAT_VECTOR to prevent type legalization from destroying the
809         // splats when type legalizing i64 scalar on RV32.
810         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
811         // improvements first.
812         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
813           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
814           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
815         }
816 
817         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
818         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
819 
820         setOperationAction(ISD::MLOAD, VT, Custom);
821         setOperationAction(ISD::MSTORE, VT, Custom);
822         setOperationAction(ISD::MGATHER, VT, Custom);
823         setOperationAction(ISD::MSCATTER, VT, Custom);
824 
825         setOperationAction(ISD::VP_LOAD, VT, Custom);
826         setOperationAction(ISD::VP_STORE, VT, Custom);
827         setOperationAction(ISD::VP_GATHER, VT, Custom);
828         setOperationAction(ISD::VP_SCATTER, VT, Custom);
829 
830         setOperationAction(ISD::ADD, VT, Custom);
831         setOperationAction(ISD::MUL, VT, Custom);
832         setOperationAction(ISD::SUB, VT, Custom);
833         setOperationAction(ISD::AND, VT, Custom);
834         setOperationAction(ISD::OR, VT, Custom);
835         setOperationAction(ISD::XOR, VT, Custom);
836         setOperationAction(ISD::SDIV, VT, Custom);
837         setOperationAction(ISD::SREM, VT, Custom);
838         setOperationAction(ISD::UDIV, VT, Custom);
839         setOperationAction(ISD::UREM, VT, Custom);
840         setOperationAction(ISD::SHL, VT, Custom);
841         setOperationAction(ISD::SRA, VT, Custom);
842         setOperationAction(ISD::SRL, VT, Custom);
843 
844         setOperationAction(ISD::SMIN, VT, Custom);
845         setOperationAction(ISD::SMAX, VT, Custom);
846         setOperationAction(ISD::UMIN, VT, Custom);
847         setOperationAction(ISD::UMAX, VT, Custom);
848         setOperationAction(ISD::ABS,  VT, Custom);
849 
850         setOperationAction(ISD::MULHS, VT, Custom);
851         setOperationAction(ISD::MULHU, VT, Custom);
852 
853         setOperationAction(ISD::SADDSAT, VT, Custom);
854         setOperationAction(ISD::UADDSAT, VT, Custom);
855         setOperationAction(ISD::SSUBSAT, VT, Custom);
856         setOperationAction(ISD::USUBSAT, VT, Custom);
857 
858         setOperationAction(ISD::VSELECT, VT, Custom);
859         setOperationAction(ISD::SELECT_CC, VT, Expand);
860 
861         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
862         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
863         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
864 
865         // Custom-lower reduction operations to set up the corresponding custom
866         // nodes' operands.
867         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
868         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
869         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
870         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
871         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
872 
873         for (unsigned VPOpc : IntegerVPOps)
874           setOperationAction(VPOpc, VT, Custom);
875 
876         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
877         // type that can represent the value exactly.
878         if (VT.getVectorElementType() != MVT::i64) {
879           MVT FloatEltVT =
880               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
881           EVT FloatVT =
882               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
883           if (isTypeLegal(FloatVT)) {
884             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
885             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
886           }
887         }
888       }
889 
890       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
891         if (!useRVVForFixedLengthVectorVT(VT))
892           continue;
893 
894         // By default everything must be expanded.
895         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
896           setOperationAction(Op, VT, Expand);
897         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
898           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
899           setTruncStoreAction(VT, OtherVT, Expand);
900         }
901 
902         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
903         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
904         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
905 
906         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
907         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
908         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
909         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
910         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
911 
912         setOperationAction(ISD::LOAD, VT, Custom);
913         setOperationAction(ISD::STORE, VT, Custom);
914         setOperationAction(ISD::MLOAD, VT, Custom);
915         setOperationAction(ISD::MSTORE, VT, Custom);
916         setOperationAction(ISD::MGATHER, VT, Custom);
917         setOperationAction(ISD::MSCATTER, VT, Custom);
918 
919         setOperationAction(ISD::VP_LOAD, VT, Custom);
920         setOperationAction(ISD::VP_STORE, VT, Custom);
921         setOperationAction(ISD::VP_GATHER, VT, Custom);
922         setOperationAction(ISD::VP_SCATTER, VT, Custom);
923 
924         setOperationAction(ISD::FADD, VT, Custom);
925         setOperationAction(ISD::FSUB, VT, Custom);
926         setOperationAction(ISD::FMUL, VT, Custom);
927         setOperationAction(ISD::FDIV, VT, Custom);
928         setOperationAction(ISD::FNEG, VT, Custom);
929         setOperationAction(ISD::FABS, VT, Custom);
930         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
931         setOperationAction(ISD::FSQRT, VT, Custom);
932         setOperationAction(ISD::FMA, VT, Custom);
933         setOperationAction(ISD::FMINNUM, VT, Custom);
934         setOperationAction(ISD::FMAXNUM, VT, Custom);
935 
936         setOperationAction(ISD::FP_ROUND, VT, Custom);
937         setOperationAction(ISD::FP_EXTEND, VT, Custom);
938 
939         for (auto CC : VFPCCToExpand)
940           setCondCodeAction(CC, VT, Expand);
941 
942         setOperationAction(ISD::VSELECT, VT, Custom);
943         setOperationAction(ISD::SELECT, VT, Custom);
944         setOperationAction(ISD::SELECT_CC, VT, Expand);
945 
946         setOperationAction(ISD::BITCAST, VT, Custom);
947 
948         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
949         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
950         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
951         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
952 
953         for (unsigned VPOpc : FloatingPointVPOps)
954           setOperationAction(VPOpc, VT, Custom);
955       }
956 
957       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
958       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
959       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
960       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
961       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
962       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
963       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
964       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
965     }
966   }
967 
968   // Function alignments.
969   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
970   setMinFunctionAlignment(FunctionAlignment);
971   setPrefFunctionAlignment(FunctionAlignment);
972 
973   setMinimumJumpTableEntries(5);
974 
975   // Jumps are expensive, compared to logic
976   setJumpIsExpensive();
977 
978   setTargetDAGCombine(ISD::ADD);
979   setTargetDAGCombine(ISD::SUB);
980   setTargetDAGCombine(ISD::AND);
981   setTargetDAGCombine(ISD::OR);
982   setTargetDAGCombine(ISD::XOR);
983   setTargetDAGCombine(ISD::ANY_EXTEND);
984   setTargetDAGCombine(ISD::ZERO_EXTEND);
985   if (Subtarget.hasVInstructions()) {
986     setTargetDAGCombine(ISD::FCOPYSIGN);
987     setTargetDAGCombine(ISD::MGATHER);
988     setTargetDAGCombine(ISD::MSCATTER);
989     setTargetDAGCombine(ISD::VP_GATHER);
990     setTargetDAGCombine(ISD::VP_SCATTER);
991     setTargetDAGCombine(ISD::SRA);
992     setTargetDAGCombine(ISD::SRL);
993     setTargetDAGCombine(ISD::SHL);
994     setTargetDAGCombine(ISD::STORE);
995   }
996 }
997 
998 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
999                                             LLVMContext &Context,
1000                                             EVT VT) const {
1001   if (!VT.isVector())
1002     return getPointerTy(DL);
1003   if (Subtarget.hasVInstructions() &&
1004       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1005     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1006   return VT.changeVectorElementTypeToInteger();
1007 }
1008 
1009 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1010   return Subtarget.getXLenVT();
1011 }
1012 
1013 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1014                                              const CallInst &I,
1015                                              MachineFunction &MF,
1016                                              unsigned Intrinsic) const {
1017   auto &DL = I.getModule()->getDataLayout();
1018   switch (Intrinsic) {
1019   default:
1020     return false;
1021   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1022   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1023   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1024   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1025   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1026   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1027   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1028   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1029   case Intrinsic::riscv_masked_cmpxchg_i32: {
1030     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
1031     Info.opc = ISD::INTRINSIC_W_CHAIN;
1032     Info.memVT = MVT::getVT(PtrTy->getElementType());
1033     Info.ptrVal = I.getArgOperand(0);
1034     Info.offset = 0;
1035     Info.align = Align(4);
1036     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1037                  MachineMemOperand::MOVolatile;
1038     return true;
1039   }
1040   case Intrinsic::riscv_masked_strided_load:
1041     Info.opc = ISD::INTRINSIC_W_CHAIN;
1042     Info.ptrVal = I.getArgOperand(1);
1043     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1044     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1045     Info.size = MemoryLocation::UnknownSize;
1046     Info.flags |= MachineMemOperand::MOLoad;
1047     return true;
1048   case Intrinsic::riscv_masked_strided_store:
1049     Info.opc = ISD::INTRINSIC_VOID;
1050     Info.ptrVal = I.getArgOperand(1);
1051     Info.memVT =
1052         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1053     Info.align = Align(
1054         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1055         8);
1056     Info.size = MemoryLocation::UnknownSize;
1057     Info.flags |= MachineMemOperand::MOStore;
1058     return true;
1059   }
1060 }
1061 
1062 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1063                                                 const AddrMode &AM, Type *Ty,
1064                                                 unsigned AS,
1065                                                 Instruction *I) const {
1066   // No global is ever allowed as a base.
1067   if (AM.BaseGV)
1068     return false;
1069 
1070   // Require a 12-bit signed offset.
1071   if (!isInt<12>(AM.BaseOffs))
1072     return false;
1073 
1074   switch (AM.Scale) {
1075   case 0: // "r+i" or just "i", depending on HasBaseReg.
1076     break;
1077   case 1:
1078     if (!AM.HasBaseReg) // allow "r+i".
1079       break;
1080     return false; // disallow "r+r" or "r+r+i".
1081   default:
1082     return false;
1083   }
1084 
1085   return true;
1086 }
1087 
1088 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1089   return isInt<12>(Imm);
1090 }
1091 
1092 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1093   return isInt<12>(Imm);
1094 }
1095 
1096 // On RV32, 64-bit integers are split into their high and low parts and held
1097 // in two different registers, so the trunc is free since the low register can
1098 // just be used.
1099 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1100   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1101     return false;
1102   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1103   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1104   return (SrcBits == 64 && DestBits == 32);
1105 }
1106 
1107 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1108   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1109       !SrcVT.isInteger() || !DstVT.isInteger())
1110     return false;
1111   unsigned SrcBits = SrcVT.getSizeInBits();
1112   unsigned DestBits = DstVT.getSizeInBits();
1113   return (SrcBits == 64 && DestBits == 32);
1114 }
1115 
1116 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1117   // Zexts are free if they can be combined with a load.
1118   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1119     EVT MemVT = LD->getMemoryVT();
1120     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
1121          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
1122         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1123          LD->getExtensionType() == ISD::ZEXTLOAD))
1124       return true;
1125   }
1126 
1127   return TargetLowering::isZExtFree(Val, VT2);
1128 }
1129 
1130 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1131   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1132 }
1133 
1134 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1135   return Subtarget.hasStdExtZbb();
1136 }
1137 
1138 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1139   return Subtarget.hasStdExtZbb();
1140 }
1141 
1142 bool RISCVTargetLowering::hasAndNot(SDValue Y) const {
1143   EVT VT = Y.getValueType();
1144 
1145   // FIXME: Support vectors once we have tests.
1146   if (VT.isVector())
1147     return false;
1148 
1149   return Subtarget.hasStdExtZbb() && !isa<ConstantSDNode>(Y);
1150 }
1151 
1152 /// Check if sinking \p I's operands to I's basic block is profitable, because
1153 /// the operands can be folded into a target instruction, e.g.
1154 /// splats of scalars can fold into vector instructions.
1155 bool RISCVTargetLowering::shouldSinkOperands(
1156     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1157   using namespace llvm::PatternMatch;
1158 
1159   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1160     return false;
1161 
1162   auto IsSinker = [&](Instruction *I, int Operand) {
1163     switch (I->getOpcode()) {
1164     case Instruction::Add:
1165     case Instruction::Sub:
1166     case Instruction::Mul:
1167     case Instruction::And:
1168     case Instruction::Or:
1169     case Instruction::Xor:
1170     case Instruction::FAdd:
1171     case Instruction::FSub:
1172     case Instruction::FMul:
1173     case Instruction::FDiv:
1174     case Instruction::ICmp:
1175     case Instruction::FCmp:
1176       return true;
1177     case Instruction::Shl:
1178     case Instruction::LShr:
1179     case Instruction::AShr:
1180       return Operand == 1;
1181     case Instruction::Call:
1182       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1183         switch (II->getIntrinsicID()) {
1184         case Intrinsic::fma:
1185           return Operand == 0 || Operand == 1;
1186         default:
1187           return false;
1188         }
1189       }
1190       return false;
1191     default:
1192       return false;
1193     }
1194   };
1195 
1196   for (auto OpIdx : enumerate(I->operands())) {
1197     if (!IsSinker(I, OpIdx.index()))
1198       continue;
1199 
1200     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1201     // Make sure we are not already sinking this operand
1202     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1203       continue;
1204 
1205     // We are looking for a splat that can be sunk.
1206     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1207                              m_Undef(), m_ZeroMask())))
1208       continue;
1209 
1210     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1211     // and vector registers
1212     for (Use &U : Op->uses()) {
1213       Instruction *Insn = cast<Instruction>(U.getUser());
1214       if (!IsSinker(Insn, U.getOperandNo()))
1215         return false;
1216     }
1217 
1218     Ops.push_back(&Op->getOperandUse(0));
1219     Ops.push_back(&OpIdx.value());
1220   }
1221   return true;
1222 }
1223 
1224 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1225                                        bool ForCodeSize) const {
1226   if (VT == MVT::f16 && !Subtarget.hasStdExtZfhmin())
1227     return false;
1228   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1229     return false;
1230   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1231     return false;
1232   if (Imm.isNegZero())
1233     return false;
1234   return Imm.isZero();
1235 }
1236 
1237 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1238   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1239          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1240          (VT == MVT::f64 && Subtarget.hasStdExtD());
1241 }
1242 
1243 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1244                                                       CallingConv::ID CC,
1245                                                       EVT VT) const {
1246   // Use f32 to pass f16 if it is legal and Zfhmin/Zfh is not enabled.
1247   // We might still end up using a GPR but that will be decided based on ABI.
1248   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfhmin())
1249     return MVT::f32;
1250 
1251   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1252 }
1253 
1254 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1255                                                            CallingConv::ID CC,
1256                                                            EVT VT) const {
1257   // Use f32 to pass f16 if it is legal and Zfhmin/Zfh is not enabled.
1258   // We might still end up using a GPR but that will be decided based on ABI.
1259   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfhmin())
1260     return 1;
1261 
1262   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1263 }
1264 
1265 // Changes the condition code and swaps operands if necessary, so the SetCC
1266 // operation matches one of the comparisons supported directly by branches
1267 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1268 // with 1/-1.
1269 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1270                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1271   // Convert X > -1 to X >= 0.
1272   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1273     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1274     CC = ISD::SETGE;
1275     return;
1276   }
1277   // Convert X < 1 to 0 >= X.
1278   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1279     RHS = LHS;
1280     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1281     CC = ISD::SETGE;
1282     return;
1283   }
1284 
1285   switch (CC) {
1286   default:
1287     break;
1288   case ISD::SETGT:
1289   case ISD::SETLE:
1290   case ISD::SETUGT:
1291   case ISD::SETULE:
1292     CC = ISD::getSetCCSwappedOperands(CC);
1293     std::swap(LHS, RHS);
1294     break;
1295   }
1296 }
1297 
1298 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1299   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1300   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1301   if (VT.getVectorElementType() == MVT::i1)
1302     KnownSize *= 8;
1303 
1304   switch (KnownSize) {
1305   default:
1306     llvm_unreachable("Invalid LMUL.");
1307   case 8:
1308     return RISCVII::VLMUL::LMUL_F8;
1309   case 16:
1310     return RISCVII::VLMUL::LMUL_F4;
1311   case 32:
1312     return RISCVII::VLMUL::LMUL_F2;
1313   case 64:
1314     return RISCVII::VLMUL::LMUL_1;
1315   case 128:
1316     return RISCVII::VLMUL::LMUL_2;
1317   case 256:
1318     return RISCVII::VLMUL::LMUL_4;
1319   case 512:
1320     return RISCVII::VLMUL::LMUL_8;
1321   }
1322 }
1323 
1324 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1325   switch (LMul) {
1326   default:
1327     llvm_unreachable("Invalid LMUL.");
1328   case RISCVII::VLMUL::LMUL_F8:
1329   case RISCVII::VLMUL::LMUL_F4:
1330   case RISCVII::VLMUL::LMUL_F2:
1331   case RISCVII::VLMUL::LMUL_1:
1332     return RISCV::VRRegClassID;
1333   case RISCVII::VLMUL::LMUL_2:
1334     return RISCV::VRM2RegClassID;
1335   case RISCVII::VLMUL::LMUL_4:
1336     return RISCV::VRM4RegClassID;
1337   case RISCVII::VLMUL::LMUL_8:
1338     return RISCV::VRM8RegClassID;
1339   }
1340 }
1341 
1342 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1343   RISCVII::VLMUL LMUL = getLMUL(VT);
1344   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1345       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1346       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1347       LMUL == RISCVII::VLMUL::LMUL_1) {
1348     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1349                   "Unexpected subreg numbering");
1350     return RISCV::sub_vrm1_0 + Index;
1351   }
1352   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1353     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1354                   "Unexpected subreg numbering");
1355     return RISCV::sub_vrm2_0 + Index;
1356   }
1357   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1358     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1359                   "Unexpected subreg numbering");
1360     return RISCV::sub_vrm4_0 + Index;
1361   }
1362   llvm_unreachable("Invalid vector type.");
1363 }
1364 
1365 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1366   if (VT.getVectorElementType() == MVT::i1)
1367     return RISCV::VRRegClassID;
1368   return getRegClassIDForLMUL(getLMUL(VT));
1369 }
1370 
1371 // Attempt to decompose a subvector insert/extract between VecVT and
1372 // SubVecVT via subregister indices. Returns the subregister index that
1373 // can perform the subvector insert/extract with the given element index, as
1374 // well as the index corresponding to any leftover subvectors that must be
1375 // further inserted/extracted within the register class for SubVecVT.
1376 std::pair<unsigned, unsigned>
1377 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1378     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1379     const RISCVRegisterInfo *TRI) {
1380   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1381                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1382                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1383                 "Register classes not ordered");
1384   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1385   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1386   // Try to compose a subregister index that takes us from the incoming
1387   // LMUL>1 register class down to the outgoing one. At each step we half
1388   // the LMUL:
1389   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1390   // Note that this is not guaranteed to find a subregister index, such as
1391   // when we are extracting from one VR type to another.
1392   unsigned SubRegIdx = RISCV::NoSubRegister;
1393   for (const unsigned RCID :
1394        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1395     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1396       VecVT = VecVT.getHalfNumVectorElementsVT();
1397       bool IsHi =
1398           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1399       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1400                                             getSubregIndexByMVT(VecVT, IsHi));
1401       if (IsHi)
1402         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1403     }
1404   return {SubRegIdx, InsertExtractIdx};
1405 }
1406 
1407 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1408 // stores for those types.
1409 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1410   return !Subtarget.useRVVForFixedLengthVectors() ||
1411          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1412 }
1413 
1414 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1415   if (ScalarTy->isPointerTy())
1416     return true;
1417 
1418   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1419       ScalarTy->isIntegerTy(32))
1420     return true;
1421 
1422   if (ScalarTy->isIntegerTy(64))
1423     return Subtarget.hasVInstructionsI64();
1424 
1425   if (ScalarTy->isHalfTy())
1426     return Subtarget.hasVInstructionsF16();
1427   if (ScalarTy->isFloatTy())
1428     return Subtarget.hasVInstructionsF32();
1429   if (ScalarTy->isDoubleTy())
1430     return Subtarget.hasVInstructionsF64();
1431 
1432   return false;
1433 }
1434 
1435 static bool useRVVForFixedLengthVectorVT(MVT VT,
1436                                          const RISCVSubtarget &Subtarget) {
1437   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1438   if (!Subtarget.useRVVForFixedLengthVectors())
1439     return false;
1440 
1441   // We only support a set of vector types with a consistent maximum fixed size
1442   // across all supported vector element types to avoid legalization issues.
1443   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1444   // fixed-length vector type we support is 1024 bytes.
1445   if (VT.getFixedSizeInBits() > 1024 * 8)
1446     return false;
1447 
1448   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1449 
1450   MVT EltVT = VT.getVectorElementType();
1451 
1452   // Don't use RVV for vectors we cannot scalarize if required.
1453   switch (EltVT.SimpleTy) {
1454   // i1 is supported but has different rules.
1455   default:
1456     return false;
1457   case MVT::i1:
1458     // Masks can only use a single register.
1459     if (VT.getVectorNumElements() > MinVLen)
1460       return false;
1461     MinVLen /= 8;
1462     break;
1463   case MVT::i8:
1464   case MVT::i16:
1465   case MVT::i32:
1466     break;
1467   case MVT::i64:
1468     if (!Subtarget.hasVInstructionsI64())
1469       return false;
1470     break;
1471   case MVT::f16:
1472     if (!Subtarget.hasVInstructionsF16())
1473       return false;
1474     break;
1475   case MVT::f32:
1476     if (!Subtarget.hasVInstructionsF32())
1477       return false;
1478     break;
1479   case MVT::f64:
1480     if (!Subtarget.hasVInstructionsF64())
1481       return false;
1482     break;
1483   }
1484 
1485   // Reject elements larger than ELEN.
1486   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1487     return false;
1488 
1489   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1490   // Don't use RVV for types that don't fit.
1491   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1492     return false;
1493 
1494   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1495   // the base fixed length RVV support in place.
1496   if (!VT.isPow2VectorType())
1497     return false;
1498 
1499   return true;
1500 }
1501 
1502 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1503   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1504 }
1505 
1506 // Return the largest legal scalable vector type that matches VT's element type.
1507 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1508                                             const RISCVSubtarget &Subtarget) {
1509   // This may be called before legal types are setup.
1510   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1511           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1512          "Expected legal fixed length vector!");
1513 
1514   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1515   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1516 
1517   MVT EltVT = VT.getVectorElementType();
1518   switch (EltVT.SimpleTy) {
1519   default:
1520     llvm_unreachable("unexpected element type for RVV container");
1521   case MVT::i1:
1522   case MVT::i8:
1523   case MVT::i16:
1524   case MVT::i32:
1525   case MVT::i64:
1526   case MVT::f16:
1527   case MVT::f32:
1528   case MVT::f64: {
1529     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1530     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1531     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1532     unsigned NumElts =
1533         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1534     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1535     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1536     return MVT::getScalableVectorVT(EltVT, NumElts);
1537   }
1538   }
1539 }
1540 
1541 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1542                                             const RISCVSubtarget &Subtarget) {
1543   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1544                                           Subtarget);
1545 }
1546 
1547 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1548   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1549 }
1550 
1551 // Grow V to consume an entire RVV register.
1552 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1553                                        const RISCVSubtarget &Subtarget) {
1554   assert(VT.isScalableVector() &&
1555          "Expected to convert into a scalable vector!");
1556   assert(V.getValueType().isFixedLengthVector() &&
1557          "Expected a fixed length vector operand!");
1558   SDLoc DL(V);
1559   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1560   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1561 }
1562 
1563 // Shrink V so it's just big enough to maintain a VT's worth of data.
1564 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1565                                          const RISCVSubtarget &Subtarget) {
1566   assert(VT.isFixedLengthVector() &&
1567          "Expected to convert into a fixed length vector!");
1568   assert(V.getValueType().isScalableVector() &&
1569          "Expected a scalable vector operand!");
1570   SDLoc DL(V);
1571   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1572   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1573 }
1574 
1575 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1576 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1577 // the vector type that it is contained in.
1578 static std::pair<SDValue, SDValue>
1579 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1580                 const RISCVSubtarget &Subtarget) {
1581   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1582   MVT XLenVT = Subtarget.getXLenVT();
1583   SDValue VL = VecVT.isFixedLengthVector()
1584                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1585                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1586   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1587   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1588   return {Mask, VL};
1589 }
1590 
1591 // As above but assuming the given type is a scalable vector type.
1592 static std::pair<SDValue, SDValue>
1593 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1594                         const RISCVSubtarget &Subtarget) {
1595   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1596   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1597 }
1598 
1599 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1600 // of either is (currently) supported. This can get us into an infinite loop
1601 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1602 // as a ..., etc.
1603 // Until either (or both) of these can reliably lower any node, reporting that
1604 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1605 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1606 // which is not desirable.
1607 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1608     EVT VT, unsigned DefinedValues) const {
1609   return false;
1610 }
1611 
1612 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1613   // Only splats are currently supported.
1614   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1615     return true;
1616 
1617   return false;
1618 }
1619 
1620 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
1621   // RISCV FP-to-int conversions saturate to the destination register size, but
1622   // don't produce 0 for nan. We can use a conversion instruction and fix the
1623   // nan case with a compare and a select.
1624   SDValue Src = Op.getOperand(0);
1625 
1626   EVT DstVT = Op.getValueType();
1627   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1628 
1629   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1630   unsigned Opc;
1631   if (SatVT == DstVT)
1632     Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
1633   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1634     Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
1635   else
1636     return SDValue();
1637   // FIXME: Support other SatVTs by clamping before or after the conversion.
1638 
1639   SDLoc DL(Op);
1640   SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);
1641 
1642   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1643   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1644 }
1645 
1646 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1647                                  const RISCVSubtarget &Subtarget) {
1648   MVT VT = Op.getSimpleValueType();
1649   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1650 
1651   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1652 
1653   SDLoc DL(Op);
1654   SDValue Mask, VL;
1655   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1656 
1657   unsigned Opc =
1658       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1659   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1660   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1661 }
1662 
1663 struct VIDSequence {
1664   int64_t StepNumerator;
1665   unsigned StepDenominator;
1666   int64_t Addend;
1667 };
1668 
1669 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1670 // to the (non-zero) step S and start value X. This can be then lowered as the
1671 // RVV sequence (VID * S) + X, for example.
1672 // The step S is represented as an integer numerator divided by a positive
1673 // denominator. Note that the implementation currently only identifies
1674 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1675 // cannot detect 2/3, for example.
1676 // Note that this method will also match potentially unappealing index
1677 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1678 // determine whether this is worth generating code for.
1679 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1680   unsigned NumElts = Op.getNumOperands();
1681   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1682   if (!Op.getValueType().isInteger())
1683     return None;
1684 
1685   Optional<unsigned> SeqStepDenom;
1686   Optional<int64_t> SeqStepNum, SeqAddend;
1687   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1688   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1689   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1690     // Assume undef elements match the sequence; we just have to be careful
1691     // when interpolating across them.
1692     if (Op.getOperand(Idx).isUndef())
1693       continue;
1694     // The BUILD_VECTOR must be all constants.
1695     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1696       return None;
1697 
1698     uint64_t Val = Op.getConstantOperandVal(Idx) &
1699                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1700 
1701     if (PrevElt) {
1702       // Calculate the step since the last non-undef element, and ensure
1703       // it's consistent across the entire sequence.
1704       unsigned IdxDiff = Idx - PrevElt->second;
1705       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1706 
1707       // A zero-value value difference means that we're somewhere in the middle
1708       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1709       // step change before evaluating the sequence.
1710       if (ValDiff != 0) {
1711         int64_t Remainder = ValDiff % IdxDiff;
1712         // Normalize the step if it's greater than 1.
1713         if (Remainder != ValDiff) {
1714           // The difference must cleanly divide the element span.
1715           if (Remainder != 0)
1716             return None;
1717           ValDiff /= IdxDiff;
1718           IdxDiff = 1;
1719         }
1720 
1721         if (!SeqStepNum)
1722           SeqStepNum = ValDiff;
1723         else if (ValDiff != SeqStepNum)
1724           return None;
1725 
1726         if (!SeqStepDenom)
1727           SeqStepDenom = IdxDiff;
1728         else if (IdxDiff != *SeqStepDenom)
1729           return None;
1730       }
1731     }
1732 
1733     // Record and/or check any addend.
1734     if (SeqStepNum && SeqStepDenom) {
1735       uint64_t ExpectedVal =
1736           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1737       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1738       if (!SeqAddend)
1739         SeqAddend = Addend;
1740       else if (SeqAddend != Addend)
1741         return None;
1742     }
1743 
1744     // Record this non-undef element for later.
1745     if (!PrevElt || PrevElt->first != Val)
1746       PrevElt = std::make_pair(Val, Idx);
1747   }
1748   // We need to have logged both a step and an addend for this to count as
1749   // a legal index sequence.
1750   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1751     return None;
1752 
1753   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1754 }
1755 
1756 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1757                                  const RISCVSubtarget &Subtarget) {
1758   MVT VT = Op.getSimpleValueType();
1759   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1760 
1761   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1762 
1763   SDLoc DL(Op);
1764   SDValue Mask, VL;
1765   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1766 
1767   MVT XLenVT = Subtarget.getXLenVT();
1768   unsigned NumElts = Op.getNumOperands();
1769 
1770   if (VT.getVectorElementType() == MVT::i1) {
1771     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1772       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1773       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1774     }
1775 
1776     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1777       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1778       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1779     }
1780 
1781     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1782     // scalar integer chunks whose bit-width depends on the number of mask
1783     // bits and XLEN.
1784     // First, determine the most appropriate scalar integer type to use. This
1785     // is at most XLenVT, but may be shrunk to a smaller vector element type
1786     // according to the size of the final vector - use i8 chunks rather than
1787     // XLenVT if we're producing a v8i1. This results in more consistent
1788     // codegen across RV32 and RV64.
1789     unsigned NumViaIntegerBits =
1790         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1791     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1792       // If we have to use more than one INSERT_VECTOR_ELT then this
1793       // optimization is likely to increase code size; avoid peforming it in
1794       // such a case. We can use a load from a constant pool in this case.
1795       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1796         return SDValue();
1797       // Now we can create our integer vector type. Note that it may be larger
1798       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1799       MVT IntegerViaVecVT =
1800           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1801                            divideCeil(NumElts, NumViaIntegerBits));
1802 
1803       uint64_t Bits = 0;
1804       unsigned BitPos = 0, IntegerEltIdx = 0;
1805       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1806 
1807       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1808         // Once we accumulate enough bits to fill our scalar type, insert into
1809         // our vector and clear our accumulated data.
1810         if (I != 0 && I % NumViaIntegerBits == 0) {
1811           if (NumViaIntegerBits <= 32)
1812             Bits = SignExtend64(Bits, 32);
1813           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1814           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1815                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1816           Bits = 0;
1817           BitPos = 0;
1818           IntegerEltIdx++;
1819         }
1820         SDValue V = Op.getOperand(I);
1821         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1822         Bits |= ((uint64_t)BitValue << BitPos);
1823       }
1824 
1825       // Insert the (remaining) scalar value into position in our integer
1826       // vector type.
1827       if (NumViaIntegerBits <= 32)
1828         Bits = SignExtend64(Bits, 32);
1829       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1830       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1831                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1832 
1833       if (NumElts < NumViaIntegerBits) {
1834         // If we're producing a smaller vector than our minimum legal integer
1835         // type, bitcast to the equivalent (known-legal) mask type, and extract
1836         // our final mask.
1837         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1838         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1839         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1840                           DAG.getConstant(0, DL, XLenVT));
1841       } else {
1842         // Else we must have produced an integer type with the same size as the
1843         // mask type; bitcast for the final result.
1844         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1845         Vec = DAG.getBitcast(VT, Vec);
1846       }
1847 
1848       return Vec;
1849     }
1850 
1851     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1852     // vector type, we have a legal equivalently-sized i8 type, so we can use
1853     // that.
1854     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1855     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1856 
1857     SDValue WideVec;
1858     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1859       // For a splat, perform a scalar truncate before creating the wider
1860       // vector.
1861       assert(Splat.getValueType() == XLenVT &&
1862              "Unexpected type for i1 splat value");
1863       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1864                           DAG.getConstant(1, DL, XLenVT));
1865       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1866     } else {
1867       SmallVector<SDValue, 8> Ops(Op->op_values());
1868       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1869       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1870       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1871     }
1872 
1873     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1874   }
1875 
1876   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1877     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1878                                         : RISCVISD::VMV_V_X_VL;
1879     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1880     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1881   }
1882 
1883   // Try and match index sequences, which we can lower to the vid instruction
1884   // with optional modifications. An all-undef vector is matched by
1885   // getSplatValue, above.
1886   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1887     int64_t StepNumerator = SimpleVID->StepNumerator;
1888     unsigned StepDenominator = SimpleVID->StepDenominator;
1889     int64_t Addend = SimpleVID->Addend;
1890     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
1891     // threshold since it's the immediate value many RVV instructions accept.
1892     if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
1893         isInt<5>(Addend)) {
1894       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1895       // Convert right out of the scalable type so we can use standard ISD
1896       // nodes for the rest of the computation. If we used scalable types with
1897       // these, we'd lose the fixed-length vector info and generate worse
1898       // vsetvli code.
1899       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1900       assert(StepNumerator != 0 && "Invalid step");
1901       bool Negate = false;
1902       if (StepNumerator != 1) {
1903         int64_t SplatStepVal = StepNumerator;
1904         unsigned Opcode = ISD::MUL;
1905         if (isPowerOf2_64(std::abs(StepNumerator))) {
1906           Negate = StepNumerator < 0;
1907           Opcode = ISD::SHL;
1908           SplatStepVal = Log2_64(std::abs(StepNumerator));
1909         }
1910         SDValue SplatStep = DAG.getSplatVector(
1911             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1912         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1913       }
1914       if (StepDenominator != 1) {
1915         SDValue SplatStep = DAG.getSplatVector(
1916             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
1917         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
1918       }
1919       if (Addend != 0 || Negate) {
1920         SDValue SplatAddend =
1921             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
1922         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
1923       }
1924       return VID;
1925     }
1926   }
1927 
1928   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1929   // when re-interpreted as a vector with a larger element type. For example,
1930   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1931   // could be instead splat as
1932   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1933   // TODO: This optimization could also work on non-constant splats, but it
1934   // would require bit-manipulation instructions to construct the splat value.
1935   SmallVector<SDValue> Sequence;
1936   unsigned EltBitSize = VT.getScalarSizeInBits();
1937   const auto *BV = cast<BuildVectorSDNode>(Op);
1938   if (VT.isInteger() && EltBitSize < 64 &&
1939       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1940       BV->getRepeatedSequence(Sequence) &&
1941       (Sequence.size() * EltBitSize) <= 64) {
1942     unsigned SeqLen = Sequence.size();
1943     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1944     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1945     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1946             ViaIntVT == MVT::i64) &&
1947            "Unexpected sequence type");
1948 
1949     unsigned EltIdx = 0;
1950     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1951     uint64_t SplatValue = 0;
1952     // Construct the amalgamated value which can be splatted as this larger
1953     // vector type.
1954     for (const auto &SeqV : Sequence) {
1955       if (!SeqV.isUndef())
1956         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1957                        << (EltIdx * EltBitSize));
1958       EltIdx++;
1959     }
1960 
1961     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1962     // achieve better constant materializion.
1963     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1964       SplatValue = SignExtend64(SplatValue, 32);
1965 
1966     // Since we can't introduce illegal i64 types at this stage, we can only
1967     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1968     // way we can use RVV instructions to splat.
1969     assert((ViaIntVT.bitsLE(XLenVT) ||
1970             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1971            "Unexpected bitcast sequence");
1972     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1973       SDValue ViaVL =
1974           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1975       MVT ViaContainerVT =
1976           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1977       SDValue Splat =
1978           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1979                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1980       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1981       return DAG.getBitcast(VT, Splat);
1982     }
1983   }
1984 
1985   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1986   // which constitute a large proportion of the elements. In such cases we can
1987   // splat a vector with the dominant element and make up the shortfall with
1988   // INSERT_VECTOR_ELTs.
1989   // Note that this includes vectors of 2 elements by association. The
1990   // upper-most element is the "dominant" one, allowing us to use a splat to
1991   // "insert" the upper element, and an insert of the lower element at position
1992   // 0, which improves codegen.
1993   SDValue DominantValue;
1994   unsigned MostCommonCount = 0;
1995   DenseMap<SDValue, unsigned> ValueCounts;
1996   unsigned NumUndefElts =
1997       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1998 
1999   // Track the number of scalar loads we know we'd be inserting, estimated as
2000   // any non-zero floating-point constant. Other kinds of element are either
2001   // already in registers or are materialized on demand. The threshold at which
2002   // a vector load is more desirable than several scalar materializion and
2003   // vector-insertion instructions is not known.
2004   unsigned NumScalarLoads = 0;
2005 
2006   for (SDValue V : Op->op_values()) {
2007     if (V.isUndef())
2008       continue;
2009 
2010     ValueCounts.insert(std::make_pair(V, 0));
2011     unsigned &Count = ValueCounts[V];
2012 
2013     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2014       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2015 
2016     // Is this value dominant? In case of a tie, prefer the highest element as
2017     // it's cheaper to insert near the beginning of a vector than it is at the
2018     // end.
2019     if (++Count >= MostCommonCount) {
2020       DominantValue = V;
2021       MostCommonCount = Count;
2022     }
2023   }
2024 
2025   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2026   unsigned NumDefElts = NumElts - NumUndefElts;
2027   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2028 
2029   // Don't perform this optimization when optimizing for size, since
2030   // materializing elements and inserting them tends to cause code bloat.
2031   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2032       ((MostCommonCount > DominantValueCountThreshold) ||
2033        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2034     // Start by splatting the most common element.
2035     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2036 
2037     DenseSet<SDValue> Processed{DominantValue};
2038     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2039     for (const auto &OpIdx : enumerate(Op->ops())) {
2040       const SDValue &V = OpIdx.value();
2041       if (V.isUndef() || !Processed.insert(V).second)
2042         continue;
2043       if (ValueCounts[V] == 1) {
2044         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2045                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2046       } else {
2047         // Blend in all instances of this value using a VSELECT, using a
2048         // mask where each bit signals whether that element is the one
2049         // we're after.
2050         SmallVector<SDValue> Ops;
2051         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2052           return DAG.getConstant(V == V1, DL, XLenVT);
2053         });
2054         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2055                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2056                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2057       }
2058     }
2059 
2060     return Vec;
2061   }
2062 
2063   return SDValue();
2064 }
2065 
2066 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2067                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2068   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2069     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2070     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2071     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2072     // node in order to try and match RVV vector/scalar instructions.
2073     if ((LoC >> 31) == HiC)
2074       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2075   }
2076 
2077   // Fall back to a stack store and stride x0 vector load.
2078   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2079 }
2080 
2081 // Called by type legalization to handle splat of i64 on RV32.
2082 // FIXME: We can optimize this when the type has sign or zero bits in one
2083 // of the halves.
2084 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2085                                    SDValue VL, SelectionDAG &DAG) {
2086   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2087   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2088                            DAG.getConstant(0, DL, MVT::i32));
2089   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2090                            DAG.getConstant(1, DL, MVT::i32));
2091   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2092 }
2093 
2094 // This function lowers a splat of a scalar operand Splat with the vector
2095 // length VL. It ensures the final sequence is type legal, which is useful when
2096 // lowering a splat after type legalization.
2097 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2098                                 SelectionDAG &DAG,
2099                                 const RISCVSubtarget &Subtarget) {
2100   if (VT.isFloatingPoint())
2101     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2102 
2103   MVT XLenVT = Subtarget.getXLenVT();
2104 
2105   // Simplest case is that the operand needs to be promoted to XLenVT.
2106   if (Scalar.getValueType().bitsLE(XLenVT)) {
2107     // If the operand is a constant, sign extend to increase our chances
2108     // of being able to use a .vi instruction. ANY_EXTEND would become a
2109     // a zero extend and the simm5 check in isel would fail.
2110     // FIXME: Should we ignore the upper bits in isel instead?
2111     unsigned ExtOpc =
2112         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2113     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2114     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2115   }
2116 
2117   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2118          "Unexpected scalar for splat lowering!");
2119 
2120   // Otherwise use the more complicated splatting algorithm.
2121   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2122 }
2123 
2124 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2125                                    const RISCVSubtarget &Subtarget) {
2126   SDValue V1 = Op.getOperand(0);
2127   SDValue V2 = Op.getOperand(1);
2128   SDLoc DL(Op);
2129   MVT XLenVT = Subtarget.getXLenVT();
2130   MVT VT = Op.getSimpleValueType();
2131   unsigned NumElts = VT.getVectorNumElements();
2132   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2133 
2134   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2135 
2136   SDValue TrueMask, VL;
2137   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2138 
2139   if (SVN->isSplat()) {
2140     const int Lane = SVN->getSplatIndex();
2141     if (Lane >= 0) {
2142       MVT SVT = VT.getVectorElementType();
2143 
2144       // Turn splatted vector load into a strided load with an X0 stride.
2145       SDValue V = V1;
2146       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2147       // with undef.
2148       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2149       int Offset = Lane;
2150       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2151         int OpElements =
2152             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2153         V = V.getOperand(Offset / OpElements);
2154         Offset %= OpElements;
2155       }
2156 
2157       // We need to ensure the load isn't atomic or volatile.
2158       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2159         auto *Ld = cast<LoadSDNode>(V);
2160         Offset *= SVT.getStoreSize();
2161         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2162                                                    TypeSize::Fixed(Offset), DL);
2163 
2164         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2165         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2166           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2167           SDValue IntID =
2168               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2169           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
2170                            DAG.getRegister(RISCV::X0, XLenVT), VL};
2171           SDValue NewLoad = DAG.getMemIntrinsicNode(
2172               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2173               DAG.getMachineFunction().getMachineMemOperand(
2174                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2175           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2176           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2177         }
2178 
2179         // Otherwise use a scalar load and splat. This will give the best
2180         // opportunity to fold a splat into the operation. ISel can turn it into
2181         // the x0 strided load if we aren't able to fold away the select.
2182         if (SVT.isFloatingPoint())
2183           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2184                           Ld->getPointerInfo().getWithOffset(Offset),
2185                           Ld->getOriginalAlign(),
2186                           Ld->getMemOperand()->getFlags());
2187         else
2188           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2189                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2190                              Ld->getOriginalAlign(),
2191                              Ld->getMemOperand()->getFlags());
2192         DAG.makeEquivalentMemoryOrdering(Ld, V);
2193 
2194         unsigned Opc =
2195             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2196         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2197         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2198       }
2199 
2200       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2201       assert(Lane < (int)NumElts && "Unexpected lane!");
2202       SDValue Gather =
2203           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2204                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2205       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2206     }
2207   }
2208 
2209   // Detect shuffles which can be re-expressed as vector selects; these are
2210   // shuffles in which each element in the destination is taken from an element
2211   // at the corresponding index in either source vectors.
2212   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
2213     int MaskIndex = MaskIdx.value();
2214     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2215   });
2216 
2217   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2218 
2219   SmallVector<SDValue> MaskVals;
2220   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2221   // merged with a second vrgather.
2222   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2223 
2224   // By default we preserve the original operand order, and use a mask to
2225   // select LHS as true and RHS as false. However, since RVV vector selects may
2226   // feature splats but only on the LHS, we may choose to invert our mask and
2227   // instead select between RHS and LHS.
2228   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2229   bool InvertMask = IsSelect == SwapOps;
2230 
2231   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2232   // half.
2233   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2234 
2235   // Now construct the mask that will be used by the vselect or blended
2236   // vrgather operation. For vrgathers, construct the appropriate indices into
2237   // each vector.
2238   for (int MaskIndex : SVN->getMask()) {
2239     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2240     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2241     if (!IsSelect) {
2242       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2243       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2244                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2245                                      : DAG.getUNDEF(XLenVT));
2246       GatherIndicesRHS.push_back(
2247           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2248                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2249       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2250         ++LHSIndexCounts[MaskIndex];
2251       if (!IsLHSOrUndefIndex)
2252         ++RHSIndexCounts[MaskIndex - NumElts];
2253     }
2254   }
2255 
2256   if (SwapOps) {
2257     std::swap(V1, V2);
2258     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2259   }
2260 
2261   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2262   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2263   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2264 
2265   if (IsSelect)
2266     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2267 
2268   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2269     // On such a large vector we're unable to use i8 as the index type.
2270     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2271     // may involve vector splitting if we're already at LMUL=8, or our
2272     // user-supplied maximum fixed-length LMUL.
2273     return SDValue();
2274   }
2275 
2276   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2277   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2278   MVT IndexVT = VT.changeTypeToInteger();
2279   // Since we can't introduce illegal index types at this stage, use i16 and
2280   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2281   // than XLenVT.
2282   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2283     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2284     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2285   }
2286 
2287   MVT IndexContainerVT =
2288       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2289 
2290   SDValue Gather;
2291   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2292   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2293   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2294     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2295   } else {
2296     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2297     // If only one index is used, we can use a "splat" vrgather.
2298     // TODO: We can splat the most-common index and fix-up any stragglers, if
2299     // that's beneficial.
2300     if (LHSIndexCounts.size() == 1) {
2301       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2302       Gather =
2303           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2304                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2305     } else {
2306       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2307       LHSIndices =
2308           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2309 
2310       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2311                            TrueMask, VL);
2312     }
2313   }
2314 
2315   // If a second vector operand is used by this shuffle, blend it in with an
2316   // additional vrgather.
2317   if (!V2.isUndef()) {
2318     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2319     // If only one index is used, we can use a "splat" vrgather.
2320     // TODO: We can splat the most-common index and fix-up any stragglers, if
2321     // that's beneficial.
2322     if (RHSIndexCounts.size() == 1) {
2323       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2324       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2325                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2326     } else {
2327       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2328       RHSIndices =
2329           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2330       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2331                        VL);
2332     }
2333 
2334     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2335     SelectMask =
2336         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2337 
2338     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2339                          Gather, VL);
2340   }
2341 
2342   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2343 }
2344 
2345 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2346                                      SDLoc DL, SelectionDAG &DAG,
2347                                      const RISCVSubtarget &Subtarget) {
2348   if (VT.isScalableVector())
2349     return DAG.getFPExtendOrRound(Op, DL, VT);
2350   assert(VT.isFixedLengthVector() &&
2351          "Unexpected value type for RVV FP extend/round lowering");
2352   SDValue Mask, VL;
2353   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2354   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2355                         ? RISCVISD::FP_EXTEND_VL
2356                         : RISCVISD::FP_ROUND_VL;
2357   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2358 }
2359 
2360 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2361 // the exponent.
2362 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2363   MVT VT = Op.getSimpleValueType();
2364   unsigned EltSize = VT.getScalarSizeInBits();
2365   SDValue Src = Op.getOperand(0);
2366   SDLoc DL(Op);
2367 
2368   // We need a FP type that can represent the value.
2369   // TODO: Use f16 for i8 when possible?
2370   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2371   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2372 
2373   // Legal types should have been checked in the RISCVTargetLowering
2374   // constructor.
2375   // TODO: Splitting may make sense in some cases.
2376   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2377          "Expected legal float type!");
2378 
2379   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2380   // The trailing zero count is equal to log2 of this single bit value.
2381   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2382     SDValue Neg =
2383         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2384     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2385   }
2386 
2387   // We have a legal FP type, convert to it.
2388   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2389   // Bitcast to integer and shift the exponent to the LSB.
2390   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2391   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2392   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2393   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2394                               DAG.getConstant(ShiftAmt, DL, IntVT));
2395   // Truncate back to original type to allow vnsrl.
2396   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2397   // The exponent contains log2 of the value in biased form.
2398   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2399 
2400   // For trailing zeros, we just need to subtract the bias.
2401   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2402     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2403                        DAG.getConstant(ExponentBias, DL, VT));
2404 
2405   // For leading zeros, we need to remove the bias and convert from log2 to
2406   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2407   unsigned Adjust = ExponentBias + (EltSize - 1);
2408   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2409 }
2410 
2411 // While RVV has alignment restrictions, we should always be able to load as a
2412 // legal equivalently-sized byte-typed vector instead. This method is
2413 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2414 // the load is already correctly-aligned, it returns SDValue().
2415 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2416                                                     SelectionDAG &DAG) const {
2417   auto *Load = cast<LoadSDNode>(Op);
2418   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2419 
2420   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2421                                      Load->getMemoryVT(),
2422                                      *Load->getMemOperand()))
2423     return SDValue();
2424 
2425   SDLoc DL(Op);
2426   MVT VT = Op.getSimpleValueType();
2427   unsigned EltSizeBits = VT.getScalarSizeInBits();
2428   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2429          "Unexpected unaligned RVV load type");
2430   MVT NewVT =
2431       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2432   assert(NewVT.isValid() &&
2433          "Expecting equally-sized RVV vector types to be legal");
2434   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2435                           Load->getPointerInfo(), Load->getOriginalAlign(),
2436                           Load->getMemOperand()->getFlags());
2437   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2438 }
2439 
2440 // While RVV has alignment restrictions, we should always be able to store as a
2441 // legal equivalently-sized byte-typed vector instead. This method is
2442 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2443 // returns SDValue() if the store is already correctly aligned.
2444 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2445                                                      SelectionDAG &DAG) const {
2446   auto *Store = cast<StoreSDNode>(Op);
2447   assert(Store && Store->getValue().getValueType().isVector() &&
2448          "Expected vector store");
2449 
2450   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2451                                      Store->getMemoryVT(),
2452                                      *Store->getMemOperand()))
2453     return SDValue();
2454 
2455   SDLoc DL(Op);
2456   SDValue StoredVal = Store->getValue();
2457   MVT VT = StoredVal.getSimpleValueType();
2458   unsigned EltSizeBits = VT.getScalarSizeInBits();
2459   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2460          "Unexpected unaligned RVV store type");
2461   MVT NewVT =
2462       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2463   assert(NewVT.isValid() &&
2464          "Expecting equally-sized RVV vector types to be legal");
2465   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2466   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2467                       Store->getPointerInfo(), Store->getOriginalAlign(),
2468                       Store->getMemOperand()->getFlags());
2469 }
2470 
2471 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2472                                             SelectionDAG &DAG) const {
2473   switch (Op.getOpcode()) {
2474   default:
2475     report_fatal_error("unimplemented operand");
2476   case ISD::GlobalAddress:
2477     return lowerGlobalAddress(Op, DAG);
2478   case ISD::BlockAddress:
2479     return lowerBlockAddress(Op, DAG);
2480   case ISD::ConstantPool:
2481     return lowerConstantPool(Op, DAG);
2482   case ISD::JumpTable:
2483     return lowerJumpTable(Op, DAG);
2484   case ISD::GlobalTLSAddress:
2485     return lowerGlobalTLSAddress(Op, DAG);
2486   case ISD::SELECT:
2487     return lowerSELECT(Op, DAG);
2488   case ISD::BRCOND:
2489     return lowerBRCOND(Op, DAG);
2490   case ISD::VASTART:
2491     return lowerVASTART(Op, DAG);
2492   case ISD::FRAMEADDR:
2493     return lowerFRAMEADDR(Op, DAG);
2494   case ISD::RETURNADDR:
2495     return lowerRETURNADDR(Op, DAG);
2496   case ISD::SHL_PARTS:
2497     return lowerShiftLeftParts(Op, DAG);
2498   case ISD::SRA_PARTS:
2499     return lowerShiftRightParts(Op, DAG, true);
2500   case ISD::SRL_PARTS:
2501     return lowerShiftRightParts(Op, DAG, false);
2502   case ISD::BITCAST: {
2503     SDLoc DL(Op);
2504     EVT VT = Op.getValueType();
2505     SDValue Op0 = Op.getOperand(0);
2506     EVT Op0VT = Op0.getValueType();
2507     MVT XLenVT = Subtarget.getXLenVT();
2508     if (VT.isFixedLengthVector()) {
2509       // We can handle fixed length vector bitcasts with a simple replacement
2510       // in isel.
2511       if (Op0VT.isFixedLengthVector())
2512         return Op;
2513       // When bitcasting from scalar to fixed-length vector, insert the scalar
2514       // into a one-element vector of the result type, and perform a vector
2515       // bitcast.
2516       if (!Op0VT.isVector()) {
2517         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2518         if (!isTypeLegal(BVT))
2519           return SDValue();
2520         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2521                                               DAG.getUNDEF(BVT), Op0,
2522                                               DAG.getConstant(0, DL, XLenVT)));
2523       }
2524       return SDValue();
2525     }
2526     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2527     // thus: bitcast the vector to a one-element vector type whose element type
2528     // is the same as the result type, and extract the first element.
2529     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2530       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2531       if (!isTypeLegal(BVT))
2532         return SDValue();
2533       SDValue BVec = DAG.getBitcast(BVT, Op0);
2534       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2535                          DAG.getConstant(0, DL, XLenVT));
2536     }
2537     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2538       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2539       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2540       return FPConv;
2541     }
2542     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2543         Subtarget.hasStdExtF()) {
2544       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2545       SDValue FPConv =
2546           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2547       return FPConv;
2548     }
2549     return SDValue();
2550   }
2551   case ISD::INTRINSIC_WO_CHAIN:
2552     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2553   case ISD::INTRINSIC_W_CHAIN:
2554     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2555   case ISD::INTRINSIC_VOID:
2556     return LowerINTRINSIC_VOID(Op, DAG);
2557   case ISD::BSWAP:
2558   case ISD::BITREVERSE: {
2559     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
2560     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2561     MVT VT = Op.getSimpleValueType();
2562     SDLoc DL(Op);
2563     // Start with the maximum immediate value which is the bitwidth - 1.
2564     unsigned Imm = VT.getSizeInBits() - 1;
2565     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2566     if (Op.getOpcode() == ISD::BSWAP)
2567       Imm &= ~0x7U;
2568     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2569                        DAG.getConstant(Imm, DL, VT));
2570   }
2571   case ISD::FSHL:
2572   case ISD::FSHR: {
2573     MVT VT = Op.getSimpleValueType();
2574     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2575     SDLoc DL(Op);
2576     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2577       return Op;
2578     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2579     // use log(XLen) bits. Mask the shift amount accordingly.
2580     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2581     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2582                                 DAG.getConstant(ShAmtWidth, DL, VT));
2583     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2584     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2585   }
2586   case ISD::TRUNCATE: {
2587     SDLoc DL(Op);
2588     MVT VT = Op.getSimpleValueType();
2589     // Only custom-lower vector truncates
2590     if (!VT.isVector())
2591       return Op;
2592 
2593     // Truncates to mask types are handled differently
2594     if (VT.getVectorElementType() == MVT::i1)
2595       return lowerVectorMaskTrunc(Op, DAG);
2596 
2597     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2598     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2599     // truncate by one power of two at a time.
2600     MVT DstEltVT = VT.getVectorElementType();
2601 
2602     SDValue Src = Op.getOperand(0);
2603     MVT SrcVT = Src.getSimpleValueType();
2604     MVT SrcEltVT = SrcVT.getVectorElementType();
2605 
2606     assert(DstEltVT.bitsLT(SrcEltVT) &&
2607            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2608            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2609            "Unexpected vector truncate lowering");
2610 
2611     MVT ContainerVT = SrcVT;
2612     if (SrcVT.isFixedLengthVector()) {
2613       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2614       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2615     }
2616 
2617     SDValue Result = Src;
2618     SDValue Mask, VL;
2619     std::tie(Mask, VL) =
2620         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2621     LLVMContext &Context = *DAG.getContext();
2622     const ElementCount Count = ContainerVT.getVectorElementCount();
2623     do {
2624       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2625       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2626       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2627                            Mask, VL);
2628     } while (SrcEltVT != DstEltVT);
2629 
2630     if (SrcVT.isFixedLengthVector())
2631       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2632 
2633     return Result;
2634   }
2635   case ISD::ANY_EXTEND:
2636   case ISD::ZERO_EXTEND:
2637     if (Op.getOperand(0).getValueType().isVector() &&
2638         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2639       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2640     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2641   case ISD::SIGN_EXTEND:
2642     if (Op.getOperand(0).getValueType().isVector() &&
2643         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2644       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2645     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2646   case ISD::SPLAT_VECTOR_PARTS:
2647     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2648   case ISD::INSERT_VECTOR_ELT:
2649     return lowerINSERT_VECTOR_ELT(Op, DAG);
2650   case ISD::EXTRACT_VECTOR_ELT:
2651     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2652   case ISD::VSCALE: {
2653     MVT VT = Op.getSimpleValueType();
2654     SDLoc DL(Op);
2655     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2656     // We define our scalable vector types for lmul=1 to use a 64 bit known
2657     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2658     // vscale as VLENB / 8.
2659     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2660     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2661       // We assume VLENB is a multiple of 8. We manually choose the best shift
2662       // here because SimplifyDemandedBits isn't always able to simplify it.
2663       uint64_t Val = Op.getConstantOperandVal(0);
2664       if (isPowerOf2_64(Val)) {
2665         uint64_t Log2 = Log2_64(Val);
2666         if (Log2 < 3)
2667           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2668                              DAG.getConstant(3 - Log2, DL, VT));
2669         if (Log2 > 3)
2670           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2671                              DAG.getConstant(Log2 - 3, DL, VT));
2672         return VLENB;
2673       }
2674       // If the multiplier is a multiple of 8, scale it down to avoid needing
2675       // to shift the VLENB value.
2676       if ((Val % 8) == 0)
2677         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2678                            DAG.getConstant(Val / 8, DL, VT));
2679     }
2680 
2681     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2682                                  DAG.getConstant(3, DL, VT));
2683     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2684   }
2685   case ISD::FPOWI: {
2686     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
2687     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
2688     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
2689         Op.getOperand(1).getValueType() == MVT::i32) {
2690       SDLoc DL(Op);
2691       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
2692       SDValue Powi =
2693           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
2694       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
2695                          DAG.getIntPtrConstant(0, DL));
2696     }
2697     return SDValue();
2698   }
2699   case ISD::FP_EXTEND: {
2700     // RVV can only do fp_extend to types double the size as the source. We
2701     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2702     // via f32.
2703     SDLoc DL(Op);
2704     MVT VT = Op.getSimpleValueType();
2705     SDValue Src = Op.getOperand(0);
2706     MVT SrcVT = Src.getSimpleValueType();
2707 
2708     // Prepare any fixed-length vector operands.
2709     MVT ContainerVT = VT;
2710     if (SrcVT.isFixedLengthVector()) {
2711       ContainerVT = getContainerForFixedLengthVector(VT);
2712       MVT SrcContainerVT =
2713           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2714       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2715     }
2716 
2717     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2718         SrcVT.getVectorElementType() != MVT::f16) {
2719       // For scalable vectors, we only need to close the gap between
2720       // vXf16->vXf64.
2721       if (!VT.isFixedLengthVector())
2722         return Op;
2723       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2724       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2725       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2726     }
2727 
2728     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2729     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2730     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2731         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2732 
2733     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2734                                            DL, DAG, Subtarget);
2735     if (VT.isFixedLengthVector())
2736       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2737     return Extend;
2738   }
2739   case ISD::FP_ROUND: {
2740     // RVV can only do fp_round to types half the size as the source. We
2741     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2742     // conversion instruction.
2743     SDLoc DL(Op);
2744     MVT VT = Op.getSimpleValueType();
2745     SDValue Src = Op.getOperand(0);
2746     MVT SrcVT = Src.getSimpleValueType();
2747 
2748     // Prepare any fixed-length vector operands.
2749     MVT ContainerVT = VT;
2750     if (VT.isFixedLengthVector()) {
2751       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2752       ContainerVT =
2753           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2754       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2755     }
2756 
2757     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2758         SrcVT.getVectorElementType() != MVT::f64) {
2759       // For scalable vectors, we only need to close the gap between
2760       // vXf64<->vXf16.
2761       if (!VT.isFixedLengthVector())
2762         return Op;
2763       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2764       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2765       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2766     }
2767 
2768     SDValue Mask, VL;
2769     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2770 
2771     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2772     SDValue IntermediateRound =
2773         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2774     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2775                                           DL, DAG, Subtarget);
2776 
2777     if (VT.isFixedLengthVector())
2778       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2779     return Round;
2780   }
2781   case ISD::FP_TO_SINT:
2782   case ISD::FP_TO_UINT:
2783   case ISD::SINT_TO_FP:
2784   case ISD::UINT_TO_FP: {
2785     // RVV can only do fp<->int conversions to types half/double the size as
2786     // the source. We custom-lower any conversions that do two hops into
2787     // sequences.
2788     MVT VT = Op.getSimpleValueType();
2789     if (!VT.isVector())
2790       return Op;
2791     SDLoc DL(Op);
2792     SDValue Src = Op.getOperand(0);
2793     MVT EltVT = VT.getVectorElementType();
2794     MVT SrcVT = Src.getSimpleValueType();
2795     MVT SrcEltVT = SrcVT.getVectorElementType();
2796     unsigned EltSize = EltVT.getSizeInBits();
2797     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2798     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2799            "Unexpected vector element types");
2800 
2801     bool IsInt2FP = SrcEltVT.isInteger();
2802     // Widening conversions
2803     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2804       if (IsInt2FP) {
2805         // Do a regular integer sign/zero extension then convert to float.
2806         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2807                                       VT.getVectorElementCount());
2808         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2809                                  ? ISD::ZERO_EXTEND
2810                                  : ISD::SIGN_EXTEND;
2811         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2812         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2813       }
2814       // FP2Int
2815       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2816       // Do one doubling fp_extend then complete the operation by converting
2817       // to int.
2818       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2819       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2820       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2821     }
2822 
2823     // Narrowing conversions
2824     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2825       if (IsInt2FP) {
2826         // One narrowing int_to_fp, then an fp_round.
2827         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2828         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2829         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2830         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2831       }
2832       // FP2Int
2833       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2834       // representable by the integer, the result is poison.
2835       MVT IVecVT =
2836           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2837                            VT.getVectorElementCount());
2838       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2839       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2840     }
2841 
2842     // Scalable vectors can exit here. Patterns will handle equally-sized
2843     // conversions halving/doubling ones.
2844     if (!VT.isFixedLengthVector())
2845       return Op;
2846 
2847     // For fixed-length vectors we lower to a custom "VL" node.
2848     unsigned RVVOpc = 0;
2849     switch (Op.getOpcode()) {
2850     default:
2851       llvm_unreachable("Impossible opcode");
2852     case ISD::FP_TO_SINT:
2853       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2854       break;
2855     case ISD::FP_TO_UINT:
2856       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2857       break;
2858     case ISD::SINT_TO_FP:
2859       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2860       break;
2861     case ISD::UINT_TO_FP:
2862       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2863       break;
2864     }
2865 
2866     MVT ContainerVT, SrcContainerVT;
2867     // Derive the reference container type from the larger vector type.
2868     if (SrcEltSize > EltSize) {
2869       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2870       ContainerVT =
2871           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2872     } else {
2873       ContainerVT = getContainerForFixedLengthVector(VT);
2874       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2875     }
2876 
2877     SDValue Mask, VL;
2878     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2879 
2880     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2881     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2882     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2883   }
2884   case ISD::FP_TO_SINT_SAT:
2885   case ISD::FP_TO_UINT_SAT:
2886     return lowerFP_TO_INT_SAT(Op, DAG);
2887   case ISD::VECREDUCE_ADD:
2888   case ISD::VECREDUCE_UMAX:
2889   case ISD::VECREDUCE_SMAX:
2890   case ISD::VECREDUCE_UMIN:
2891   case ISD::VECREDUCE_SMIN:
2892     return lowerVECREDUCE(Op, DAG);
2893   case ISD::VECREDUCE_AND:
2894   case ISD::VECREDUCE_OR:
2895   case ISD::VECREDUCE_XOR:
2896     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2897       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
2898     return lowerVECREDUCE(Op, DAG);
2899   case ISD::VECREDUCE_FADD:
2900   case ISD::VECREDUCE_SEQ_FADD:
2901   case ISD::VECREDUCE_FMIN:
2902   case ISD::VECREDUCE_FMAX:
2903     return lowerFPVECREDUCE(Op, DAG);
2904   case ISD::VP_REDUCE_ADD:
2905   case ISD::VP_REDUCE_UMAX:
2906   case ISD::VP_REDUCE_SMAX:
2907   case ISD::VP_REDUCE_UMIN:
2908   case ISD::VP_REDUCE_SMIN:
2909   case ISD::VP_REDUCE_FADD:
2910   case ISD::VP_REDUCE_SEQ_FADD:
2911   case ISD::VP_REDUCE_FMIN:
2912   case ISD::VP_REDUCE_FMAX:
2913     return lowerVPREDUCE(Op, DAG);
2914   case ISD::VP_REDUCE_AND:
2915   case ISD::VP_REDUCE_OR:
2916   case ISD::VP_REDUCE_XOR:
2917     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
2918       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
2919     return lowerVPREDUCE(Op, DAG);
2920   case ISD::INSERT_SUBVECTOR:
2921     return lowerINSERT_SUBVECTOR(Op, DAG);
2922   case ISD::EXTRACT_SUBVECTOR:
2923     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2924   case ISD::STEP_VECTOR:
2925     return lowerSTEP_VECTOR(Op, DAG);
2926   case ISD::VECTOR_REVERSE:
2927     return lowerVECTOR_REVERSE(Op, DAG);
2928   case ISD::BUILD_VECTOR:
2929     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2930   case ISD::SPLAT_VECTOR:
2931     if (Op.getValueType().getVectorElementType() == MVT::i1)
2932       return lowerVectorMaskSplat(Op, DAG);
2933     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2934   case ISD::VECTOR_SHUFFLE:
2935     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2936   case ISD::CONCAT_VECTORS: {
2937     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2938     // better than going through the stack, as the default expansion does.
2939     SDLoc DL(Op);
2940     MVT VT = Op.getSimpleValueType();
2941     unsigned NumOpElts =
2942         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2943     SDValue Vec = DAG.getUNDEF(VT);
2944     for (const auto &OpIdx : enumerate(Op->ops()))
2945       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2946                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2947     return Vec;
2948   }
2949   case ISD::LOAD:
2950     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2951       return V;
2952     if (Op.getValueType().isFixedLengthVector())
2953       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2954     return Op;
2955   case ISD::STORE:
2956     if (auto V = expandUnalignedRVVStore(Op, DAG))
2957       return V;
2958     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2959       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2960     return Op;
2961   case ISD::MLOAD:
2962   case ISD::VP_LOAD:
2963     return lowerMaskedLoad(Op, DAG);
2964   case ISD::MSTORE:
2965   case ISD::VP_STORE:
2966     return lowerMaskedStore(Op, DAG);
2967   case ISD::SETCC:
2968     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2969   case ISD::ADD:
2970     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2971   case ISD::SUB:
2972     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2973   case ISD::MUL:
2974     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2975   case ISD::MULHS:
2976     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2977   case ISD::MULHU:
2978     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2979   case ISD::AND:
2980     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2981                                               RISCVISD::AND_VL);
2982   case ISD::OR:
2983     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2984                                               RISCVISD::OR_VL);
2985   case ISD::XOR:
2986     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2987                                               RISCVISD::XOR_VL);
2988   case ISD::SDIV:
2989     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2990   case ISD::SREM:
2991     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2992   case ISD::UDIV:
2993     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2994   case ISD::UREM:
2995     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2996   case ISD::SHL:
2997   case ISD::SRA:
2998   case ISD::SRL:
2999     if (Op.getSimpleValueType().isFixedLengthVector())
3000       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3001     // This can be called for an i32 shift amount that needs to be promoted.
3002     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3003            "Unexpected custom legalisation");
3004     return SDValue();
3005   case ISD::SADDSAT:
3006     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3007   case ISD::UADDSAT:
3008     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3009   case ISD::SSUBSAT:
3010     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3011   case ISD::USUBSAT:
3012     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3013   case ISD::FADD:
3014     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3015   case ISD::FSUB:
3016     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3017   case ISD::FMUL:
3018     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3019   case ISD::FDIV:
3020     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3021   case ISD::FNEG:
3022     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3023   case ISD::FABS:
3024     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3025   case ISD::FSQRT:
3026     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3027   case ISD::FMA:
3028     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3029   case ISD::SMIN:
3030     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3031   case ISD::SMAX:
3032     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3033   case ISD::UMIN:
3034     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3035   case ISD::UMAX:
3036     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3037   case ISD::FMINNUM:
3038     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3039   case ISD::FMAXNUM:
3040     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3041   case ISD::ABS:
3042     return lowerABS(Op, DAG);
3043   case ISD::CTLZ_ZERO_UNDEF:
3044   case ISD::CTTZ_ZERO_UNDEF:
3045     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3046   case ISD::VSELECT:
3047     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3048   case ISD::FCOPYSIGN:
3049     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3050   case ISD::MGATHER:
3051   case ISD::VP_GATHER:
3052     return lowerMaskedGather(Op, DAG);
3053   case ISD::MSCATTER:
3054   case ISD::VP_SCATTER:
3055     return lowerMaskedScatter(Op, DAG);
3056   case ISD::FLT_ROUNDS_:
3057     return lowerGET_ROUNDING(Op, DAG);
3058   case ISD::SET_ROUNDING:
3059     return lowerSET_ROUNDING(Op, DAG);
3060   case ISD::VP_ADD:
3061     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3062   case ISD::VP_SUB:
3063     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3064   case ISD::VP_MUL:
3065     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3066   case ISD::VP_SDIV:
3067     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3068   case ISD::VP_UDIV:
3069     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3070   case ISD::VP_SREM:
3071     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3072   case ISD::VP_UREM:
3073     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3074   case ISD::VP_AND:
3075     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
3076   case ISD::VP_OR:
3077     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
3078   case ISD::VP_XOR:
3079     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
3080   case ISD::VP_ASHR:
3081     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3082   case ISD::VP_LSHR:
3083     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3084   case ISD::VP_SHL:
3085     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3086   case ISD::VP_FADD:
3087     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3088   case ISD::VP_FSUB:
3089     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3090   case ISD::VP_FMUL:
3091     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3092   case ISD::VP_FDIV:
3093     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3094   }
3095 }
3096 
3097 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3098                              SelectionDAG &DAG, unsigned Flags) {
3099   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3100 }
3101 
3102 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3103                              SelectionDAG &DAG, unsigned Flags) {
3104   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3105                                    Flags);
3106 }
3107 
3108 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3109                              SelectionDAG &DAG, unsigned Flags) {
3110   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3111                                    N->getOffset(), Flags);
3112 }
3113 
3114 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3115                              SelectionDAG &DAG, unsigned Flags) {
3116   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3117 }
3118 
3119 template <class NodeTy>
3120 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3121                                      bool IsLocal) const {
3122   SDLoc DL(N);
3123   EVT Ty = getPointerTy(DAG.getDataLayout());
3124 
3125   if (isPositionIndependent()) {
3126     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3127     if (IsLocal)
3128       // Use PC-relative addressing to access the symbol. This generates the
3129       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3130       // %pcrel_lo(auipc)).
3131       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3132 
3133     // Use PC-relative addressing to access the GOT for this symbol, then load
3134     // the address from the GOT. This generates the pattern (PseudoLA sym),
3135     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3136     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3137   }
3138 
3139   switch (getTargetMachine().getCodeModel()) {
3140   default:
3141     report_fatal_error("Unsupported code model for lowering");
3142   case CodeModel::Small: {
3143     // Generate a sequence for accessing addresses within the first 2 GiB of
3144     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3145     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3146     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3147     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3148     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3149   }
3150   case CodeModel::Medium: {
3151     // Generate a sequence for accessing addresses within any 2GiB range within
3152     // the address space. This generates the pattern (PseudoLLA sym), which
3153     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3154     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3155     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3156   }
3157   }
3158 }
3159 
3160 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3161                                                 SelectionDAG &DAG) const {
3162   SDLoc DL(Op);
3163   EVT Ty = Op.getValueType();
3164   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3165   int64_t Offset = N->getOffset();
3166   MVT XLenVT = Subtarget.getXLenVT();
3167 
3168   const GlobalValue *GV = N->getGlobal();
3169   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3170   SDValue Addr = getAddr(N, DAG, IsLocal);
3171 
3172   // In order to maximise the opportunity for common subexpression elimination,
3173   // emit a separate ADD node for the global address offset instead of folding
3174   // it in the global address node. Later peephole optimisations may choose to
3175   // fold it back in when profitable.
3176   if (Offset != 0)
3177     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3178                        DAG.getConstant(Offset, DL, XLenVT));
3179   return Addr;
3180 }
3181 
3182 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3183                                                SelectionDAG &DAG) const {
3184   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3185 
3186   return getAddr(N, DAG);
3187 }
3188 
3189 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3190                                                SelectionDAG &DAG) const {
3191   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3192 
3193   return getAddr(N, DAG);
3194 }
3195 
3196 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3197                                             SelectionDAG &DAG) const {
3198   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3199 
3200   return getAddr(N, DAG);
3201 }
3202 
3203 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3204                                               SelectionDAG &DAG,
3205                                               bool UseGOT) const {
3206   SDLoc DL(N);
3207   EVT Ty = getPointerTy(DAG.getDataLayout());
3208   const GlobalValue *GV = N->getGlobal();
3209   MVT XLenVT = Subtarget.getXLenVT();
3210 
3211   if (UseGOT) {
3212     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3213     // load the address from the GOT and add the thread pointer. This generates
3214     // the pattern (PseudoLA_TLS_IE sym), which expands to
3215     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3216     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3217     SDValue Load =
3218         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3219 
3220     // Add the thread pointer.
3221     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3222     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3223   }
3224 
3225   // Generate a sequence for accessing the address relative to the thread
3226   // pointer, with the appropriate adjustment for the thread pointer offset.
3227   // This generates the pattern
3228   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3229   SDValue AddrHi =
3230       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3231   SDValue AddrAdd =
3232       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3233   SDValue AddrLo =
3234       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3235 
3236   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3237   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3238   SDValue MNAdd = SDValue(
3239       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3240       0);
3241   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3242 }
3243 
3244 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3245                                                SelectionDAG &DAG) const {
3246   SDLoc DL(N);
3247   EVT Ty = getPointerTy(DAG.getDataLayout());
3248   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3249   const GlobalValue *GV = N->getGlobal();
3250 
3251   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3252   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3253   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3254   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3255   SDValue Load =
3256       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3257 
3258   // Prepare argument list to generate call.
3259   ArgListTy Args;
3260   ArgListEntry Entry;
3261   Entry.Node = Load;
3262   Entry.Ty = CallTy;
3263   Args.push_back(Entry);
3264 
3265   // Setup call to __tls_get_addr.
3266   TargetLowering::CallLoweringInfo CLI(DAG);
3267   CLI.setDebugLoc(DL)
3268       .setChain(DAG.getEntryNode())
3269       .setLibCallee(CallingConv::C, CallTy,
3270                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3271                     std::move(Args));
3272 
3273   return LowerCallTo(CLI).first;
3274 }
3275 
3276 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3277                                                    SelectionDAG &DAG) const {
3278   SDLoc DL(Op);
3279   EVT Ty = Op.getValueType();
3280   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3281   int64_t Offset = N->getOffset();
3282   MVT XLenVT = Subtarget.getXLenVT();
3283 
3284   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3285 
3286   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3287       CallingConv::GHC)
3288     report_fatal_error("In GHC calling convention TLS is not supported");
3289 
3290   SDValue Addr;
3291   switch (Model) {
3292   case TLSModel::LocalExec:
3293     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3294     break;
3295   case TLSModel::InitialExec:
3296     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3297     break;
3298   case TLSModel::LocalDynamic:
3299   case TLSModel::GeneralDynamic:
3300     Addr = getDynamicTLSAddr(N, DAG);
3301     break;
3302   }
3303 
3304   // In order to maximise the opportunity for common subexpression elimination,
3305   // emit a separate ADD node for the global address offset instead of folding
3306   // it in the global address node. Later peephole optimisations may choose to
3307   // fold it back in when profitable.
3308   if (Offset != 0)
3309     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3310                        DAG.getConstant(Offset, DL, XLenVT));
3311   return Addr;
3312 }
3313 
3314 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3315   SDValue CondV = Op.getOperand(0);
3316   SDValue TrueV = Op.getOperand(1);
3317   SDValue FalseV = Op.getOperand(2);
3318   SDLoc DL(Op);
3319   MVT VT = Op.getSimpleValueType();
3320   MVT XLenVT = Subtarget.getXLenVT();
3321 
3322   // Lower vector SELECTs to VSELECTs by splatting the condition.
3323   if (VT.isVector()) {
3324     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3325     SDValue CondSplat = VT.isScalableVector()
3326                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3327                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3328     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3329   }
3330 
3331   // If the result type is XLenVT and CondV is the output of a SETCC node
3332   // which also operated on XLenVT inputs, then merge the SETCC node into the
3333   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3334   // compare+branch instructions. i.e.:
3335   // (select (setcc lhs, rhs, cc), truev, falsev)
3336   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3337   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3338       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3339     SDValue LHS = CondV.getOperand(0);
3340     SDValue RHS = CondV.getOperand(1);
3341     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3342     ISD::CondCode CCVal = CC->get();
3343 
3344     // Special case for a select of 2 constants that have a diffence of 1.
3345     // Normally this is done by DAGCombine, but if the select is introduced by
3346     // type legalization or op legalization, we miss it. Restricting to SETLT
3347     // case for now because that is what signed saturating add/sub need.
3348     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3349     // but we would probably want to swap the true/false values if the condition
3350     // is SETGE/SETLE to avoid an XORI.
3351     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3352         CCVal == ISD::SETLT) {
3353       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3354       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3355       if (TrueVal - 1 == FalseVal)
3356         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3357       if (TrueVal + 1 == FalseVal)
3358         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3359     }
3360 
3361     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3362 
3363     SDValue TargetCC = DAG.getCondCode(CCVal);
3364     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3365     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3366   }
3367 
3368   // Otherwise:
3369   // (select condv, truev, falsev)
3370   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3371   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3372   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3373 
3374   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3375 
3376   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3377 }
3378 
3379 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3380   SDValue CondV = Op.getOperand(1);
3381   SDLoc DL(Op);
3382   MVT XLenVT = Subtarget.getXLenVT();
3383 
3384   if (CondV.getOpcode() == ISD::SETCC &&
3385       CondV.getOperand(0).getValueType() == XLenVT) {
3386     SDValue LHS = CondV.getOperand(0);
3387     SDValue RHS = CondV.getOperand(1);
3388     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3389 
3390     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3391 
3392     SDValue TargetCC = DAG.getCondCode(CCVal);
3393     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3394                        LHS, RHS, TargetCC, Op.getOperand(2));
3395   }
3396 
3397   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3398                      CondV, DAG.getConstant(0, DL, XLenVT),
3399                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3400 }
3401 
3402 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3403   MachineFunction &MF = DAG.getMachineFunction();
3404   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3405 
3406   SDLoc DL(Op);
3407   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3408                                  getPointerTy(MF.getDataLayout()));
3409 
3410   // vastart just stores the address of the VarArgsFrameIndex slot into the
3411   // memory location argument.
3412   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3413   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3414                       MachinePointerInfo(SV));
3415 }
3416 
3417 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3418                                             SelectionDAG &DAG) const {
3419   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3420   MachineFunction &MF = DAG.getMachineFunction();
3421   MachineFrameInfo &MFI = MF.getFrameInfo();
3422   MFI.setFrameAddressIsTaken(true);
3423   Register FrameReg = RI.getFrameRegister(MF);
3424   int XLenInBytes = Subtarget.getXLen() / 8;
3425 
3426   EVT VT = Op.getValueType();
3427   SDLoc DL(Op);
3428   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3429   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3430   while (Depth--) {
3431     int Offset = -(XLenInBytes * 2);
3432     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3433                               DAG.getIntPtrConstant(Offset, DL));
3434     FrameAddr =
3435         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3436   }
3437   return FrameAddr;
3438 }
3439 
3440 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3441                                              SelectionDAG &DAG) const {
3442   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3443   MachineFunction &MF = DAG.getMachineFunction();
3444   MachineFrameInfo &MFI = MF.getFrameInfo();
3445   MFI.setReturnAddressIsTaken(true);
3446   MVT XLenVT = Subtarget.getXLenVT();
3447   int XLenInBytes = Subtarget.getXLen() / 8;
3448 
3449   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3450     return SDValue();
3451 
3452   EVT VT = Op.getValueType();
3453   SDLoc DL(Op);
3454   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3455   if (Depth) {
3456     int Off = -XLenInBytes;
3457     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3458     SDValue Offset = DAG.getConstant(Off, DL, VT);
3459     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3460                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3461                        MachinePointerInfo());
3462   }
3463 
3464   // Return the value of the return address register, marking it an implicit
3465   // live-in.
3466   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3467   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3468 }
3469 
3470 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3471                                                  SelectionDAG &DAG) const {
3472   SDLoc DL(Op);
3473   SDValue Lo = Op.getOperand(0);
3474   SDValue Hi = Op.getOperand(1);
3475   SDValue Shamt = Op.getOperand(2);
3476   EVT VT = Lo.getValueType();
3477 
3478   // if Shamt-XLEN < 0: // Shamt < XLEN
3479   //   Lo = Lo << Shamt
3480   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3481   // else:
3482   //   Lo = 0
3483   //   Hi = Lo << (Shamt-XLEN)
3484 
3485   SDValue Zero = DAG.getConstant(0, DL, VT);
3486   SDValue One = DAG.getConstant(1, DL, VT);
3487   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3488   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3489   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3490   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3491 
3492   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3493   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3494   SDValue ShiftRightLo =
3495       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3496   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3497   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3498   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3499 
3500   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3501 
3502   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3503   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3504 
3505   SDValue Parts[2] = {Lo, Hi};
3506   return DAG.getMergeValues(Parts, DL);
3507 }
3508 
3509 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3510                                                   bool IsSRA) const {
3511   SDLoc DL(Op);
3512   SDValue Lo = Op.getOperand(0);
3513   SDValue Hi = Op.getOperand(1);
3514   SDValue Shamt = Op.getOperand(2);
3515   EVT VT = Lo.getValueType();
3516 
3517   // SRA expansion:
3518   //   if Shamt-XLEN < 0: // Shamt < XLEN
3519   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3520   //     Hi = Hi >>s Shamt
3521   //   else:
3522   //     Lo = Hi >>s (Shamt-XLEN);
3523   //     Hi = Hi >>s (XLEN-1)
3524   //
3525   // SRL expansion:
3526   //   if Shamt-XLEN < 0: // Shamt < XLEN
3527   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3528   //     Hi = Hi >>u Shamt
3529   //   else:
3530   //     Lo = Hi >>u (Shamt-XLEN);
3531   //     Hi = 0;
3532 
3533   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3534 
3535   SDValue Zero = DAG.getConstant(0, DL, VT);
3536   SDValue One = DAG.getConstant(1, DL, VT);
3537   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3538   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3539   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3540   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3541 
3542   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3543   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3544   SDValue ShiftLeftHi =
3545       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3546   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3547   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3548   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3549   SDValue HiFalse =
3550       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3551 
3552   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3553 
3554   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3555   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3556 
3557   SDValue Parts[2] = {Lo, Hi};
3558   return DAG.getMergeValues(Parts, DL);
3559 }
3560 
3561 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3562 // legal equivalently-sized i8 type, so we can use that as a go-between.
3563 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3564                                                   SelectionDAG &DAG) const {
3565   SDLoc DL(Op);
3566   MVT VT = Op.getSimpleValueType();
3567   SDValue SplatVal = Op.getOperand(0);
3568   // All-zeros or all-ones splats are handled specially.
3569   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3570     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3571     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3572   }
3573   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3574     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3575     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3576   }
3577   MVT XLenVT = Subtarget.getXLenVT();
3578   assert(SplatVal.getValueType() == XLenVT &&
3579          "Unexpected type for i1 splat value");
3580   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3581   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3582                          DAG.getConstant(1, DL, XLenVT));
3583   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3584   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3585   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3586 }
3587 
3588 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3589 // illegal (currently only vXi64 RV32).
3590 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3591 // them to SPLAT_VECTOR_I64
3592 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3593                                                      SelectionDAG &DAG) const {
3594   SDLoc DL(Op);
3595   MVT VecVT = Op.getSimpleValueType();
3596   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3597          "Unexpected SPLAT_VECTOR_PARTS lowering");
3598 
3599   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3600   SDValue Lo = Op.getOperand(0);
3601   SDValue Hi = Op.getOperand(1);
3602 
3603   if (VecVT.isFixedLengthVector()) {
3604     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3605     SDLoc DL(Op);
3606     SDValue Mask, VL;
3607     std::tie(Mask, VL) =
3608         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3609 
3610     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3611     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3612   }
3613 
3614   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3615     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3616     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3617     // If Hi constant is all the same sign bit as Lo, lower this as a custom
3618     // node in order to try and match RVV vector/scalar instructions.
3619     if ((LoC >> 31) == HiC)
3620       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3621   }
3622 
3623   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3624   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3625       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3626       Hi.getConstantOperandVal(1) == 31)
3627     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3628 
3629   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3630   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3631                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3632 }
3633 
3634 // Custom-lower extensions from mask vectors by using a vselect either with 1
3635 // for zero/any-extension or -1 for sign-extension:
3636 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3637 // Note that any-extension is lowered identically to zero-extension.
3638 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3639                                                 int64_t ExtTrueVal) const {
3640   SDLoc DL(Op);
3641   MVT VecVT = Op.getSimpleValueType();
3642   SDValue Src = Op.getOperand(0);
3643   // Only custom-lower extensions from mask types
3644   assert(Src.getValueType().isVector() &&
3645          Src.getValueType().getVectorElementType() == MVT::i1);
3646 
3647   MVT XLenVT = Subtarget.getXLenVT();
3648   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3649   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3650 
3651   if (VecVT.isScalableVector()) {
3652     // Be careful not to introduce illegal scalar types at this stage, and be
3653     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3654     // illegal and must be expanded. Since we know that the constants are
3655     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3656     bool IsRV32E64 =
3657         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3658 
3659     if (!IsRV32E64) {
3660       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3661       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3662     } else {
3663       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3664       SplatTrueVal =
3665           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3666     }
3667 
3668     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3669   }
3670 
3671   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3672   MVT I1ContainerVT =
3673       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3674 
3675   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3676 
3677   SDValue Mask, VL;
3678   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3679 
3680   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3681   SplatTrueVal =
3682       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3683   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3684                                SplatTrueVal, SplatZero, VL);
3685 
3686   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3687 }
3688 
3689 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3690     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3691   MVT ExtVT = Op.getSimpleValueType();
3692   // Only custom-lower extensions from fixed-length vector types.
3693   if (!ExtVT.isFixedLengthVector())
3694     return Op;
3695   MVT VT = Op.getOperand(0).getSimpleValueType();
3696   // Grab the canonical container type for the extended type. Infer the smaller
3697   // type from that to ensure the same number of vector elements, as we know
3698   // the LMUL will be sufficient to hold the smaller type.
3699   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3700   // Get the extended container type manually to ensure the same number of
3701   // vector elements between source and dest.
3702   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3703                                      ContainerExtVT.getVectorElementCount());
3704 
3705   SDValue Op1 =
3706       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3707 
3708   SDLoc DL(Op);
3709   SDValue Mask, VL;
3710   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3711 
3712   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3713 
3714   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3715 }
3716 
3717 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3718 // setcc operation:
3719 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3720 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3721                                                   SelectionDAG &DAG) const {
3722   SDLoc DL(Op);
3723   EVT MaskVT = Op.getValueType();
3724   // Only expect to custom-lower truncations to mask types
3725   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3726          "Unexpected type for vector mask lowering");
3727   SDValue Src = Op.getOperand(0);
3728   MVT VecVT = Src.getSimpleValueType();
3729 
3730   // If this is a fixed vector, we need to convert it to a scalable vector.
3731   MVT ContainerVT = VecVT;
3732   if (VecVT.isFixedLengthVector()) {
3733     ContainerVT = getContainerForFixedLengthVector(VecVT);
3734     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3735   }
3736 
3737   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3738   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3739 
3740   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3741   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3742 
3743   if (VecVT.isScalableVector()) {
3744     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3745     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3746   }
3747 
3748   SDValue Mask, VL;
3749   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3750 
3751   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3752   SDValue Trunc =
3753       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3754   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3755                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3756   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3757 }
3758 
3759 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3760 // first position of a vector, and that vector is slid up to the insert index.
3761 // By limiting the active vector length to index+1 and merging with the
3762 // original vector (with an undisturbed tail policy for elements >= VL), we
3763 // achieve the desired result of leaving all elements untouched except the one
3764 // at VL-1, which is replaced with the desired value.
3765 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3766                                                     SelectionDAG &DAG) const {
3767   SDLoc DL(Op);
3768   MVT VecVT = Op.getSimpleValueType();
3769   SDValue Vec = Op.getOperand(0);
3770   SDValue Val = Op.getOperand(1);
3771   SDValue Idx = Op.getOperand(2);
3772 
3773   if (VecVT.getVectorElementType() == MVT::i1) {
3774     // FIXME: For now we just promote to an i8 vector and insert into that,
3775     // but this is probably not optimal.
3776     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3777     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3778     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3779     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3780   }
3781 
3782   MVT ContainerVT = VecVT;
3783   // If the operand is a fixed-length vector, convert to a scalable one.
3784   if (VecVT.isFixedLengthVector()) {
3785     ContainerVT = getContainerForFixedLengthVector(VecVT);
3786     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3787   }
3788 
3789   MVT XLenVT = Subtarget.getXLenVT();
3790 
3791   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3792   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3793   // Even i64-element vectors on RV32 can be lowered without scalar
3794   // legalization if the most-significant 32 bits of the value are not affected
3795   // by the sign-extension of the lower 32 bits.
3796   // TODO: We could also catch sign extensions of a 32-bit value.
3797   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3798     const auto *CVal = cast<ConstantSDNode>(Val);
3799     if (isInt<32>(CVal->getSExtValue())) {
3800       IsLegalInsert = true;
3801       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3802     }
3803   }
3804 
3805   SDValue Mask, VL;
3806   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3807 
3808   SDValue ValInVec;
3809 
3810   if (IsLegalInsert) {
3811     unsigned Opc =
3812         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3813     if (isNullConstant(Idx)) {
3814       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3815       if (!VecVT.isFixedLengthVector())
3816         return Vec;
3817       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3818     }
3819     ValInVec =
3820         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3821   } else {
3822     // On RV32, i64-element vectors must be specially handled to place the
3823     // value at element 0, by using two vslide1up instructions in sequence on
3824     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3825     // this.
3826     SDValue One = DAG.getConstant(1, DL, XLenVT);
3827     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3828     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3829     MVT I32ContainerVT =
3830         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3831     SDValue I32Mask =
3832         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3833     // Limit the active VL to two.
3834     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3835     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3836     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3837     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3838                            InsertI64VL);
3839     // First slide in the hi value, then the lo in underneath it.
3840     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3841                            ValHi, I32Mask, InsertI64VL);
3842     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3843                            ValLo, I32Mask, InsertI64VL);
3844     // Bitcast back to the right container type.
3845     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3846   }
3847 
3848   // Now that the value is in a vector, slide it into position.
3849   SDValue InsertVL =
3850       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3851   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3852                                 ValInVec, Idx, Mask, InsertVL);
3853   if (!VecVT.isFixedLengthVector())
3854     return Slideup;
3855   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3856 }
3857 
3858 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3859 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3860 // types this is done using VMV_X_S to allow us to glean information about the
3861 // sign bits of the result.
3862 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3863                                                      SelectionDAG &DAG) const {
3864   SDLoc DL(Op);
3865   SDValue Idx = Op.getOperand(1);
3866   SDValue Vec = Op.getOperand(0);
3867   EVT EltVT = Op.getValueType();
3868   MVT VecVT = Vec.getSimpleValueType();
3869   MVT XLenVT = Subtarget.getXLenVT();
3870 
3871   if (VecVT.getVectorElementType() == MVT::i1) {
3872     // FIXME: For now we just promote to an i8 vector and extract from that,
3873     // but this is probably not optimal.
3874     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3875     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3876     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3877   }
3878 
3879   // If this is a fixed vector, we need to convert it to a scalable vector.
3880   MVT ContainerVT = VecVT;
3881   if (VecVT.isFixedLengthVector()) {
3882     ContainerVT = getContainerForFixedLengthVector(VecVT);
3883     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3884   }
3885 
3886   // If the index is 0, the vector is already in the right position.
3887   if (!isNullConstant(Idx)) {
3888     // Use a VL of 1 to avoid processing more elements than we need.
3889     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3890     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3891     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3892     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3893                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3894   }
3895 
3896   if (!EltVT.isInteger()) {
3897     // Floating-point extracts are handled in TableGen.
3898     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3899                        DAG.getConstant(0, DL, XLenVT));
3900   }
3901 
3902   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3903   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3904 }
3905 
3906 // Some RVV intrinsics may claim that they want an integer operand to be
3907 // promoted or expanded.
3908 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3909                                           const RISCVSubtarget &Subtarget) {
3910   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3911           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3912          "Unexpected opcode");
3913 
3914   if (!Subtarget.hasVInstructions())
3915     return SDValue();
3916 
3917   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3918   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3919   SDLoc DL(Op);
3920 
3921   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3922       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3923   if (!II || !II->SplatOperand)
3924     return SDValue();
3925 
3926   unsigned SplatOp = II->SplatOperand + HasChain;
3927   assert(SplatOp < Op.getNumOperands());
3928 
3929   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3930   SDValue &ScalarOp = Operands[SplatOp];
3931   MVT OpVT = ScalarOp.getSimpleValueType();
3932   MVT XLenVT = Subtarget.getXLenVT();
3933 
3934   // If this isn't a scalar, or its type is XLenVT we're done.
3935   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3936     return SDValue();
3937 
3938   // Simplest case is that the operand needs to be promoted to XLenVT.
3939   if (OpVT.bitsLT(XLenVT)) {
3940     // If the operand is a constant, sign extend to increase our chances
3941     // of being able to use a .vi instruction. ANY_EXTEND would become a
3942     // a zero extend and the simm5 check in isel would fail.
3943     // FIXME: Should we ignore the upper bits in isel instead?
3944     unsigned ExtOpc =
3945         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3946     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3947     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3948   }
3949 
3950   // Use the previous operand to get the vXi64 VT. The result might be a mask
3951   // VT for compares. Using the previous operand assumes that the previous
3952   // operand will never have a smaller element size than a scalar operand and
3953   // that a widening operation never uses SEW=64.
3954   // NOTE: If this fails the below assert, we can probably just find the
3955   // element count from any operand or result and use it to construct the VT.
3956   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3957   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3958 
3959   // The more complex case is when the scalar is larger than XLenVT.
3960   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3961          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3962 
3963   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3964   // on the instruction to sign-extend since SEW>XLEN.
3965   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3966     if (isInt<32>(CVal->getSExtValue())) {
3967       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3968       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3969     }
3970   }
3971 
3972   // We need to convert the scalar to a splat vector.
3973   // FIXME: Can we implicitly truncate the scalar if it is known to
3974   // be sign extended?
3975   // VL should be the last operand.
3976   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3977   assert(VL.getValueType() == XLenVT);
3978   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3979   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3980 }
3981 
3982 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3983                                                      SelectionDAG &DAG) const {
3984   unsigned IntNo = Op.getConstantOperandVal(0);
3985   SDLoc DL(Op);
3986   MVT XLenVT = Subtarget.getXLenVT();
3987 
3988   switch (IntNo) {
3989   default:
3990     break; // Don't custom lower most intrinsics.
3991   case Intrinsic::thread_pointer: {
3992     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3993     return DAG.getRegister(RISCV::X4, PtrVT);
3994   }
3995   case Intrinsic::riscv_orc_b:
3996     // Lower to the GORCI encoding for orc.b.
3997     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3998                        DAG.getConstant(7, DL, XLenVT));
3999   case Intrinsic::riscv_grev:
4000   case Intrinsic::riscv_gorc: {
4001     unsigned Opc =
4002         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4003     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4004   }
4005   case Intrinsic::riscv_shfl:
4006   case Intrinsic::riscv_unshfl: {
4007     unsigned Opc =
4008         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4009     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4010   }
4011   case Intrinsic::riscv_bcompress:
4012   case Intrinsic::riscv_bdecompress: {
4013     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4014                                                        : RISCVISD::BDECOMPRESS;
4015     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4016   }
4017   case Intrinsic::riscv_vmv_x_s:
4018     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4019     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4020                        Op.getOperand(1));
4021   case Intrinsic::riscv_vmv_v_x:
4022     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4023                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4024   case Intrinsic::riscv_vfmv_v_f:
4025     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4026                        Op.getOperand(1), Op.getOperand(2));
4027   case Intrinsic::riscv_vmv_s_x: {
4028     SDValue Scalar = Op.getOperand(2);
4029 
4030     if (Scalar.getValueType().bitsLE(XLenVT)) {
4031       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4032       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4033                          Op.getOperand(1), Scalar, Op.getOperand(3));
4034     }
4035 
4036     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4037 
4038     // This is an i64 value that lives in two scalar registers. We have to
4039     // insert this in a convoluted way. First we build vXi64 splat containing
4040     // the/ two values that we assemble using some bit math. Next we'll use
4041     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4042     // to merge element 0 from our splat into the source vector.
4043     // FIXME: This is probably not the best way to do this, but it is
4044     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4045     // point.
4046     //   sw lo, (a0)
4047     //   sw hi, 4(a0)
4048     //   vlse vX, (a0)
4049     //
4050     //   vid.v      vVid
4051     //   vmseq.vx   mMask, vVid, 0
4052     //   vmerge.vvm vDest, vSrc, vVal, mMask
4053     MVT VT = Op.getSimpleValueType();
4054     SDValue Vec = Op.getOperand(1);
4055     SDValue VL = Op.getOperand(3);
4056 
4057     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4058     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4059                                       DAG.getConstant(0, DL, MVT::i32), VL);
4060 
4061     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4062     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4063     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4064     SDValue SelectCond =
4065         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4066                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4067     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4068                        Vec, VL);
4069   }
4070   case Intrinsic::riscv_vslide1up:
4071   case Intrinsic::riscv_vslide1down:
4072   case Intrinsic::riscv_vslide1up_mask:
4073   case Intrinsic::riscv_vslide1down_mask: {
4074     // We need to special case these when the scalar is larger than XLen.
4075     unsigned NumOps = Op.getNumOperands();
4076     bool IsMasked = NumOps == 7;
4077     unsigned OpOffset = IsMasked ? 1 : 0;
4078     SDValue Scalar = Op.getOperand(2 + OpOffset);
4079     if (Scalar.getValueType().bitsLE(XLenVT))
4080       break;
4081 
4082     // Splatting a sign extended constant is fine.
4083     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4084       if (isInt<32>(CVal->getSExtValue()))
4085         break;
4086 
4087     MVT VT = Op.getSimpleValueType();
4088     assert(VT.getVectorElementType() == MVT::i64 &&
4089            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4090 
4091     // Convert the vector source to the equivalent nxvXi32 vector.
4092     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4093     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4094 
4095     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4096                                    DAG.getConstant(0, DL, XLenVT));
4097     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4098                                    DAG.getConstant(1, DL, XLenVT));
4099 
4100     // Double the VL since we halved SEW.
4101     SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
4102     SDValue I32VL =
4103         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4104 
4105     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4106     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4107 
4108     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4109     // instructions.
4110     if (IntNo == Intrinsic::riscv_vslide1up ||
4111         IntNo == Intrinsic::riscv_vslide1up_mask) {
4112       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4113                         I32Mask, I32VL);
4114       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4115                         I32Mask, I32VL);
4116     } else {
4117       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4118                         I32Mask, I32VL);
4119       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4120                         I32Mask, I32VL);
4121     }
4122 
4123     // Convert back to nxvXi64.
4124     Vec = DAG.getBitcast(VT, Vec);
4125 
4126     if (!IsMasked)
4127       return Vec;
4128 
4129     // Apply mask after the operation.
4130     SDValue Mask = Op.getOperand(NumOps - 3);
4131     SDValue MaskedOff = Op.getOperand(1);
4132     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4133   }
4134   }
4135 
4136   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4137 }
4138 
4139 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4140                                                     SelectionDAG &DAG) const {
4141   unsigned IntNo = Op.getConstantOperandVal(1);
4142   switch (IntNo) {
4143   default:
4144     break;
4145   case Intrinsic::riscv_masked_strided_load: {
4146     SDLoc DL(Op);
4147     MVT XLenVT = Subtarget.getXLenVT();
4148 
4149     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4150     // the selection of the masked intrinsics doesn't do this for us.
4151     SDValue Mask = Op.getOperand(5);
4152     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4153 
4154     MVT VT = Op->getSimpleValueType(0);
4155     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4156 
4157     SDValue PassThru = Op.getOperand(2);
4158     if (!IsUnmasked) {
4159       MVT MaskVT =
4160           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4161       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4162       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4163     }
4164 
4165     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4166 
4167     SDValue IntID = DAG.getTargetConstant(
4168         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4169         XLenVT);
4170 
4171     auto *Load = cast<MemIntrinsicSDNode>(Op);
4172     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4173     if (!IsUnmasked)
4174       Ops.push_back(PassThru);
4175     Ops.push_back(Op.getOperand(3)); // Ptr
4176     Ops.push_back(Op.getOperand(4)); // Stride
4177     if (!IsUnmasked)
4178       Ops.push_back(Mask);
4179     Ops.push_back(VL);
4180     if (!IsUnmasked) {
4181       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4182       Ops.push_back(Policy);
4183     }
4184 
4185     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4186     SDValue Result =
4187         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4188                                 Load->getMemoryVT(), Load->getMemOperand());
4189     SDValue Chain = Result.getValue(1);
4190     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4191     return DAG.getMergeValues({Result, Chain}, DL);
4192   }
4193   }
4194 
4195   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4196 }
4197 
4198 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4199                                                  SelectionDAG &DAG) const {
4200   unsigned IntNo = Op.getConstantOperandVal(1);
4201   switch (IntNo) {
4202   default:
4203     break;
4204   case Intrinsic::riscv_masked_strided_store: {
4205     SDLoc DL(Op);
4206     MVT XLenVT = Subtarget.getXLenVT();
4207 
4208     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4209     // the selection of the masked intrinsics doesn't do this for us.
4210     SDValue Mask = Op.getOperand(5);
4211     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4212 
4213     SDValue Val = Op.getOperand(2);
4214     MVT VT = Val.getSimpleValueType();
4215     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4216 
4217     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4218     if (!IsUnmasked) {
4219       MVT MaskVT =
4220           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4221       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4222     }
4223 
4224     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4225 
4226     SDValue IntID = DAG.getTargetConstant(
4227         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4228         XLenVT);
4229 
4230     auto *Store = cast<MemIntrinsicSDNode>(Op);
4231     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4232     Ops.push_back(Val);
4233     Ops.push_back(Op.getOperand(3)); // Ptr
4234     Ops.push_back(Op.getOperand(4)); // Stride
4235     if (!IsUnmasked)
4236       Ops.push_back(Mask);
4237     Ops.push_back(VL);
4238 
4239     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4240                                    Ops, Store->getMemoryVT(),
4241                                    Store->getMemOperand());
4242   }
4243   }
4244 
4245   return SDValue();
4246 }
4247 
4248 static MVT getLMUL1VT(MVT VT) {
4249   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4250          "Unexpected vector MVT");
4251   return MVT::getScalableVectorVT(
4252       VT.getVectorElementType(),
4253       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4254 }
4255 
4256 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4257   switch (ISDOpcode) {
4258   default:
4259     llvm_unreachable("Unhandled reduction");
4260   case ISD::VECREDUCE_ADD:
4261     return RISCVISD::VECREDUCE_ADD_VL;
4262   case ISD::VECREDUCE_UMAX:
4263     return RISCVISD::VECREDUCE_UMAX_VL;
4264   case ISD::VECREDUCE_SMAX:
4265     return RISCVISD::VECREDUCE_SMAX_VL;
4266   case ISD::VECREDUCE_UMIN:
4267     return RISCVISD::VECREDUCE_UMIN_VL;
4268   case ISD::VECREDUCE_SMIN:
4269     return RISCVISD::VECREDUCE_SMIN_VL;
4270   case ISD::VECREDUCE_AND:
4271     return RISCVISD::VECREDUCE_AND_VL;
4272   case ISD::VECREDUCE_OR:
4273     return RISCVISD::VECREDUCE_OR_VL;
4274   case ISD::VECREDUCE_XOR:
4275     return RISCVISD::VECREDUCE_XOR_VL;
4276   }
4277 }
4278 
4279 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4280                                                          SelectionDAG &DAG,
4281                                                          bool IsVP) const {
4282   SDLoc DL(Op);
4283   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4284   MVT VecVT = Vec.getSimpleValueType();
4285   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4286           Op.getOpcode() == ISD::VECREDUCE_OR ||
4287           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4288           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4289           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4290           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4291          "Unexpected reduction lowering");
4292 
4293   MVT XLenVT = Subtarget.getXLenVT();
4294   assert(Op.getValueType() == XLenVT &&
4295          "Expected reduction output to be legalized to XLenVT");
4296 
4297   MVT ContainerVT = VecVT;
4298   if (VecVT.isFixedLengthVector()) {
4299     ContainerVT = getContainerForFixedLengthVector(VecVT);
4300     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4301   }
4302 
4303   SDValue Mask, VL;
4304   if (IsVP) {
4305     Mask = Op.getOperand(2);
4306     VL = Op.getOperand(3);
4307   } else {
4308     std::tie(Mask, VL) =
4309         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4310   }
4311 
4312   unsigned BaseOpc;
4313   ISD::CondCode CC;
4314   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4315 
4316   switch (Op.getOpcode()) {
4317   default:
4318     llvm_unreachable("Unhandled reduction");
4319   case ISD::VECREDUCE_AND:
4320   case ISD::VP_REDUCE_AND: {
4321     // vcpop ~x == 0
4322     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4323     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4324     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4325     CC = ISD::SETEQ;
4326     BaseOpc = ISD::AND;
4327     break;
4328   }
4329   case ISD::VECREDUCE_OR:
4330   case ISD::VP_REDUCE_OR:
4331     // vcpop x != 0
4332     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4333     CC = ISD::SETNE;
4334     BaseOpc = ISD::OR;
4335     break;
4336   case ISD::VECREDUCE_XOR:
4337   case ISD::VP_REDUCE_XOR: {
4338     // ((vcpop x) & 1) != 0
4339     SDValue One = DAG.getConstant(1, DL, XLenVT);
4340     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4341     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4342     CC = ISD::SETNE;
4343     BaseOpc = ISD::XOR;
4344     break;
4345   }
4346   }
4347 
4348   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4349 
4350   if (!IsVP)
4351     return SetCC;
4352 
4353   // Now include the start value in the operation.
4354   // Note that we must return the start value when no elements are operated
4355   // upon. The vcpop instructions we've emitted in each case above will return
4356   // 0 for an inactive vector, and so we've already received the neutral value:
4357   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4358   // can simply include the start value.
4359   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4360 }
4361 
4362 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4363                                             SelectionDAG &DAG) const {
4364   SDLoc DL(Op);
4365   SDValue Vec = Op.getOperand(0);
4366   EVT VecEVT = Vec.getValueType();
4367 
4368   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4369 
4370   // Due to ordering in legalize types we may have a vector type that needs to
4371   // be split. Do that manually so we can get down to a legal type.
4372   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4373          TargetLowering::TypeSplitVector) {
4374     SDValue Lo, Hi;
4375     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4376     VecEVT = Lo.getValueType();
4377     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4378   }
4379 
4380   // TODO: The type may need to be widened rather than split. Or widened before
4381   // it can be split.
4382   if (!isTypeLegal(VecEVT))
4383     return SDValue();
4384 
4385   MVT VecVT = VecEVT.getSimpleVT();
4386   MVT VecEltVT = VecVT.getVectorElementType();
4387   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4388 
4389   MVT ContainerVT = VecVT;
4390   if (VecVT.isFixedLengthVector()) {
4391     ContainerVT = getContainerForFixedLengthVector(VecVT);
4392     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4393   }
4394 
4395   MVT M1VT = getLMUL1VT(ContainerVT);
4396 
4397   SDValue Mask, VL;
4398   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4399 
4400   // FIXME: This is a VLMAX splat which might be too large and can prevent
4401   // vsetvli removal.
4402   SDValue NeutralElem =
4403       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4404   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
4405   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4406                                   IdentitySplat, Mask, VL);
4407   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4408                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4409   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4410 }
4411 
4412 // Given a reduction op, this function returns the matching reduction opcode,
4413 // the vector SDValue and the scalar SDValue required to lower this to a
4414 // RISCVISD node.
4415 static std::tuple<unsigned, SDValue, SDValue>
4416 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4417   SDLoc DL(Op);
4418   auto Flags = Op->getFlags();
4419   unsigned Opcode = Op.getOpcode();
4420   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4421   switch (Opcode) {
4422   default:
4423     llvm_unreachable("Unhandled reduction");
4424   case ISD::VECREDUCE_FADD:
4425     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
4426                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4427   case ISD::VECREDUCE_SEQ_FADD:
4428     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4429                            Op.getOperand(0));
4430   case ISD::VECREDUCE_FMIN:
4431     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4432                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4433   case ISD::VECREDUCE_FMAX:
4434     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4435                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4436   }
4437 }
4438 
4439 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4440                                               SelectionDAG &DAG) const {
4441   SDLoc DL(Op);
4442   MVT VecEltVT = Op.getSimpleValueType();
4443 
4444   unsigned RVVOpcode;
4445   SDValue VectorVal, ScalarVal;
4446   std::tie(RVVOpcode, VectorVal, ScalarVal) =
4447       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4448   MVT VecVT = VectorVal.getSimpleValueType();
4449 
4450   MVT ContainerVT = VecVT;
4451   if (VecVT.isFixedLengthVector()) {
4452     ContainerVT = getContainerForFixedLengthVector(VecVT);
4453     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4454   }
4455 
4456   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4457 
4458   SDValue Mask, VL;
4459   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4460 
4461   // FIXME: This is a VLMAX splat which might be too large and can prevent
4462   // vsetvli removal.
4463   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
4464   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4465                                   VectorVal, ScalarSplat, Mask, VL);
4466   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4467                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4468 }
4469 
4470 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4471   switch (ISDOpcode) {
4472   default:
4473     llvm_unreachable("Unhandled reduction");
4474   case ISD::VP_REDUCE_ADD:
4475     return RISCVISD::VECREDUCE_ADD_VL;
4476   case ISD::VP_REDUCE_UMAX:
4477     return RISCVISD::VECREDUCE_UMAX_VL;
4478   case ISD::VP_REDUCE_SMAX:
4479     return RISCVISD::VECREDUCE_SMAX_VL;
4480   case ISD::VP_REDUCE_UMIN:
4481     return RISCVISD::VECREDUCE_UMIN_VL;
4482   case ISD::VP_REDUCE_SMIN:
4483     return RISCVISD::VECREDUCE_SMIN_VL;
4484   case ISD::VP_REDUCE_AND:
4485     return RISCVISD::VECREDUCE_AND_VL;
4486   case ISD::VP_REDUCE_OR:
4487     return RISCVISD::VECREDUCE_OR_VL;
4488   case ISD::VP_REDUCE_XOR:
4489     return RISCVISD::VECREDUCE_XOR_VL;
4490   case ISD::VP_REDUCE_FADD:
4491     return RISCVISD::VECREDUCE_FADD_VL;
4492   case ISD::VP_REDUCE_SEQ_FADD:
4493     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4494   case ISD::VP_REDUCE_FMAX:
4495     return RISCVISD::VECREDUCE_FMAX_VL;
4496   case ISD::VP_REDUCE_FMIN:
4497     return RISCVISD::VECREDUCE_FMIN_VL;
4498   }
4499 }
4500 
4501 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4502                                            SelectionDAG &DAG) const {
4503   SDLoc DL(Op);
4504   SDValue Vec = Op.getOperand(1);
4505   EVT VecEVT = Vec.getValueType();
4506 
4507   // TODO: The type may need to be widened rather than split. Or widened before
4508   // it can be split.
4509   if (!isTypeLegal(VecEVT))
4510     return SDValue();
4511 
4512   MVT VecVT = VecEVT.getSimpleVT();
4513   MVT VecEltVT = VecVT.getVectorElementType();
4514   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
4515 
4516   MVT ContainerVT = VecVT;
4517   if (VecVT.isFixedLengthVector()) {
4518     ContainerVT = getContainerForFixedLengthVector(VecVT);
4519     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4520   }
4521 
4522   SDValue VL = Op.getOperand(3);
4523   SDValue Mask = Op.getOperand(2);
4524 
4525   MVT M1VT = getLMUL1VT(ContainerVT);
4526   MVT XLenVT = Subtarget.getXLenVT();
4527   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
4528 
4529   // FIXME: This is a VLMAX splat which might be too large and can prevent
4530   // vsetvli removal.
4531   SDValue StartSplat = DAG.getSplatVector(M1VT, DL, Op.getOperand(0));
4532   SDValue Reduction =
4533       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
4534   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
4535                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4536   if (!VecVT.isInteger())
4537     return Elt0;
4538   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4539 }
4540 
4541 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4542                                                    SelectionDAG &DAG) const {
4543   SDValue Vec = Op.getOperand(0);
4544   SDValue SubVec = Op.getOperand(1);
4545   MVT VecVT = Vec.getSimpleValueType();
4546   MVT SubVecVT = SubVec.getSimpleValueType();
4547 
4548   SDLoc DL(Op);
4549   MVT XLenVT = Subtarget.getXLenVT();
4550   unsigned OrigIdx = Op.getConstantOperandVal(2);
4551   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4552 
4553   // We don't have the ability to slide mask vectors up indexed by their i1
4554   // elements; the smallest we can do is i8. Often we are able to bitcast to
4555   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4556   // into a scalable one, we might not necessarily have enough scalable
4557   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
4558   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4559       (OrigIdx != 0 || !Vec.isUndef())) {
4560     if (VecVT.getVectorMinNumElements() >= 8 &&
4561         SubVecVT.getVectorMinNumElements() >= 8) {
4562       assert(OrigIdx % 8 == 0 && "Invalid index");
4563       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4564              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4565              "Unexpected mask vector lowering");
4566       OrigIdx /= 8;
4567       SubVecVT =
4568           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4569                            SubVecVT.isScalableVector());
4570       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4571                                VecVT.isScalableVector());
4572       Vec = DAG.getBitcast(VecVT, Vec);
4573       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4574     } else {
4575       // We can't slide this mask vector up indexed by its i1 elements.
4576       // This poses a problem when we wish to insert a scalable vector which
4577       // can't be re-expressed as a larger type. Just choose the slow path and
4578       // extend to a larger type, then truncate back down.
4579       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4580       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4581       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4582       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4583       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4584                         Op.getOperand(2));
4585       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4586       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4587     }
4588   }
4589 
4590   // If the subvector vector is a fixed-length type, we cannot use subregister
4591   // manipulation to simplify the codegen; we don't know which register of a
4592   // LMUL group contains the specific subvector as we only know the minimum
4593   // register size. Therefore we must slide the vector group up the full
4594   // amount.
4595   if (SubVecVT.isFixedLengthVector()) {
4596     if (OrigIdx == 0 && Vec.isUndef())
4597       return Op;
4598     MVT ContainerVT = VecVT;
4599     if (VecVT.isFixedLengthVector()) {
4600       ContainerVT = getContainerForFixedLengthVector(VecVT);
4601       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4602     }
4603     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4604                          DAG.getUNDEF(ContainerVT), SubVec,
4605                          DAG.getConstant(0, DL, XLenVT));
4606     SDValue Mask =
4607         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4608     // Set the vector length to only the number of elements we care about. Note
4609     // that for slideup this includes the offset.
4610     SDValue VL =
4611         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4612     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4613     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4614                                   SubVec, SlideupAmt, Mask, VL);
4615     if (VecVT.isFixedLengthVector())
4616       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4617     return DAG.getBitcast(Op.getValueType(), Slideup);
4618   }
4619 
4620   unsigned SubRegIdx, RemIdx;
4621   std::tie(SubRegIdx, RemIdx) =
4622       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4623           VecVT, SubVecVT, OrigIdx, TRI);
4624 
4625   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4626   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4627                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4628                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4629 
4630   // 1. If the Idx has been completely eliminated and this subvector's size is
4631   // a vector register or a multiple thereof, or the surrounding elements are
4632   // undef, then this is a subvector insert which naturally aligns to a vector
4633   // register. These can easily be handled using subregister manipulation.
4634   // 2. If the subvector is smaller than a vector register, then the insertion
4635   // must preserve the undisturbed elements of the register. We do this by
4636   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4637   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4638   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4639   // LMUL=1 type back into the larger vector (resolving to another subregister
4640   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4641   // to avoid allocating a large register group to hold our subvector.
4642   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4643     return Op;
4644 
4645   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
4646   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
4647   // (in our case undisturbed). This means we can set up a subvector insertion
4648   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4649   // size of the subvector.
4650   MVT InterSubVT = VecVT;
4651   SDValue AlignedExtract = Vec;
4652   unsigned AlignedIdx = OrigIdx - RemIdx;
4653   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4654     InterSubVT = getLMUL1VT(VecVT);
4655     // Extract a subvector equal to the nearest full vector register type. This
4656     // should resolve to a EXTRACT_SUBREG instruction.
4657     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4658                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4659   }
4660 
4661   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4662   // For scalable vectors this must be further multiplied by vscale.
4663   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4664 
4665   SDValue Mask, VL;
4666   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4667 
4668   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4669   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4670   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4671   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4672 
4673   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4674                        DAG.getUNDEF(InterSubVT), SubVec,
4675                        DAG.getConstant(0, DL, XLenVT));
4676 
4677   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4678                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4679 
4680   // If required, insert this subvector back into the correct vector register.
4681   // This should resolve to an INSERT_SUBREG instruction.
4682   if (VecVT.bitsGT(InterSubVT))
4683     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4684                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4685 
4686   // We might have bitcast from a mask type: cast back to the original type if
4687   // required.
4688   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4689 }
4690 
4691 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4692                                                     SelectionDAG &DAG) const {
4693   SDValue Vec = Op.getOperand(0);
4694   MVT SubVecVT = Op.getSimpleValueType();
4695   MVT VecVT = Vec.getSimpleValueType();
4696 
4697   SDLoc DL(Op);
4698   MVT XLenVT = Subtarget.getXLenVT();
4699   unsigned OrigIdx = Op.getConstantOperandVal(1);
4700   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4701 
4702   // We don't have the ability to slide mask vectors down indexed by their i1
4703   // elements; the smallest we can do is i8. Often we are able to bitcast to
4704   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4705   // from a scalable one, we might not necessarily have enough scalable
4706   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4707   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4708     if (VecVT.getVectorMinNumElements() >= 8 &&
4709         SubVecVT.getVectorMinNumElements() >= 8) {
4710       assert(OrigIdx % 8 == 0 && "Invalid index");
4711       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4712              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4713              "Unexpected mask vector lowering");
4714       OrigIdx /= 8;
4715       SubVecVT =
4716           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4717                            SubVecVT.isScalableVector());
4718       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4719                                VecVT.isScalableVector());
4720       Vec = DAG.getBitcast(VecVT, Vec);
4721     } else {
4722       // We can't slide this mask vector down, indexed by its i1 elements.
4723       // This poses a problem when we wish to extract a scalable vector which
4724       // can't be re-expressed as a larger type. Just choose the slow path and
4725       // extend to a larger type, then truncate back down.
4726       // TODO: We could probably improve this when extracting certain fixed
4727       // from fixed, where we can extract as i8 and shift the correct element
4728       // right to reach the desired subvector?
4729       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4730       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4731       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4732       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4733                         Op.getOperand(1));
4734       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4735       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4736     }
4737   }
4738 
4739   // If the subvector vector is a fixed-length type, we cannot use subregister
4740   // manipulation to simplify the codegen; we don't know which register of a
4741   // LMUL group contains the specific subvector as we only know the minimum
4742   // register size. Therefore we must slide the vector group down the full
4743   // amount.
4744   if (SubVecVT.isFixedLengthVector()) {
4745     // With an index of 0 this is a cast-like subvector, which can be performed
4746     // with subregister operations.
4747     if (OrigIdx == 0)
4748       return Op;
4749     MVT ContainerVT = VecVT;
4750     if (VecVT.isFixedLengthVector()) {
4751       ContainerVT = getContainerForFixedLengthVector(VecVT);
4752       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4753     }
4754     SDValue Mask =
4755         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4756     // Set the vector length to only the number of elements we care about. This
4757     // avoids sliding down elements we're going to discard straight away.
4758     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4759     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4760     SDValue Slidedown =
4761         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4762                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4763     // Now we can use a cast-like subvector extract to get the result.
4764     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4765                             DAG.getConstant(0, DL, XLenVT));
4766     return DAG.getBitcast(Op.getValueType(), Slidedown);
4767   }
4768 
4769   unsigned SubRegIdx, RemIdx;
4770   std::tie(SubRegIdx, RemIdx) =
4771       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4772           VecVT, SubVecVT, OrigIdx, TRI);
4773 
4774   // If the Idx has been completely eliminated then this is a subvector extract
4775   // which naturally aligns to a vector register. These can easily be handled
4776   // using subregister manipulation.
4777   if (RemIdx == 0)
4778     return Op;
4779 
4780   // Else we must shift our vector register directly to extract the subvector.
4781   // Do this using VSLIDEDOWN.
4782 
4783   // If the vector type is an LMUL-group type, extract a subvector equal to the
4784   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
4785   // instruction.
4786   MVT InterSubVT = VecVT;
4787   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4788     InterSubVT = getLMUL1VT(VecVT);
4789     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4790                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4791   }
4792 
4793   // Slide this vector register down by the desired number of elements in order
4794   // to place the desired subvector starting at element 0.
4795   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4796   // For scalable vectors this must be further multiplied by vscale.
4797   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4798 
4799   SDValue Mask, VL;
4800   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4801   SDValue Slidedown =
4802       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4803                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4804 
4805   // Now the vector is in the right position, extract our final subvector. This
4806   // should resolve to a COPY.
4807   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4808                           DAG.getConstant(0, DL, XLenVT));
4809 
4810   // We might have bitcast from a mask type: cast back to the original type if
4811   // required.
4812   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4813 }
4814 
4815 // Lower step_vector to the vid instruction. Any non-identity step value must
4816 // be accounted for my manual expansion.
4817 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4818                                               SelectionDAG &DAG) const {
4819   SDLoc DL(Op);
4820   MVT VT = Op.getSimpleValueType();
4821   MVT XLenVT = Subtarget.getXLenVT();
4822   SDValue Mask, VL;
4823   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4824   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4825   uint64_t StepValImm = Op.getConstantOperandVal(0);
4826   if (StepValImm != 1) {
4827     if (isPowerOf2_64(StepValImm)) {
4828       SDValue StepVal =
4829           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4830                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4831       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4832     } else {
4833       SDValue StepVal = lowerScalarSplat(
4834           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4835           DL, DAG, Subtarget);
4836       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4837     }
4838   }
4839   return StepVec;
4840 }
4841 
4842 // Implement vector_reverse using vrgather.vv with indices determined by
4843 // subtracting the id of each element from (VLMAX-1). This will convert
4844 // the indices like so:
4845 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4846 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4847 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4848                                                  SelectionDAG &DAG) const {
4849   SDLoc DL(Op);
4850   MVT VecVT = Op.getSimpleValueType();
4851   unsigned EltSize = VecVT.getScalarSizeInBits();
4852   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4853 
4854   unsigned MaxVLMAX = 0;
4855   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4856   if (VectorBitsMax != 0)
4857     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4858 
4859   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4860   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4861 
4862   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4863   // to use vrgatherei16.vv.
4864   // TODO: It's also possible to use vrgatherei16.vv for other types to
4865   // decrease register width for the index calculation.
4866   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
4867     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
4868     // Reverse each half, then reassemble them in reverse order.
4869     // NOTE: It's also possible that after splitting that VLMAX no longer
4870     // requires vrgatherei16.vv.
4871     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4872       SDValue Lo, Hi;
4873       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4874       EVT LoVT, HiVT;
4875       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4876       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4877       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4878       // Reassemble the low and high pieces reversed.
4879       // FIXME: This is a CONCAT_VECTORS.
4880       SDValue Res =
4881           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4882                       DAG.getIntPtrConstant(0, DL));
4883       return DAG.getNode(
4884           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4885           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4886     }
4887 
4888     // Just promote the int type to i16 which will double the LMUL.
4889     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4890     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4891   }
4892 
4893   MVT XLenVT = Subtarget.getXLenVT();
4894   SDValue Mask, VL;
4895   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4896 
4897   // Calculate VLMAX-1 for the desired SEW.
4898   unsigned MinElts = VecVT.getVectorMinNumElements();
4899   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4900                               DAG.getConstant(MinElts, DL, XLenVT));
4901   SDValue VLMinus1 =
4902       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4903 
4904   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4905   bool IsRV32E64 =
4906       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4907   SDValue SplatVL;
4908   if (!IsRV32E64)
4909     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4910   else
4911     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4912 
4913   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4914   SDValue Indices =
4915       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4916 
4917   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4918 }
4919 
4920 SDValue
4921 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4922                                                      SelectionDAG &DAG) const {
4923   SDLoc DL(Op);
4924   auto *Load = cast<LoadSDNode>(Op);
4925 
4926   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4927                                         Load->getMemoryVT(),
4928                                         *Load->getMemOperand()) &&
4929          "Expecting a correctly-aligned load");
4930 
4931   MVT VT = Op.getSimpleValueType();
4932   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4933 
4934   SDValue VL =
4935       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4936 
4937   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4938   SDValue NewLoad = DAG.getMemIntrinsicNode(
4939       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4940       Load->getMemoryVT(), Load->getMemOperand());
4941 
4942   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4943   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4944 }
4945 
4946 SDValue
4947 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4948                                                       SelectionDAG &DAG) const {
4949   SDLoc DL(Op);
4950   auto *Store = cast<StoreSDNode>(Op);
4951 
4952   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4953                                         Store->getMemoryVT(),
4954                                         *Store->getMemOperand()) &&
4955          "Expecting a correctly-aligned store");
4956 
4957   SDValue StoreVal = Store->getValue();
4958   MVT VT = StoreVal.getSimpleValueType();
4959 
4960   // If the size less than a byte, we need to pad with zeros to make a byte.
4961   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4962     VT = MVT::v8i1;
4963     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4964                            DAG.getConstant(0, DL, VT), StoreVal,
4965                            DAG.getIntPtrConstant(0, DL));
4966   }
4967 
4968   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4969 
4970   SDValue VL =
4971       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4972 
4973   SDValue NewValue =
4974       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4975   return DAG.getMemIntrinsicNode(
4976       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4977       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4978       Store->getMemoryVT(), Store->getMemOperand());
4979 }
4980 
4981 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
4982                                              SelectionDAG &DAG) const {
4983   SDLoc DL(Op);
4984   MVT VT = Op.getSimpleValueType();
4985 
4986   const auto *MemSD = cast<MemSDNode>(Op);
4987   EVT MemVT = MemSD->getMemoryVT();
4988   MachineMemOperand *MMO = MemSD->getMemOperand();
4989   SDValue Chain = MemSD->getChain();
4990   SDValue BasePtr = MemSD->getBasePtr();
4991 
4992   SDValue Mask, PassThru, VL;
4993   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
4994     Mask = VPLoad->getMask();
4995     PassThru = DAG.getUNDEF(VT);
4996     VL = VPLoad->getVectorLength();
4997   } else {
4998     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
4999     Mask = MLoad->getMask();
5000     PassThru = MLoad->getPassThru();
5001   }
5002 
5003   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5004 
5005   MVT XLenVT = Subtarget.getXLenVT();
5006 
5007   MVT ContainerVT = VT;
5008   if (VT.isFixedLengthVector()) {
5009     ContainerVT = getContainerForFixedLengthVector(VT);
5010     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5011     if (!IsUnmasked) {
5012       MVT MaskVT =
5013           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5014       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5015     }
5016   }
5017 
5018   if (!VL)
5019     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5020 
5021   unsigned IntID =
5022       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5023   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5024   if (!IsUnmasked)
5025     Ops.push_back(PassThru);
5026   Ops.push_back(BasePtr);
5027   if (!IsUnmasked)
5028     Ops.push_back(Mask);
5029   Ops.push_back(VL);
5030   if (!IsUnmasked)
5031     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5032 
5033   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5034 
5035   SDValue Result =
5036       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5037   Chain = Result.getValue(1);
5038 
5039   if (VT.isFixedLengthVector())
5040     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5041 
5042   return DAG.getMergeValues({Result, Chain}, DL);
5043 }
5044 
5045 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5046                                               SelectionDAG &DAG) const {
5047   SDLoc DL(Op);
5048 
5049   const auto *MemSD = cast<MemSDNode>(Op);
5050   EVT MemVT = MemSD->getMemoryVT();
5051   MachineMemOperand *MMO = MemSD->getMemOperand();
5052   SDValue Chain = MemSD->getChain();
5053   SDValue BasePtr = MemSD->getBasePtr();
5054   SDValue Val, Mask, VL;
5055 
5056   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5057     Val = VPStore->getValue();
5058     Mask = VPStore->getMask();
5059     VL = VPStore->getVectorLength();
5060   } else {
5061     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5062     Val = MStore->getValue();
5063     Mask = MStore->getMask();
5064   }
5065 
5066   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5067 
5068   MVT VT = Val.getSimpleValueType();
5069   MVT XLenVT = Subtarget.getXLenVT();
5070 
5071   MVT ContainerVT = VT;
5072   if (VT.isFixedLengthVector()) {
5073     ContainerVT = getContainerForFixedLengthVector(VT);
5074 
5075     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5076     if (!IsUnmasked) {
5077       MVT MaskVT =
5078           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5079       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5080     }
5081   }
5082 
5083   if (!VL)
5084     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5085 
5086   unsigned IntID =
5087       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5088   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5089   Ops.push_back(Val);
5090   Ops.push_back(BasePtr);
5091   if (!IsUnmasked)
5092     Ops.push_back(Mask);
5093   Ops.push_back(VL);
5094 
5095   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5096                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5097 }
5098 
5099 SDValue
5100 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5101                                                       SelectionDAG &DAG) const {
5102   MVT InVT = Op.getOperand(0).getSimpleValueType();
5103   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5104 
5105   MVT VT = Op.getSimpleValueType();
5106 
5107   SDValue Op1 =
5108       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5109   SDValue Op2 =
5110       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5111 
5112   SDLoc DL(Op);
5113   SDValue VL =
5114       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5115 
5116   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5117   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5118 
5119   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5120                             Op.getOperand(2), Mask, VL);
5121 
5122   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5123 }
5124 
5125 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5126     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5127   MVT VT = Op.getSimpleValueType();
5128 
5129   if (VT.getVectorElementType() == MVT::i1)
5130     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5131 
5132   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5133 }
5134 
5135 SDValue
5136 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5137                                                       SelectionDAG &DAG) const {
5138   unsigned Opc;
5139   switch (Op.getOpcode()) {
5140   default: llvm_unreachable("Unexpected opcode!");
5141   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5142   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5143   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5144   }
5145 
5146   return lowerToScalableOp(Op, DAG, Opc);
5147 }
5148 
5149 // Lower vector ABS to smax(X, sub(0, X)).
5150 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5151   SDLoc DL(Op);
5152   MVT VT = Op.getSimpleValueType();
5153   SDValue X = Op.getOperand(0);
5154 
5155   assert(VT.isFixedLengthVector() && "Unexpected type");
5156 
5157   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5158   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5159 
5160   SDValue Mask, VL;
5161   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5162 
5163   SDValue SplatZero =
5164       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5165                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5166   SDValue NegX =
5167       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5168   SDValue Max =
5169       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5170 
5171   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5172 }
5173 
5174 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5175     SDValue Op, SelectionDAG &DAG) const {
5176   SDLoc DL(Op);
5177   MVT VT = Op.getSimpleValueType();
5178   SDValue Mag = Op.getOperand(0);
5179   SDValue Sign = Op.getOperand(1);
5180   assert(Mag.getValueType() == Sign.getValueType() &&
5181          "Can only handle COPYSIGN with matching types.");
5182 
5183   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5184   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5185   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5186 
5187   SDValue Mask, VL;
5188   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5189 
5190   SDValue CopySign =
5191       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5192 
5193   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5194 }
5195 
5196 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5197     SDValue Op, SelectionDAG &DAG) const {
5198   MVT VT = Op.getSimpleValueType();
5199   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5200 
5201   MVT I1ContainerVT =
5202       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5203 
5204   SDValue CC =
5205       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5206   SDValue Op1 =
5207       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5208   SDValue Op2 =
5209       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5210 
5211   SDLoc DL(Op);
5212   SDValue Mask, VL;
5213   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5214 
5215   SDValue Select =
5216       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5217 
5218   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5219 }
5220 
5221 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5222                                                unsigned NewOpc,
5223                                                bool HasMask) const {
5224   MVT VT = Op.getSimpleValueType();
5225   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5226 
5227   // Create list of operands by converting existing ones to scalable types.
5228   SmallVector<SDValue, 6> Ops;
5229   for (const SDValue &V : Op->op_values()) {
5230     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5231 
5232     // Pass through non-vector operands.
5233     if (!V.getValueType().isVector()) {
5234       Ops.push_back(V);
5235       continue;
5236     }
5237 
5238     // "cast" fixed length vector to a scalable vector.
5239     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5240            "Only fixed length vectors are supported!");
5241     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5242   }
5243 
5244   SDLoc DL(Op);
5245   SDValue Mask, VL;
5246   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5247   if (HasMask)
5248     Ops.push_back(Mask);
5249   Ops.push_back(VL);
5250 
5251   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5252   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5253 }
5254 
5255 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5256 // * Operands of each node are assumed to be in the same order.
5257 // * The EVL operand is promoted from i32 to i64 on RV64.
5258 // * Fixed-length vectors are converted to their scalable-vector container
5259 //   types.
5260 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5261                                        unsigned RISCVISDOpc) const {
5262   SDLoc DL(Op);
5263   MVT VT = Op.getSimpleValueType();
5264   SmallVector<SDValue, 4> Ops;
5265 
5266   for (const auto &OpIdx : enumerate(Op->ops())) {
5267     SDValue V = OpIdx.value();
5268     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5269     // Pass through operands which aren't fixed-length vectors.
5270     if (!V.getValueType().isFixedLengthVector()) {
5271       Ops.push_back(V);
5272       continue;
5273     }
5274     // "cast" fixed length vector to a scalable vector.
5275     MVT OpVT = V.getSimpleValueType();
5276     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5277     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5278            "Only fixed length vectors are supported!");
5279     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5280   }
5281 
5282   if (!VT.isFixedLengthVector())
5283     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5284 
5285   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5286 
5287   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5288 
5289   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5290 }
5291 
5292 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5293 // matched to a RVV indexed load. The RVV indexed load instructions only
5294 // support the "unsigned unscaled" addressing mode; indices are implicitly
5295 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5296 // signed or scaled indexing is extended to the XLEN value type and scaled
5297 // accordingly.
5298 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5299                                                SelectionDAG &DAG) const {
5300   SDLoc DL(Op);
5301   MVT VT = Op.getSimpleValueType();
5302 
5303   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5304   EVT MemVT = MemSD->getMemoryVT();
5305   MachineMemOperand *MMO = MemSD->getMemOperand();
5306   SDValue Chain = MemSD->getChain();
5307   SDValue BasePtr = MemSD->getBasePtr();
5308 
5309   ISD::LoadExtType LoadExtType;
5310   SDValue Index, Mask, PassThru, VL;
5311 
5312   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5313     Index = VPGN->getIndex();
5314     Mask = VPGN->getMask();
5315     PassThru = DAG.getUNDEF(VT);
5316     VL = VPGN->getVectorLength();
5317     // VP doesn't support extending loads.
5318     LoadExtType = ISD::NON_EXTLOAD;
5319   } else {
5320     // Else it must be a MGATHER.
5321     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5322     Index = MGN->getIndex();
5323     Mask = MGN->getMask();
5324     PassThru = MGN->getPassThru();
5325     LoadExtType = MGN->getExtensionType();
5326   }
5327 
5328   MVT IndexVT = Index.getSimpleValueType();
5329   MVT XLenVT = Subtarget.getXLenVT();
5330 
5331   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5332          "Unexpected VTs!");
5333   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5334   // Targets have to explicitly opt-in for extending vector loads.
5335   assert(LoadExtType == ISD::NON_EXTLOAD &&
5336          "Unexpected extending MGATHER/VP_GATHER");
5337   (void)LoadExtType;
5338 
5339   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5340   // the selection of the masked intrinsics doesn't do this for us.
5341   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5342 
5343   MVT ContainerVT = VT;
5344   if (VT.isFixedLengthVector()) {
5345     // We need to use the larger of the result and index type to determine the
5346     // scalable type to use so we don't increase LMUL for any operand/result.
5347     if (VT.bitsGE(IndexVT)) {
5348       ContainerVT = getContainerForFixedLengthVector(VT);
5349       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5350                                  ContainerVT.getVectorElementCount());
5351     } else {
5352       IndexVT = getContainerForFixedLengthVector(IndexVT);
5353       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
5354                                      IndexVT.getVectorElementCount());
5355     }
5356 
5357     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5358 
5359     if (!IsUnmasked) {
5360       MVT MaskVT =
5361           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5362       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5363       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5364     }
5365   }
5366 
5367   if (!VL)
5368     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5369 
5370   unsigned IntID =
5371       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
5372   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5373   if (!IsUnmasked)
5374     Ops.push_back(PassThru);
5375   Ops.push_back(BasePtr);
5376   Ops.push_back(Index);
5377   if (!IsUnmasked)
5378     Ops.push_back(Mask);
5379   Ops.push_back(VL);
5380   if (!IsUnmasked)
5381     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5382 
5383   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5384   SDValue Result =
5385       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5386   Chain = Result.getValue(1);
5387 
5388   if (VT.isFixedLengthVector())
5389     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5390 
5391   return DAG.getMergeValues({Result, Chain}, DL);
5392 }
5393 
5394 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
5395 // matched to a RVV indexed store. The RVV indexed store instructions only
5396 // support the "unsigned unscaled" addressing mode; indices are implicitly
5397 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5398 // signed or scaled indexing is extended to the XLEN value type and scaled
5399 // accordingly.
5400 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
5401                                                 SelectionDAG &DAG) const {
5402   SDLoc DL(Op);
5403   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5404   EVT MemVT = MemSD->getMemoryVT();
5405   MachineMemOperand *MMO = MemSD->getMemOperand();
5406   SDValue Chain = MemSD->getChain();
5407   SDValue BasePtr = MemSD->getBasePtr();
5408 
5409   bool IsTruncatingStore = false;
5410   SDValue Index, Mask, Val, VL;
5411 
5412   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
5413     Index = VPSN->getIndex();
5414     Mask = VPSN->getMask();
5415     Val = VPSN->getValue();
5416     VL = VPSN->getVectorLength();
5417     // VP doesn't support truncating stores.
5418     IsTruncatingStore = false;
5419   } else {
5420     // Else it must be a MSCATTER.
5421     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
5422     Index = MSN->getIndex();
5423     Mask = MSN->getMask();
5424     Val = MSN->getValue();
5425     IsTruncatingStore = MSN->isTruncatingStore();
5426   }
5427 
5428   MVT VT = Val.getSimpleValueType();
5429   MVT IndexVT = Index.getSimpleValueType();
5430   MVT XLenVT = Subtarget.getXLenVT();
5431 
5432   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5433          "Unexpected VTs!");
5434   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5435   // Targets have to explicitly opt-in for extending vector loads and
5436   // truncating vector stores.
5437   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
5438   (void)IsTruncatingStore;
5439 
5440   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5441   // the selection of the masked intrinsics doesn't do this for us.
5442   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5443 
5444   MVT ContainerVT = VT;
5445   if (VT.isFixedLengthVector()) {
5446     // We need to use the larger of the value and index type to determine the
5447     // scalable type to use so we don't increase LMUL for any operand/result.
5448     if (VT.bitsGE(IndexVT)) {
5449       ContainerVT = getContainerForFixedLengthVector(VT);
5450       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5451                                  ContainerVT.getVectorElementCount());
5452     } else {
5453       IndexVT = getContainerForFixedLengthVector(IndexVT);
5454       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
5455                                      IndexVT.getVectorElementCount());
5456     }
5457 
5458     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5459     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5460 
5461     if (!IsUnmasked) {
5462       MVT MaskVT =
5463           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5464       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5465     }
5466   }
5467 
5468   if (!VL)
5469     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5470 
5471   unsigned IntID =
5472       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
5473   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5474   Ops.push_back(Val);
5475   Ops.push_back(BasePtr);
5476   Ops.push_back(Index);
5477   if (!IsUnmasked)
5478     Ops.push_back(Mask);
5479   Ops.push_back(VL);
5480 
5481   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5482                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5483 }
5484 
5485 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
5486                                                SelectionDAG &DAG) const {
5487   const MVT XLenVT = Subtarget.getXLenVT();
5488   SDLoc DL(Op);
5489   SDValue Chain = Op->getOperand(0);
5490   SDValue SysRegNo = DAG.getTargetConstant(
5491       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5492   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
5493   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
5494 
5495   // Encoding used for rounding mode in RISCV differs from that used in
5496   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
5497   // table, which consists of a sequence of 4-bit fields, each representing
5498   // corresponding FLT_ROUNDS mode.
5499   static const int Table =
5500       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
5501       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
5502       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
5503       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
5504       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
5505 
5506   SDValue Shift =
5507       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
5508   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5509                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5510   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5511                                DAG.getConstant(7, DL, XLenVT));
5512 
5513   return DAG.getMergeValues({Masked, Chain}, DL);
5514 }
5515 
5516 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
5517                                                SelectionDAG &DAG) const {
5518   const MVT XLenVT = Subtarget.getXLenVT();
5519   SDLoc DL(Op);
5520   SDValue Chain = Op->getOperand(0);
5521   SDValue RMValue = Op->getOperand(1);
5522   SDValue SysRegNo = DAG.getTargetConstant(
5523       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5524 
5525   // Encoding used for rounding mode in RISCV differs from that used in
5526   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
5527   // a table, which consists of a sequence of 4-bit fields, each representing
5528   // corresponding RISCV mode.
5529   static const unsigned Table =
5530       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
5531       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
5532       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
5533       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
5534       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
5535 
5536   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
5537                               DAG.getConstant(2, DL, XLenVT));
5538   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5539                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5540   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5541                         DAG.getConstant(0x7, DL, XLenVT));
5542   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
5543                      RMValue);
5544 }
5545 
5546 // Returns the opcode of the target-specific SDNode that implements the 32-bit
5547 // form of the given Opcode.
5548 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
5549   switch (Opcode) {
5550   default:
5551     llvm_unreachable("Unexpected opcode");
5552   case ISD::SHL:
5553     return RISCVISD::SLLW;
5554   case ISD::SRA:
5555     return RISCVISD::SRAW;
5556   case ISD::SRL:
5557     return RISCVISD::SRLW;
5558   case ISD::SDIV:
5559     return RISCVISD::DIVW;
5560   case ISD::UDIV:
5561     return RISCVISD::DIVUW;
5562   case ISD::UREM:
5563     return RISCVISD::REMUW;
5564   case ISD::ROTL:
5565     return RISCVISD::ROLW;
5566   case ISD::ROTR:
5567     return RISCVISD::RORW;
5568   case RISCVISD::GREV:
5569     return RISCVISD::GREVW;
5570   case RISCVISD::GORC:
5571     return RISCVISD::GORCW;
5572   }
5573 }
5574 
5575 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
5576 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
5577 // otherwise be promoted to i64, making it difficult to select the
5578 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
5579 // type i8/i16/i32 is lost.
5580 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
5581                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
5582   SDLoc DL(N);
5583   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5584   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
5585   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
5586   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5587   // ReplaceNodeResults requires we maintain the same type for the return value.
5588   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5589 }
5590 
5591 // Converts the given 32-bit operation to a i64 operation with signed extension
5592 // semantic to reduce the signed extension instructions.
5593 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
5594   SDLoc DL(N);
5595   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5596   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5597   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
5598   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5599                                DAG.getValueType(MVT::i32));
5600   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
5601 }
5602 
5603 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
5604                                              SmallVectorImpl<SDValue> &Results,
5605                                              SelectionDAG &DAG) const {
5606   SDLoc DL(N);
5607   switch (N->getOpcode()) {
5608   default:
5609     llvm_unreachable("Don't know how to custom type legalize this operation!");
5610   case ISD::STRICT_FP_TO_SINT:
5611   case ISD::STRICT_FP_TO_UINT:
5612   case ISD::FP_TO_SINT:
5613   case ISD::FP_TO_UINT: {
5614     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5615            "Unexpected custom legalisation");
5616     bool IsStrict = N->isStrictFPOpcode();
5617     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
5618                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
5619     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
5620     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
5621         TargetLowering::TypeSoftenFloat) {
5622       // FIXME: Support strict FP.
5623       if (IsStrict)
5624         return;
5625       if (!isTypeLegal(Op0.getValueType()))
5626         return;
5627       unsigned Opc =
5628           IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
5629       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
5630       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5631       return;
5632     }
5633     // If the FP type needs to be softened, emit a library call using the 'si'
5634     // version. If we left it to default legalization we'd end up with 'di'. If
5635     // the FP type doesn't need to be softened just let generic type
5636     // legalization promote the result type.
5637     RTLIB::Libcall LC;
5638     if (IsSigned)
5639       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
5640     else
5641       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
5642     MakeLibCallOptions CallOptions;
5643     EVT OpVT = Op0.getValueType();
5644     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5645     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5646     SDValue Result;
5647     std::tie(Result, Chain) =
5648         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5649     Results.push_back(Result);
5650     if (IsStrict)
5651       Results.push_back(Chain);
5652     break;
5653   }
5654   case ISD::READCYCLECOUNTER: {
5655     assert(!Subtarget.is64Bit() &&
5656            "READCYCLECOUNTER only has custom type legalization on riscv32");
5657 
5658     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5659     SDValue RCW =
5660         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5661 
5662     Results.push_back(
5663         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
5664     Results.push_back(RCW.getValue(2));
5665     break;
5666   }
5667   case ISD::MUL: {
5668     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
5669     unsigned XLen = Subtarget.getXLen();
5670     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
5671     if (Size > XLen) {
5672       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
5673       SDValue LHS = N->getOperand(0);
5674       SDValue RHS = N->getOperand(1);
5675       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
5676 
5677       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
5678       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
5679       // We need exactly one side to be unsigned.
5680       if (LHSIsU == RHSIsU)
5681         return;
5682 
5683       auto MakeMULPair = [&](SDValue S, SDValue U) {
5684         MVT XLenVT = Subtarget.getXLenVT();
5685         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
5686         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
5687         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
5688         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
5689         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
5690       };
5691 
5692       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
5693       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
5694 
5695       // The other operand should be signed, but still prefer MULH when
5696       // possible.
5697       if (RHSIsU && LHSIsS && !RHSIsS)
5698         Results.push_back(MakeMULPair(LHS, RHS));
5699       else if (LHSIsU && RHSIsS && !LHSIsS)
5700         Results.push_back(MakeMULPair(RHS, LHS));
5701 
5702       return;
5703     }
5704     LLVM_FALLTHROUGH;
5705   }
5706   case ISD::ADD:
5707   case ISD::SUB:
5708     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5709            "Unexpected custom legalisation");
5710     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
5711     break;
5712   case ISD::SHL:
5713   case ISD::SRA:
5714   case ISD::SRL:
5715     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5716            "Unexpected custom legalisation");
5717     if (N->getOperand(1).getOpcode() != ISD::Constant) {
5718       Results.push_back(customLegalizeToWOp(N, DAG));
5719       break;
5720     }
5721 
5722     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
5723     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
5724     // shift amount.
5725     if (N->getOpcode() == ISD::SHL) {
5726       SDLoc DL(N);
5727       SDValue NewOp0 =
5728           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5729       SDValue NewOp1 =
5730           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
5731       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
5732       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5733                                    DAG.getValueType(MVT::i32));
5734       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5735     }
5736 
5737     break;
5738   case ISD::ROTL:
5739   case ISD::ROTR:
5740     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5741            "Unexpected custom legalisation");
5742     Results.push_back(customLegalizeToWOp(N, DAG));
5743     break;
5744   case ISD::CTTZ:
5745   case ISD::CTTZ_ZERO_UNDEF:
5746   case ISD::CTLZ:
5747   case ISD::CTLZ_ZERO_UNDEF: {
5748     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5749            "Unexpected custom legalisation");
5750 
5751     SDValue NewOp0 =
5752         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5753     bool IsCTZ =
5754         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
5755     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
5756     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
5757     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5758     return;
5759   }
5760   case ISD::SDIV:
5761   case ISD::UDIV:
5762   case ISD::UREM: {
5763     MVT VT = N->getSimpleValueType(0);
5764     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
5765            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
5766            "Unexpected custom legalisation");
5767     // Don't promote division/remainder by constant since we should expand those
5768     // to multiply by magic constant.
5769     // FIXME: What if the expansion is disabled for minsize.
5770     if (N->getOperand(1).getOpcode() == ISD::Constant)
5771       return;
5772 
5773     // If the input is i32, use ANY_EXTEND since the W instructions don't read
5774     // the upper 32 bits. For other types we need to sign or zero extend
5775     // based on the opcode.
5776     unsigned ExtOpc = ISD::ANY_EXTEND;
5777     if (VT != MVT::i32)
5778       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5779                                            : ISD::ZERO_EXTEND;
5780 
5781     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5782     break;
5783   }
5784   case ISD::UADDO:
5785   case ISD::USUBO: {
5786     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5787            "Unexpected custom legalisation");
5788     bool IsAdd = N->getOpcode() == ISD::UADDO;
5789     // Create an ADDW or SUBW.
5790     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5791     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5792     SDValue Res =
5793         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5794     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5795                       DAG.getValueType(MVT::i32));
5796 
5797     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
5798     // Since the inputs are sign extended from i32, this is equivalent to
5799     // comparing the lower 32 bits.
5800     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5801     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5802                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5803 
5804     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5805     Results.push_back(Overflow);
5806     return;
5807   }
5808   case ISD::UADDSAT:
5809   case ISD::USUBSAT: {
5810     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5811            "Unexpected custom legalisation");
5812     if (Subtarget.hasStdExtZbb()) {
5813       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5814       // sign extend allows overflow of the lower 32 bits to be detected on
5815       // the promoted size.
5816       SDValue LHS =
5817           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5818       SDValue RHS =
5819           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5820       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5821       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5822       return;
5823     }
5824 
5825     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5826     // promotion for UADDO/USUBO.
5827     Results.push_back(expandAddSubSat(N, DAG));
5828     return;
5829   }
5830   case ISD::BITCAST: {
5831     EVT VT = N->getValueType(0);
5832     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5833     SDValue Op0 = N->getOperand(0);
5834     EVT Op0VT = Op0.getValueType();
5835     MVT XLenVT = Subtarget.getXLenVT();
5836     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5837       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5838       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5839     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5840                Subtarget.hasStdExtF()) {
5841       SDValue FPConv =
5842           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5843       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5844     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5845                isTypeLegal(Op0VT)) {
5846       // Custom-legalize bitcasts from fixed-length vector types to illegal
5847       // scalar types in order to improve codegen. Bitcast the vector to a
5848       // one-element vector type whose element type is the same as the result
5849       // type, and extract the first element.
5850       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
5851       if (isTypeLegal(BVT)) {
5852         SDValue BVec = DAG.getBitcast(BVT, Op0);
5853         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5854                                       DAG.getConstant(0, DL, XLenVT)));
5855       }
5856     }
5857     break;
5858   }
5859   case RISCVISD::GREV:
5860   case RISCVISD::GORC: {
5861     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5862            "Unexpected custom legalisation");
5863     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5864     // This is similar to customLegalizeToWOp, except that we pass the second
5865     // operand (a TargetConstant) straight through: it is already of type
5866     // XLenVT.
5867     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5868     SDValue NewOp0 =
5869         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5870     SDValue NewOp1 =
5871         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5872     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5873     // ReplaceNodeResults requires we maintain the same type for the return
5874     // value.
5875     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5876     break;
5877   }
5878   case RISCVISD::SHFL: {
5879     // There is no SHFLIW instruction, but we can just promote the operation.
5880     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5881            "Unexpected custom legalisation");
5882     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5883     SDValue NewOp0 =
5884         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5885     SDValue NewOp1 =
5886         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5887     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5888     // ReplaceNodeResults requires we maintain the same type for the return
5889     // value.
5890     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5891     break;
5892   }
5893   case ISD::BSWAP:
5894   case ISD::BITREVERSE: {
5895     MVT VT = N->getSimpleValueType(0);
5896     MVT XLenVT = Subtarget.getXLenVT();
5897     assert((VT == MVT::i8 || VT == MVT::i16 ||
5898             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5899            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5900     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5901     unsigned Imm = VT.getSizeInBits() - 1;
5902     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5903     if (N->getOpcode() == ISD::BSWAP)
5904       Imm &= ~0x7U;
5905     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5906     SDValue GREVI =
5907         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5908     // ReplaceNodeResults requires we maintain the same type for the return
5909     // value.
5910     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5911     break;
5912   }
5913   case ISD::FSHL:
5914   case ISD::FSHR: {
5915     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5916            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5917     SDValue NewOp0 =
5918         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5919     SDValue NewOp1 =
5920         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5921     SDValue NewOp2 =
5922         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5923     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5924     // Mask the shift amount to 5 bits.
5925     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5926                          DAG.getConstant(0x1f, DL, MVT::i64));
5927     unsigned Opc =
5928         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5929     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5930     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5931     break;
5932   }
5933   case ISD::EXTRACT_VECTOR_ELT: {
5934     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
5935     // type is illegal (currently only vXi64 RV32).
5936     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5937     // transferred to the destination register. We issue two of these from the
5938     // upper- and lower- halves of the SEW-bit vector element, slid down to the
5939     // first element.
5940     SDValue Vec = N->getOperand(0);
5941     SDValue Idx = N->getOperand(1);
5942 
5943     // The vector type hasn't been legalized yet so we can't issue target
5944     // specific nodes if it needs legalization.
5945     // FIXME: We would manually legalize if it's important.
5946     if (!isTypeLegal(Vec.getValueType()))
5947       return;
5948 
5949     MVT VecVT = Vec.getSimpleValueType();
5950 
5951     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5952            VecVT.getVectorElementType() == MVT::i64 &&
5953            "Unexpected EXTRACT_VECTOR_ELT legalization");
5954 
5955     // If this is a fixed vector, we need to convert it to a scalable vector.
5956     MVT ContainerVT = VecVT;
5957     if (VecVT.isFixedLengthVector()) {
5958       ContainerVT = getContainerForFixedLengthVector(VecVT);
5959       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5960     }
5961 
5962     MVT XLenVT = Subtarget.getXLenVT();
5963 
5964     // Use a VL of 1 to avoid processing more elements than we need.
5965     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5966     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5967     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5968 
5969     // Unless the index is known to be 0, we must slide the vector down to get
5970     // the desired element into index 0.
5971     if (!isNullConstant(Idx)) {
5972       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5973                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5974     }
5975 
5976     // Extract the lower XLEN bits of the correct vector element.
5977     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5978 
5979     // To extract the upper XLEN bits of the vector element, shift the first
5980     // element right by 32 bits and re-extract the lower XLEN bits.
5981     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5982                                      DAG.getConstant(32, DL, XLenVT), VL);
5983     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5984                                  ThirtyTwoV, Mask, VL);
5985 
5986     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5987 
5988     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5989     break;
5990   }
5991   case ISD::INTRINSIC_WO_CHAIN: {
5992     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5993     switch (IntNo) {
5994     default:
5995       llvm_unreachable(
5996           "Don't know how to custom type legalize this intrinsic!");
5997     case Intrinsic::riscv_orc_b: {
5998       // Lower to the GORCI encoding for orc.b with the operand extended.
5999       SDValue NewOp =
6000           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6001       // If Zbp is enabled, use GORCIW which will sign extend the result.
6002       unsigned Opc =
6003           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6004       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6005                                 DAG.getConstant(7, DL, MVT::i64));
6006       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6007       return;
6008     }
6009     case Intrinsic::riscv_grev:
6010     case Intrinsic::riscv_gorc: {
6011       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6012              "Unexpected custom legalisation");
6013       SDValue NewOp1 =
6014           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6015       SDValue NewOp2 =
6016           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6017       unsigned Opc =
6018           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
6019       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6020       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6021       break;
6022     }
6023     case Intrinsic::riscv_shfl:
6024     case Intrinsic::riscv_unshfl: {
6025       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6026              "Unexpected custom legalisation");
6027       SDValue NewOp1 =
6028           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6029       SDValue NewOp2 =
6030           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6031       unsigned Opc =
6032           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6033       if (isa<ConstantSDNode>(N->getOperand(2))) {
6034         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6035                              DAG.getConstant(0xf, DL, MVT::i64));
6036         Opc =
6037             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6038       }
6039       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6040       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6041       break;
6042     }
6043     case Intrinsic::riscv_bcompress:
6044     case Intrinsic::riscv_bdecompress: {
6045       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6046              "Unexpected custom legalisation");
6047       SDValue NewOp1 =
6048           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6049       SDValue NewOp2 =
6050           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6051       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
6052                          ? RISCVISD::BCOMPRESSW
6053                          : RISCVISD::BDECOMPRESSW;
6054       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6055       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6056       break;
6057     }
6058     case Intrinsic::riscv_vmv_x_s: {
6059       EVT VT = N->getValueType(0);
6060       MVT XLenVT = Subtarget.getXLenVT();
6061       if (VT.bitsLT(XLenVT)) {
6062         // Simple case just extract using vmv.x.s and truncate.
6063         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6064                                       Subtarget.getXLenVT(), N->getOperand(1));
6065         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6066         return;
6067       }
6068 
6069       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6070              "Unexpected custom legalization");
6071 
6072       // We need to do the move in two steps.
6073       SDValue Vec = N->getOperand(1);
6074       MVT VecVT = Vec.getSimpleValueType();
6075 
6076       // First extract the lower XLEN bits of the element.
6077       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6078 
6079       // To extract the upper XLEN bits of the vector element, shift the first
6080       // element right by 32 bits and re-extract the lower XLEN bits.
6081       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6082       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6083       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6084       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6085                                        DAG.getConstant(32, DL, XLenVT), VL);
6086       SDValue LShr32 =
6087           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6088       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6089 
6090       Results.push_back(
6091           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6092       break;
6093     }
6094     }
6095     break;
6096   }
6097   case ISD::VECREDUCE_ADD:
6098   case ISD::VECREDUCE_AND:
6099   case ISD::VECREDUCE_OR:
6100   case ISD::VECREDUCE_XOR:
6101   case ISD::VECREDUCE_SMAX:
6102   case ISD::VECREDUCE_UMAX:
6103   case ISD::VECREDUCE_SMIN:
6104   case ISD::VECREDUCE_UMIN:
6105     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6106       Results.push_back(V);
6107     break;
6108   case ISD::VP_REDUCE_ADD:
6109   case ISD::VP_REDUCE_AND:
6110   case ISD::VP_REDUCE_OR:
6111   case ISD::VP_REDUCE_XOR:
6112   case ISD::VP_REDUCE_SMAX:
6113   case ISD::VP_REDUCE_UMAX:
6114   case ISD::VP_REDUCE_SMIN:
6115   case ISD::VP_REDUCE_UMIN:
6116     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6117       Results.push_back(V);
6118     break;
6119   case ISD::FLT_ROUNDS_: {
6120     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6121     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6122     Results.push_back(Res.getValue(0));
6123     Results.push_back(Res.getValue(1));
6124     break;
6125   }
6126   }
6127 }
6128 
6129 // A structure to hold one of the bit-manipulation patterns below. Together, a
6130 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6131 //   (or (and (shl x, 1), 0xAAAAAAAA),
6132 //       (and (srl x, 1), 0x55555555))
6133 struct RISCVBitmanipPat {
6134   SDValue Op;
6135   unsigned ShAmt;
6136   bool IsSHL;
6137 
6138   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6139     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6140   }
6141 };
6142 
6143 // Matches patterns of the form
6144 //   (and (shl x, C2), (C1 << C2))
6145 //   (and (srl x, C2), C1)
6146 //   (shl (and x, C1), C2)
6147 //   (srl (and x, (C1 << C2)), C2)
6148 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6149 // The expected masks for each shift amount are specified in BitmanipMasks where
6150 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6151 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6152 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6153 // XLen is 64.
6154 static Optional<RISCVBitmanipPat>
6155 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6156   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6157          "Unexpected number of masks");
6158   Optional<uint64_t> Mask;
6159   // Optionally consume a mask around the shift operation.
6160   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6161     Mask = Op.getConstantOperandVal(1);
6162     Op = Op.getOperand(0);
6163   }
6164   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6165     return None;
6166   bool IsSHL = Op.getOpcode() == ISD::SHL;
6167 
6168   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6169     return None;
6170   uint64_t ShAmt = Op.getConstantOperandVal(1);
6171 
6172   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6173   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6174     return None;
6175   // If we don't have enough masks for 64 bit, then we must be trying to
6176   // match SHFL so we're only allowed to shift 1/4 of the width.
6177   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6178     return None;
6179 
6180   SDValue Src = Op.getOperand(0);
6181 
6182   // The expected mask is shifted left when the AND is found around SHL
6183   // patterns.
6184   //   ((x >> 1) & 0x55555555)
6185   //   ((x << 1) & 0xAAAAAAAA)
6186   bool SHLExpMask = IsSHL;
6187 
6188   if (!Mask) {
6189     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6190     // the mask is all ones: consume that now.
6191     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6192       Mask = Src.getConstantOperandVal(1);
6193       Src = Src.getOperand(0);
6194       // The expected mask is now in fact shifted left for SRL, so reverse the
6195       // decision.
6196       //   ((x & 0xAAAAAAAA) >> 1)
6197       //   ((x & 0x55555555) << 1)
6198       SHLExpMask = !SHLExpMask;
6199     } else {
6200       // Use a default shifted mask of all-ones if there's no AND, truncated
6201       // down to the expected width. This simplifies the logic later on.
6202       Mask = maskTrailingOnes<uint64_t>(Width);
6203       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6204     }
6205   }
6206 
6207   unsigned MaskIdx = Log2_32(ShAmt);
6208   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6209 
6210   if (SHLExpMask)
6211     ExpMask <<= ShAmt;
6212 
6213   if (Mask != ExpMask)
6214     return None;
6215 
6216   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6217 }
6218 
6219 // Matches any of the following bit-manipulation patterns:
6220 //   (and (shl x, 1), (0x55555555 << 1))
6221 //   (and (srl x, 1), 0x55555555)
6222 //   (shl (and x, 0x55555555), 1)
6223 //   (srl (and x, (0x55555555 << 1)), 1)
6224 // where the shift amount and mask may vary thus:
6225 //   [1]  = 0x55555555 / 0xAAAAAAAA
6226 //   [2]  = 0x33333333 / 0xCCCCCCCC
6227 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6228 //   [8]  = 0x00FF00FF / 0xFF00FF00
6229 //   [16] = 0x0000FFFF / 0xFFFFFFFF
6230 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6231 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6232   // These are the unshifted masks which we use to match bit-manipulation
6233   // patterns. They may be shifted left in certain circumstances.
6234   static const uint64_t BitmanipMasks[] = {
6235       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6236       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6237 
6238   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6239 }
6240 
6241 // Match the following pattern as a GREVI(W) operation
6242 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6243 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6244                                const RISCVSubtarget &Subtarget) {
6245   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6246   EVT VT = Op.getValueType();
6247 
6248   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6249     auto LHS = matchGREVIPat(Op.getOperand(0));
6250     auto RHS = matchGREVIPat(Op.getOperand(1));
6251     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6252       SDLoc DL(Op);
6253       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6254                          DAG.getConstant(LHS->ShAmt, DL, VT));
6255     }
6256   }
6257   return SDValue();
6258 }
6259 
6260 // Matches any the following pattern as a GORCI(W) operation
6261 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6262 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6263 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6264 // Note that with the variant of 3.,
6265 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6266 // the inner pattern will first be matched as GREVI and then the outer
6267 // pattern will be matched to GORC via the first rule above.
6268 // 4.  (or (rotl/rotr x, bitwidth/2), x)
6269 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6270                                const RISCVSubtarget &Subtarget) {
6271   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6272   EVT VT = Op.getValueType();
6273 
6274   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6275     SDLoc DL(Op);
6276     SDValue Op0 = Op.getOperand(0);
6277     SDValue Op1 = Op.getOperand(1);
6278 
6279     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6280       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6281           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6282           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6283         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6284       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
6285       if ((Reverse.getOpcode() == ISD::ROTL ||
6286            Reverse.getOpcode() == ISD::ROTR) &&
6287           Reverse.getOperand(0) == X &&
6288           isa<ConstantSDNode>(Reverse.getOperand(1))) {
6289         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
6290         if (RotAmt == (VT.getSizeInBits() / 2))
6291           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
6292                              DAG.getConstant(RotAmt, DL, VT));
6293       }
6294       return SDValue();
6295     };
6296 
6297     // Check for either commutable permutation of (or (GREVI x, shamt), x)
6298     if (SDValue V = MatchOROfReverse(Op0, Op1))
6299       return V;
6300     if (SDValue V = MatchOROfReverse(Op1, Op0))
6301       return V;
6302 
6303     // OR is commutable so canonicalize its OR operand to the left
6304     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
6305       std::swap(Op0, Op1);
6306     if (Op0.getOpcode() != ISD::OR)
6307       return SDValue();
6308     SDValue OrOp0 = Op0.getOperand(0);
6309     SDValue OrOp1 = Op0.getOperand(1);
6310     auto LHS = matchGREVIPat(OrOp0);
6311     // OR is commutable so swap the operands and try again: x might have been
6312     // on the left
6313     if (!LHS) {
6314       std::swap(OrOp0, OrOp1);
6315       LHS = matchGREVIPat(OrOp0);
6316     }
6317     auto RHS = matchGREVIPat(Op1);
6318     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
6319       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
6320                          DAG.getConstant(LHS->ShAmt, DL, VT));
6321     }
6322   }
6323   return SDValue();
6324 }
6325 
6326 // Matches any of the following bit-manipulation patterns:
6327 //   (and (shl x, 1), (0x22222222 << 1))
6328 //   (and (srl x, 1), 0x22222222)
6329 //   (shl (and x, 0x22222222), 1)
6330 //   (srl (and x, (0x22222222 << 1)), 1)
6331 // where the shift amount and mask may vary thus:
6332 //   [1]  = 0x22222222 / 0x44444444
6333 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
6334 //   [4]  = 0x00F000F0 / 0x0F000F00
6335 //   [8]  = 0x0000FF00 / 0x00FF0000
6336 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
6337 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
6338   // These are the unshifted masks which we use to match bit-manipulation
6339   // patterns. They may be shifted left in certain circumstances.
6340   static const uint64_t BitmanipMasks[] = {
6341       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
6342       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
6343 
6344   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6345 }
6346 
6347 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
6348 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6349                                const RISCVSubtarget &Subtarget) {
6350   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6351   EVT VT = Op.getValueType();
6352 
6353   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6354     return SDValue();
6355 
6356   SDValue Op0 = Op.getOperand(0);
6357   SDValue Op1 = Op.getOperand(1);
6358 
6359   // Or is commutable so canonicalize the second OR to the LHS.
6360   if (Op0.getOpcode() != ISD::OR)
6361     std::swap(Op0, Op1);
6362   if (Op0.getOpcode() != ISD::OR)
6363     return SDValue();
6364 
6365   // We found an inner OR, so our operands are the operands of the inner OR
6366   // and the other operand of the outer OR.
6367   SDValue A = Op0.getOperand(0);
6368   SDValue B = Op0.getOperand(1);
6369   SDValue C = Op1;
6370 
6371   auto Match1 = matchSHFLPat(A);
6372   auto Match2 = matchSHFLPat(B);
6373 
6374   // If neither matched, we failed.
6375   if (!Match1 && !Match2)
6376     return SDValue();
6377 
6378   // We had at least one match. if one failed, try the remaining C operand.
6379   if (!Match1) {
6380     std::swap(A, C);
6381     Match1 = matchSHFLPat(A);
6382     if (!Match1)
6383       return SDValue();
6384   } else if (!Match2) {
6385     std::swap(B, C);
6386     Match2 = matchSHFLPat(B);
6387     if (!Match2)
6388       return SDValue();
6389   }
6390   assert(Match1 && Match2);
6391 
6392   // Make sure our matches pair up.
6393   if (!Match1->formsPairWith(*Match2))
6394     return SDValue();
6395 
6396   // All the remains is to make sure C is an AND with the same input, that masks
6397   // out the bits that are being shuffled.
6398   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6399       C.getOperand(0) != Match1->Op)
6400     return SDValue();
6401 
6402   uint64_t Mask = C.getConstantOperandVal(1);
6403 
6404   static const uint64_t BitmanipMasks[] = {
6405       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6406       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6407   };
6408 
6409   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6410   unsigned MaskIdx = Log2_32(Match1->ShAmt);
6411   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6412 
6413   if (Mask != ExpMask)
6414     return SDValue();
6415 
6416   SDLoc DL(Op);
6417   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
6418                      DAG.getConstant(Match1->ShAmt, DL, VT));
6419 }
6420 
6421 // Optimize (add (shl x, c0), (shl y, c1)) ->
6422 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
6423 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
6424                                   const RISCVSubtarget &Subtarget) {
6425   // Perform this optimization only in the zba extension.
6426   if (!Subtarget.hasStdExtZba())
6427     return SDValue();
6428 
6429   // Skip for vector types and larger types.
6430   EVT VT = N->getValueType(0);
6431   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6432     return SDValue();
6433 
6434   // The two operand nodes must be SHL and have no other use.
6435   SDValue N0 = N->getOperand(0);
6436   SDValue N1 = N->getOperand(1);
6437   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
6438       !N0->hasOneUse() || !N1->hasOneUse())
6439     return SDValue();
6440 
6441   // Check c0 and c1.
6442   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6443   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
6444   if (!N0C || !N1C)
6445     return SDValue();
6446   int64_t C0 = N0C->getSExtValue();
6447   int64_t C1 = N1C->getSExtValue();
6448   if (C0 <= 0 || C1 <= 0)
6449     return SDValue();
6450 
6451   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
6452   int64_t Bits = std::min(C0, C1);
6453   int64_t Diff = std::abs(C0 - C1);
6454   if (Diff != 1 && Diff != 2 && Diff != 3)
6455     return SDValue();
6456 
6457   // Build nodes.
6458   SDLoc DL(N);
6459   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
6460   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
6461   SDValue NA0 =
6462       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
6463   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
6464   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
6465 }
6466 
6467 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
6468 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
6469 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
6470 // not undo itself, but they are redundant.
6471 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
6472   SDValue Src = N->getOperand(0);
6473 
6474   if (Src.getOpcode() != N->getOpcode())
6475     return SDValue();
6476 
6477   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
6478       !isa<ConstantSDNode>(Src.getOperand(1)))
6479     return SDValue();
6480 
6481   unsigned ShAmt1 = N->getConstantOperandVal(1);
6482   unsigned ShAmt2 = Src.getConstantOperandVal(1);
6483   Src = Src.getOperand(0);
6484 
6485   unsigned CombinedShAmt;
6486   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
6487     CombinedShAmt = ShAmt1 | ShAmt2;
6488   else
6489     CombinedShAmt = ShAmt1 ^ ShAmt2;
6490 
6491   if (CombinedShAmt == 0)
6492     return Src;
6493 
6494   SDLoc DL(N);
6495   return DAG.getNode(
6496       N->getOpcode(), DL, N->getValueType(0), Src,
6497       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
6498 }
6499 
6500 // Combine a constant select operand into its use:
6501 //
6502 // (and (select cond, -1, c), x)
6503 //   -> (select cond, x, (and x, c))  [AllOnes=1]
6504 // (or  (select cond, 0, c), x)
6505 //   -> (select cond, x, (or x, c))  [AllOnes=0]
6506 // (xor (select cond, 0, c), x)
6507 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
6508 // (add (select cond, 0, c), x)
6509 //   -> (select cond, x, (add x, c))  [AllOnes=0]
6510 // (sub x, (select cond, 0, c))
6511 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
6512 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6513                                    SelectionDAG &DAG, bool AllOnes) {
6514   EVT VT = N->getValueType(0);
6515 
6516   // Skip vectors.
6517   if (VT.isVector())
6518     return SDValue();
6519 
6520   if ((Slct.getOpcode() != ISD::SELECT &&
6521        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
6522       !Slct.hasOneUse())
6523     return SDValue();
6524 
6525   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
6526     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
6527   };
6528 
6529   bool SwapSelectOps;
6530   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
6531   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
6532   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
6533   SDValue NonConstantVal;
6534   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
6535     SwapSelectOps = false;
6536     NonConstantVal = FalseVal;
6537   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
6538     SwapSelectOps = true;
6539     NonConstantVal = TrueVal;
6540   } else
6541     return SDValue();
6542 
6543   // Slct is now know to be the desired identity constant when CC is true.
6544   TrueVal = OtherOp;
6545   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
6546   // Unless SwapSelectOps says the condition should be false.
6547   if (SwapSelectOps)
6548     std::swap(TrueVal, FalseVal);
6549 
6550   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
6551     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
6552                        {Slct.getOperand(0), Slct.getOperand(1),
6553                         Slct.getOperand(2), TrueVal, FalseVal});
6554 
6555   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
6556                      {Slct.getOperand(0), TrueVal, FalseVal});
6557 }
6558 
6559 // Attempt combineSelectAndUse on each operand of a commutative operator N.
6560 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
6561                                               bool AllOnes) {
6562   SDValue N0 = N->getOperand(0);
6563   SDValue N1 = N->getOperand(1);
6564   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
6565     return Result;
6566   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
6567     return Result;
6568   return SDValue();
6569 }
6570 
6571 // Transform (add (mul x, c0), c1) ->
6572 //           (add (mul (add x, c1/c0), c0), c1%c0).
6573 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
6574 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
6575 // to an infinite loop in DAGCombine if transformed.
6576 // Or transform (add (mul x, c0), c1) ->
6577 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
6578 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
6579 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
6580 // lead to an infinite loop in DAGCombine if transformed.
6581 // Or transform (add (mul x, c0), c1) ->
6582 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
6583 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
6584 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
6585 // lead to an infinite loop in DAGCombine if transformed.
6586 // Or transform (add (mul x, c0), c1) ->
6587 //              (mul (add x, c1/c0), c0).
6588 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
6589 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
6590                                      const RISCVSubtarget &Subtarget) {
6591   // Skip for vector types and larger types.
6592   EVT VT = N->getValueType(0);
6593   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6594     return SDValue();
6595   // The first operand node must be a MUL and has no other use.
6596   SDValue N0 = N->getOperand(0);
6597   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
6598     return SDValue();
6599   // Check if c0 and c1 match above conditions.
6600   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6601   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6602   if (!N0C || !N1C)
6603     return SDValue();
6604   int64_t C0 = N0C->getSExtValue();
6605   int64_t C1 = N1C->getSExtValue();
6606   int64_t CA, CB;
6607   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
6608     return SDValue();
6609   // Search for proper CA (non-zero) and CB that both are simm12.
6610   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
6611       !isInt<12>(C0 * (C1 / C0))) {
6612     CA = C1 / C0;
6613     CB = C1 % C0;
6614   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
6615              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
6616     CA = C1 / C0 + 1;
6617     CB = C1 % C0 - C0;
6618   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
6619              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
6620     CA = C1 / C0 - 1;
6621     CB = C1 % C0 + C0;
6622   } else
6623     return SDValue();
6624   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
6625   SDLoc DL(N);
6626   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
6627                              DAG.getConstant(CA, DL, VT));
6628   SDValue New1 =
6629       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
6630   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
6631 }
6632 
6633 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
6634                                  const RISCVSubtarget &Subtarget) {
6635   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
6636     return V;
6637   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
6638     return V;
6639   // fold (add (select lhs, rhs, cc, 0, y), x) ->
6640   //      (select lhs, rhs, cc, x, (add x, y))
6641   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6642 }
6643 
6644 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
6645   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
6646   //      (select lhs, rhs, cc, x, (sub x, y))
6647   SDValue N0 = N->getOperand(0);
6648   SDValue N1 = N->getOperand(1);
6649   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
6650 }
6651 
6652 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
6653   // fold (and (select lhs, rhs, cc, -1, y), x) ->
6654   //      (select lhs, rhs, cc, x, (and x, y))
6655   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
6656 }
6657 
6658 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
6659                                 const RISCVSubtarget &Subtarget) {
6660   if (Subtarget.hasStdExtZbp()) {
6661     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
6662       return GREV;
6663     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
6664       return GORC;
6665     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
6666       return SHFL;
6667   }
6668 
6669   // fold (or (select cond, 0, y), x) ->
6670   //      (select cond, x, (or x, y))
6671   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6672 }
6673 
6674 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
6675   // fold (xor (select cond, 0, y), x) ->
6676   //      (select cond, x, (xor x, y))
6677   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6678 }
6679 
6680 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
6681 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
6682 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
6683 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
6684 // ADDW/SUBW/MULW.
6685 static SDValue performANY_EXTENDCombine(SDNode *N,
6686                                         TargetLowering::DAGCombinerInfo &DCI,
6687                                         const RISCVSubtarget &Subtarget) {
6688   if (!Subtarget.is64Bit())
6689     return SDValue();
6690 
6691   SelectionDAG &DAG = DCI.DAG;
6692 
6693   SDValue Src = N->getOperand(0);
6694   EVT VT = N->getValueType(0);
6695   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
6696     return SDValue();
6697 
6698   // The opcode must be one that can implicitly sign_extend.
6699   // FIXME: Additional opcodes.
6700   switch (Src.getOpcode()) {
6701   default:
6702     return SDValue();
6703   case ISD::MUL:
6704     if (!Subtarget.hasStdExtM())
6705       return SDValue();
6706     LLVM_FALLTHROUGH;
6707   case ISD::ADD:
6708   case ISD::SUB:
6709     break;
6710   }
6711 
6712   // Only handle cases where the result is used by a CopyToReg. That likely
6713   // means the value is a liveout of the basic block. This helps prevent
6714   // infinite combine loops like PR51206.
6715   if (none_of(N->uses(),
6716               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
6717     return SDValue();
6718 
6719   SmallVector<SDNode *, 4> SetCCs;
6720   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
6721                             UE = Src.getNode()->use_end();
6722        UI != UE; ++UI) {
6723     SDNode *User = *UI;
6724     if (User == N)
6725       continue;
6726     if (UI.getUse().getResNo() != Src.getResNo())
6727       continue;
6728     // All i32 setccs are legalized by sign extending operands.
6729     if (User->getOpcode() == ISD::SETCC) {
6730       SetCCs.push_back(User);
6731       continue;
6732     }
6733     // We don't know if we can extend this user.
6734     break;
6735   }
6736 
6737   // If we don't have any SetCCs, this isn't worthwhile.
6738   if (SetCCs.empty())
6739     return SDValue();
6740 
6741   SDLoc DL(N);
6742   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
6743   DCI.CombineTo(N, SExt);
6744 
6745   // Promote all the setccs.
6746   for (SDNode *SetCC : SetCCs) {
6747     SmallVector<SDValue, 4> Ops;
6748 
6749     for (unsigned j = 0; j != 2; ++j) {
6750       SDValue SOp = SetCC->getOperand(j);
6751       if (SOp == Src)
6752         Ops.push_back(SExt);
6753       else
6754         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
6755     }
6756 
6757     Ops.push_back(SetCC->getOperand(2));
6758     DCI.CombineTo(SetCC,
6759                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6760   }
6761   return SDValue(N, 0);
6762 }
6763 
6764 // Try to form VWMUL or VWMULU.
6765 // FIXME: Support VWMULSU.
6766 static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
6767                                     SelectionDAG &DAG) {
6768   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
6769   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6770   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6771   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
6772     return SDValue();
6773 
6774   SDValue Mask = N->getOperand(2);
6775   SDValue VL = N->getOperand(3);
6776 
6777   // Make sure the mask and VL match.
6778   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
6779     return SDValue();
6780 
6781   MVT VT = N->getSimpleValueType(0);
6782 
6783   // Determine the narrow size for a widening multiply.
6784   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
6785   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
6786                                   VT.getVectorElementCount());
6787 
6788   SDLoc DL(N);
6789 
6790   // See if the other operand is the same opcode.
6791   if (Op0.getOpcode() == Op1.getOpcode()) {
6792     if (!Op1.hasOneUse())
6793       return SDValue();
6794 
6795     // Make sure the mask and VL match.
6796     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
6797       return SDValue();
6798 
6799     Op1 = Op1.getOperand(0);
6800   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
6801     // The operand is a splat of a scalar.
6802 
6803     // The VL must be the same.
6804     if (Op1.getOperand(1) != VL)
6805       return SDValue();
6806 
6807     // Get the scalar value.
6808     Op1 = Op1.getOperand(0);
6809 
6810     // See if have enough sign bits or zero bits in the scalar to use a
6811     // widening multiply by splatting to smaller element size.
6812     unsigned EltBits = VT.getScalarSizeInBits();
6813     unsigned ScalarBits = Op1.getValueSizeInBits();
6814     // Make sure we're getting all element bits from the scalar register.
6815     // FIXME: Support implicit sign extension of vmv.v.x?
6816     if (ScalarBits < EltBits)
6817       return SDValue();
6818 
6819     if (IsSignExt) {
6820       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
6821         return SDValue();
6822     } else {
6823       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
6824       if (!DAG.MaskedValueIsZero(Op1, Mask))
6825         return SDValue();
6826     }
6827 
6828     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
6829   } else
6830     return SDValue();
6831 
6832   Op0 = Op0.getOperand(0);
6833 
6834   // Re-introduce narrower extends if needed.
6835   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6836   if (Op0.getValueType() != NarrowVT)
6837     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6838   if (Op1.getValueType() != NarrowVT)
6839     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6840 
6841   unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6842   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6843 }
6844 
6845 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
6846                                                DAGCombinerInfo &DCI) const {
6847   SelectionDAG &DAG = DCI.DAG;
6848 
6849   // Helper to call SimplifyDemandedBits on an operand of N where only some low
6850   // bits are demanded. N will be added to the Worklist if it was not deleted.
6851   // Caller should return SDValue(N, 0) if this returns true.
6852   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
6853     SDValue Op = N->getOperand(OpNo);
6854     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
6855     if (!SimplifyDemandedBits(Op, Mask, DCI))
6856       return false;
6857 
6858     if (N->getOpcode() != ISD::DELETED_NODE)
6859       DCI.AddToWorklist(N);
6860     return true;
6861   };
6862 
6863   switch (N->getOpcode()) {
6864   default:
6865     break;
6866   case RISCVISD::SplitF64: {
6867     SDValue Op0 = N->getOperand(0);
6868     // If the input to SplitF64 is just BuildPairF64 then the operation is
6869     // redundant. Instead, use BuildPairF64's operands directly.
6870     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
6871       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
6872 
6873     SDLoc DL(N);
6874 
6875     // It's cheaper to materialise two 32-bit integers than to load a double
6876     // from the constant pool and transfer it to integer registers through the
6877     // stack.
6878     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
6879       APInt V = C->getValueAPF().bitcastToAPInt();
6880       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
6881       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
6882       return DCI.CombineTo(N, Lo, Hi);
6883     }
6884 
6885     // This is a target-specific version of a DAGCombine performed in
6886     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6887     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6888     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6889     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6890         !Op0.getNode()->hasOneUse())
6891       break;
6892     SDValue NewSplitF64 =
6893         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
6894                     Op0.getOperand(0));
6895     SDValue Lo = NewSplitF64.getValue(0);
6896     SDValue Hi = NewSplitF64.getValue(1);
6897     APInt SignBit = APInt::getSignMask(32);
6898     if (Op0.getOpcode() == ISD::FNEG) {
6899       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
6900                                   DAG.getConstant(SignBit, DL, MVT::i32));
6901       return DCI.CombineTo(N, Lo, NewHi);
6902     }
6903     assert(Op0.getOpcode() == ISD::FABS);
6904     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
6905                                 DAG.getConstant(~SignBit, DL, MVT::i32));
6906     return DCI.CombineTo(N, Lo, NewHi);
6907   }
6908   case RISCVISD::SLLW:
6909   case RISCVISD::SRAW:
6910   case RISCVISD::SRLW:
6911   case RISCVISD::ROLW:
6912   case RISCVISD::RORW: {
6913     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6914     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6915         SimplifyDemandedLowBitsHelper(1, 5))
6916       return SDValue(N, 0);
6917     break;
6918   }
6919   case RISCVISD::CLZW:
6920   case RISCVISD::CTZW: {
6921     // Only the lower 32 bits of the first operand are read
6922     if (SimplifyDemandedLowBitsHelper(0, 32))
6923       return SDValue(N, 0);
6924     break;
6925   }
6926   case RISCVISD::FSL:
6927   case RISCVISD::FSR: {
6928     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
6929     unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
6930     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6931     if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
6932       return SDValue(N, 0);
6933     break;
6934   }
6935   case RISCVISD::FSLW:
6936   case RISCVISD::FSRW: {
6937     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
6938     // read.
6939     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6940         SimplifyDemandedLowBitsHelper(1, 32) ||
6941         SimplifyDemandedLowBitsHelper(2, 6))
6942       return SDValue(N, 0);
6943     break;
6944   }
6945   case RISCVISD::GREV:
6946   case RISCVISD::GORC: {
6947     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
6948     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6949     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6950     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
6951       return SDValue(N, 0);
6952 
6953     return combineGREVI_GORCI(N, DCI.DAG);
6954   }
6955   case RISCVISD::GREVW:
6956   case RISCVISD::GORCW: {
6957     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6958     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6959         SimplifyDemandedLowBitsHelper(1, 5))
6960       return SDValue(N, 0);
6961 
6962     return combineGREVI_GORCI(N, DCI.DAG);
6963   }
6964   case RISCVISD::SHFL:
6965   case RISCVISD::UNSHFL: {
6966     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
6967     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6968     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6969     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
6970       return SDValue(N, 0);
6971 
6972     break;
6973   }
6974   case RISCVISD::SHFLW:
6975   case RISCVISD::UNSHFLW: {
6976     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6977     SDValue LHS = N->getOperand(0);
6978     SDValue RHS = N->getOperand(1);
6979     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6980     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
6981     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6982         SimplifyDemandedLowBitsHelper(1, 4))
6983       return SDValue(N, 0);
6984 
6985     break;
6986   }
6987   case RISCVISD::BCOMPRESSW:
6988   case RISCVISD::BDECOMPRESSW: {
6989     // Only the lower 32 bits of LHS and RHS are read.
6990     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6991         SimplifyDemandedLowBitsHelper(1, 32))
6992       return SDValue(N, 0);
6993 
6994     break;
6995   }
6996   case RISCVISD::FMV_X_ANYEXTH:
6997   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6998     SDLoc DL(N);
6999     SDValue Op0 = N->getOperand(0);
7000     MVT VT = N->getSimpleValueType(0);
7001     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7002     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7003     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7004     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7005          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7006         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7007          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7008       assert(Op0.getOperand(0).getValueType() == VT &&
7009              "Unexpected value type!");
7010       return Op0.getOperand(0);
7011     }
7012 
7013     // This is a target-specific version of a DAGCombine performed in
7014     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7015     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7016     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7017     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7018         !Op0.getNode()->hasOneUse())
7019       break;
7020     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7021     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7022     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7023     if (Op0.getOpcode() == ISD::FNEG)
7024       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7025                          DAG.getConstant(SignBit, DL, VT));
7026 
7027     assert(Op0.getOpcode() == ISD::FABS);
7028     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7029                        DAG.getConstant(~SignBit, DL, VT));
7030   }
7031   case ISD::ADD:
7032     return performADDCombine(N, DAG, Subtarget);
7033   case ISD::SUB:
7034     return performSUBCombine(N, DAG);
7035   case ISD::AND:
7036     return performANDCombine(N, DAG);
7037   case ISD::OR:
7038     return performORCombine(N, DAG, Subtarget);
7039   case ISD::XOR:
7040     return performXORCombine(N, DAG);
7041   case ISD::ANY_EXTEND:
7042     return performANY_EXTENDCombine(N, DCI, Subtarget);
7043   case ISD::ZERO_EXTEND:
7044     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7045     // type legalization. This is safe because fp_to_uint produces poison if
7046     // it overflows.
7047     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
7048         N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
7049         isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
7050       return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
7051                          N->getOperand(0).getOperand(0));
7052     return SDValue();
7053   case RISCVISD::SELECT_CC: {
7054     // Transform
7055     SDValue LHS = N->getOperand(0);
7056     SDValue RHS = N->getOperand(1);
7057     SDValue TrueV = N->getOperand(3);
7058     SDValue FalseV = N->getOperand(4);
7059 
7060     // If the True and False values are the same, we don't need a select_cc.
7061     if (TrueV == FalseV)
7062       return TrueV;
7063 
7064     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
7065     if (!ISD::isIntEqualitySetCC(CCVal))
7066       break;
7067 
7068     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
7069     //      (select_cc X, Y, lt, trueV, falseV)
7070     // Sometimes the setcc is introduced after select_cc has been formed.
7071     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7072         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7073       // If we're looking for eq 0 instead of ne 0, we need to invert the
7074       // condition.
7075       bool Invert = CCVal == ISD::SETEQ;
7076       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7077       if (Invert)
7078         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7079 
7080       SDLoc DL(N);
7081       RHS = LHS.getOperand(1);
7082       LHS = LHS.getOperand(0);
7083       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7084 
7085       SDValue TargetCC = DAG.getCondCode(CCVal);
7086       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7087                          {LHS, RHS, TargetCC, TrueV, FalseV});
7088     }
7089 
7090     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
7091     //      (select_cc X, Y, eq/ne, trueV, falseV)
7092     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7093       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
7094                          {LHS.getOperand(0), LHS.getOperand(1),
7095                           N->getOperand(2), TrueV, FalseV});
7096     // (select_cc X, 1, setne, trueV, falseV) ->
7097     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
7098     // This can occur when legalizing some floating point comparisons.
7099     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7100     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7101       SDLoc DL(N);
7102       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7103       SDValue TargetCC = DAG.getCondCode(CCVal);
7104       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7105       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7106                          {LHS, RHS, TargetCC, TrueV, FalseV});
7107     }
7108 
7109     break;
7110   }
7111   case RISCVISD::BR_CC: {
7112     SDValue LHS = N->getOperand(1);
7113     SDValue RHS = N->getOperand(2);
7114     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
7115     if (!ISD::isIntEqualitySetCC(CCVal))
7116       break;
7117 
7118     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
7119     //      (br_cc X, Y, lt, dest)
7120     // Sometimes the setcc is introduced after br_cc has been formed.
7121     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7122         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7123       // If we're looking for eq 0 instead of ne 0, we need to invert the
7124       // condition.
7125       bool Invert = CCVal == ISD::SETEQ;
7126       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7127       if (Invert)
7128         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7129 
7130       SDLoc DL(N);
7131       RHS = LHS.getOperand(1);
7132       LHS = LHS.getOperand(0);
7133       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7134 
7135       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7136                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
7137                          N->getOperand(4));
7138     }
7139 
7140     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
7141     //      (br_cc X, Y, eq/ne, trueV, falseV)
7142     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7143       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
7144                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
7145                          N->getOperand(3), N->getOperand(4));
7146 
7147     // (br_cc X, 1, setne, br_cc) ->
7148     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
7149     // This can occur when legalizing some floating point comparisons.
7150     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7151     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7152       SDLoc DL(N);
7153       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7154       SDValue TargetCC = DAG.getCondCode(CCVal);
7155       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7156       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7157                          N->getOperand(0), LHS, RHS, TargetCC,
7158                          N->getOperand(4));
7159     }
7160     break;
7161   }
7162   case ISD::FCOPYSIGN: {
7163     EVT VT = N->getValueType(0);
7164     if (!VT.isVector())
7165       break;
7166     // There is a form of VFSGNJ which injects the negated sign of its second
7167     // operand. Try and bubble any FNEG up after the extend/round to produce
7168     // this optimized pattern. Avoid modifying cases where FP_ROUND and
7169     // TRUNC=1.
7170     SDValue In2 = N->getOperand(1);
7171     // Avoid cases where the extend/round has multiple uses, as duplicating
7172     // those is typically more expensive than removing a fneg.
7173     if (!In2.hasOneUse())
7174       break;
7175     if (In2.getOpcode() != ISD::FP_EXTEND &&
7176         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
7177       break;
7178     In2 = In2.getOperand(0);
7179     if (In2.getOpcode() != ISD::FNEG)
7180       break;
7181     SDLoc DL(N);
7182     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7183     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7184                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7185   }
7186   case ISD::MGATHER:
7187   case ISD::MSCATTER:
7188   case ISD::VP_GATHER:
7189   case ISD::VP_SCATTER: {
7190     if (!DCI.isBeforeLegalize())
7191       break;
7192     SDValue Index, ScaleOp;
7193     bool IsIndexScaled = false;
7194     bool IsIndexSigned = false;
7195     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7196       Index = VPGSN->getIndex();
7197       ScaleOp = VPGSN->getScale();
7198       IsIndexScaled = VPGSN->isIndexScaled();
7199       IsIndexSigned = VPGSN->isIndexSigned();
7200     } else {
7201       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7202       Index = MGSN->getIndex();
7203       ScaleOp = MGSN->getScale();
7204       IsIndexScaled = MGSN->isIndexScaled();
7205       IsIndexSigned = MGSN->isIndexSigned();
7206     }
7207     EVT IndexVT = Index.getValueType();
7208     MVT XLenVT = Subtarget.getXLenVT();
7209     // RISCV indexed loads only support the "unsigned unscaled" addressing
7210     // mode, so anything else must be manually legalized.
7211     bool NeedsIdxLegalization =
7212         IsIndexScaled ||
7213         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7214     if (!NeedsIdxLegalization)
7215       break;
7216 
7217     SDLoc DL(N);
7218 
7219     // Any index legalization should first promote to XLenVT, so we don't lose
7220     // bits when scaling. This may create an illegal index type so we let
7221     // LLVM's legalization take care of the splitting.
7222     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7223     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7224       IndexVT = IndexVT.changeVectorElementType(XLenVT);
7225       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7226                           DL, IndexVT, Index);
7227     }
7228 
7229     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7230     if (IsIndexScaled && Scale != 1) {
7231       // Manually scale the indices by the element size.
7232       // TODO: Sanitize the scale operand here?
7233       // TODO: For VP nodes, should we use VP_SHL here?
7234       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
7235       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7236       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7237     }
7238 
7239     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7240     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7241       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7242                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
7243                               VPGN->getScale(), VPGN->getMask(),
7244                               VPGN->getVectorLength()},
7245                              VPGN->getMemOperand(), NewIndexTy);
7246     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7247       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7248                               {VPSN->getChain(), VPSN->getValue(),
7249                                VPSN->getBasePtr(), Index, VPSN->getScale(),
7250                                VPSN->getMask(), VPSN->getVectorLength()},
7251                               VPSN->getMemOperand(), NewIndexTy);
7252     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7253       return DAG.getMaskedGather(
7254           N->getVTList(), MGN->getMemoryVT(), DL,
7255           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7256            MGN->getBasePtr(), Index, MGN->getScale()},
7257           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7258     const auto *MSN = cast<MaskedScatterSDNode>(N);
7259     return DAG.getMaskedScatter(
7260         N->getVTList(), MSN->getMemoryVT(), DL,
7261         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7262          Index, MSN->getScale()},
7263         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7264   }
7265   case RISCVISD::SRA_VL:
7266   case RISCVISD::SRL_VL:
7267   case RISCVISD::SHL_VL: {
7268     SDValue ShAmt = N->getOperand(1);
7269     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7270       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7271       SDLoc DL(N);
7272       SDValue VL = N->getOperand(3);
7273       EVT VT = N->getValueType(0);
7274       ShAmt =
7275           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7276       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7277                          N->getOperand(2), N->getOperand(3));
7278     }
7279     break;
7280   }
7281   case ISD::SRA:
7282   case ISD::SRL:
7283   case ISD::SHL: {
7284     SDValue ShAmt = N->getOperand(1);
7285     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7286       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7287       SDLoc DL(N);
7288       EVT VT = N->getValueType(0);
7289       ShAmt =
7290           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7291       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7292     }
7293     break;
7294   }
7295   case RISCVISD::MUL_VL: {
7296     SDValue Op0 = N->getOperand(0);
7297     SDValue Op1 = N->getOperand(1);
7298     if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
7299       return V;
7300     if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
7301       return V;
7302     return SDValue();
7303   }
7304   case ISD::STORE: {
7305     auto *Store = cast<StoreSDNode>(N);
7306     SDValue Val = Store->getValue();
7307     // Combine store of vmv.x.s to vse with VL of 1.
7308     // FIXME: Support FP.
7309     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
7310       SDValue Src = Val.getOperand(0);
7311       EVT VecVT = Src.getValueType();
7312       EVT MemVT = Store->getMemoryVT();
7313       // The memory VT and the element type must match.
7314       if (VecVT.getVectorElementType() == MemVT) {
7315         SDLoc DL(N);
7316         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7317         return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
7318                               DAG.getConstant(1, DL, MaskVT),
7319                               DAG.getConstant(1, DL, Subtarget.getXLenVT()),
7320                               Store->getPointerInfo(),
7321                               Store->getOriginalAlign(),
7322                               Store->getMemOperand()->getFlags());
7323       }
7324     }
7325 
7326     break;
7327   }
7328   }
7329 
7330   return SDValue();
7331 }
7332 
7333 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
7334     const SDNode *N, CombineLevel Level) const {
7335   // The following folds are only desirable if `(OP _, c1 << c2)` can be
7336   // materialised in fewer instructions than `(OP _, c1)`:
7337   //
7338   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7339   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
7340   SDValue N0 = N->getOperand(0);
7341   EVT Ty = N0.getValueType();
7342   if (Ty.isScalarInteger() &&
7343       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
7344     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7345     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
7346     if (C1 && C2) {
7347       const APInt &C1Int = C1->getAPIntValue();
7348       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
7349 
7350       // We can materialise `c1 << c2` into an add immediate, so it's "free",
7351       // and the combine should happen, to potentially allow further combines
7352       // later.
7353       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
7354           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
7355         return true;
7356 
7357       // We can materialise `c1` in an add immediate, so it's "free", and the
7358       // combine should be prevented.
7359       if (C1Int.getMinSignedBits() <= 64 &&
7360           isLegalAddImmediate(C1Int.getSExtValue()))
7361         return false;
7362 
7363       // Neither constant will fit into an immediate, so find materialisation
7364       // costs.
7365       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
7366                                               Subtarget.getFeatureBits(),
7367                                               /*CompressionCost*/true);
7368       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
7369           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
7370           /*CompressionCost*/true);
7371 
7372       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
7373       // combine should be prevented.
7374       if (C1Cost < ShiftedC1Cost)
7375         return false;
7376     }
7377   }
7378   return true;
7379 }
7380 
7381 bool RISCVTargetLowering::targetShrinkDemandedConstant(
7382     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7383     TargetLoweringOpt &TLO) const {
7384   // Delay this optimization as late as possible.
7385   if (!TLO.LegalOps)
7386     return false;
7387 
7388   EVT VT = Op.getValueType();
7389   if (VT.isVector())
7390     return false;
7391 
7392   // Only handle AND for now.
7393   if (Op.getOpcode() != ISD::AND)
7394     return false;
7395 
7396   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
7397   if (!C)
7398     return false;
7399 
7400   const APInt &Mask = C->getAPIntValue();
7401 
7402   // Clear all non-demanded bits initially.
7403   APInt ShrunkMask = Mask & DemandedBits;
7404 
7405   // Try to make a smaller immediate by setting undemanded bits.
7406 
7407   APInt ExpandedMask = Mask | ~DemandedBits;
7408 
7409   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
7410     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
7411   };
7412   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
7413     if (NewMask == Mask)
7414       return true;
7415     SDLoc DL(Op);
7416     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
7417     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
7418     return TLO.CombineTo(Op, NewOp);
7419   };
7420 
7421   // If the shrunk mask fits in sign extended 12 bits, let the target
7422   // independent code apply it.
7423   if (ShrunkMask.isSignedIntN(12))
7424     return false;
7425 
7426   // Preserve (and X, 0xffff) when zext.h is supported.
7427   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
7428     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
7429     if (IsLegalMask(NewMask))
7430       return UseMask(NewMask);
7431   }
7432 
7433   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
7434   if (VT == MVT::i64) {
7435     APInt NewMask = APInt(64, 0xffffffff);
7436     if (IsLegalMask(NewMask))
7437       return UseMask(NewMask);
7438   }
7439 
7440   // For the remaining optimizations, we need to be able to make a negative
7441   // number through a combination of mask and undemanded bits.
7442   if (!ExpandedMask.isNegative())
7443     return false;
7444 
7445   // What is the fewest number of bits we need to represent the negative number.
7446   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
7447 
7448   // Try to make a 12 bit negative immediate. If that fails try to make a 32
7449   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
7450   APInt NewMask = ShrunkMask;
7451   if (MinSignedBits <= 12)
7452     NewMask.setBitsFrom(11);
7453   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
7454     NewMask.setBitsFrom(31);
7455   else
7456     return false;
7457 
7458   // Check that our new mask is a subset of the demanded mask.
7459   assert(IsLegalMask(NewMask));
7460   return UseMask(NewMask);
7461 }
7462 
7463 static void computeGREV(APInt &Src, unsigned ShAmt) {
7464   ShAmt &= Src.getBitWidth() - 1;
7465   uint64_t x = Src.getZExtValue();
7466   if (ShAmt & 1)
7467     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
7468   if (ShAmt & 2)
7469     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
7470   if (ShAmt & 4)
7471     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
7472   if (ShAmt & 8)
7473     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
7474   if (ShAmt & 16)
7475     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
7476   if (ShAmt & 32)
7477     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
7478   Src = x;
7479 }
7480 
7481 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
7482                                                         KnownBits &Known,
7483                                                         const APInt &DemandedElts,
7484                                                         const SelectionDAG &DAG,
7485                                                         unsigned Depth) const {
7486   unsigned BitWidth = Known.getBitWidth();
7487   unsigned Opc = Op.getOpcode();
7488   assert((Opc >= ISD::BUILTIN_OP_END ||
7489           Opc == ISD::INTRINSIC_WO_CHAIN ||
7490           Opc == ISD::INTRINSIC_W_CHAIN ||
7491           Opc == ISD::INTRINSIC_VOID) &&
7492          "Should use MaskedValueIsZero if you don't know whether Op"
7493          " is a target node!");
7494 
7495   Known.resetAll();
7496   switch (Opc) {
7497   default: break;
7498   case RISCVISD::SELECT_CC: {
7499     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
7500     // If we don't know any bits, early out.
7501     if (Known.isUnknown())
7502       break;
7503     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
7504 
7505     // Only known if known in both the LHS and RHS.
7506     Known = KnownBits::commonBits(Known, Known2);
7507     break;
7508   }
7509   case RISCVISD::REMUW: {
7510     KnownBits Known2;
7511     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7512     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7513     // We only care about the lower 32 bits.
7514     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
7515     // Restore the original width by sign extending.
7516     Known = Known.sext(BitWidth);
7517     break;
7518   }
7519   case RISCVISD::DIVUW: {
7520     KnownBits Known2;
7521     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7522     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7523     // We only care about the lower 32 bits.
7524     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
7525     // Restore the original width by sign extending.
7526     Known = Known.sext(BitWidth);
7527     break;
7528   }
7529   case RISCVISD::CTZW: {
7530     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7531     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
7532     unsigned LowBits = Log2_32(PossibleTZ) + 1;
7533     Known.Zero.setBitsFrom(LowBits);
7534     break;
7535   }
7536   case RISCVISD::CLZW: {
7537     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7538     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
7539     unsigned LowBits = Log2_32(PossibleLZ) + 1;
7540     Known.Zero.setBitsFrom(LowBits);
7541     break;
7542   }
7543   case RISCVISD::GREV:
7544   case RISCVISD::GREVW: {
7545     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7546       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7547       if (Opc == RISCVISD::GREVW)
7548         Known = Known.trunc(32);
7549       unsigned ShAmt = C->getZExtValue();
7550       computeGREV(Known.Zero, ShAmt);
7551       computeGREV(Known.One, ShAmt);
7552       if (Opc == RISCVISD::GREVW)
7553         Known = Known.sext(BitWidth);
7554     }
7555     break;
7556   }
7557   case RISCVISD::READ_VLENB:
7558     // We assume VLENB is at least 16 bytes.
7559     Known.Zero.setLowBits(4);
7560     // We assume VLENB is no more than 65536 / 8 bytes.
7561     Known.Zero.setBitsFrom(14);
7562     break;
7563   case ISD::INTRINSIC_W_CHAIN: {
7564     unsigned IntNo = Op.getConstantOperandVal(1);
7565     switch (IntNo) {
7566     default:
7567       // We can't do anything for most intrinsics.
7568       break;
7569     case Intrinsic::riscv_vsetvli:
7570     case Intrinsic::riscv_vsetvlimax:
7571       // Assume that VL output is positive and would fit in an int32_t.
7572       // TODO: VLEN might be capped at 16 bits in a future V spec update.
7573       if (BitWidth >= 32)
7574         Known.Zero.setBitsFrom(31);
7575       break;
7576     }
7577     break;
7578   }
7579   }
7580 }
7581 
7582 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
7583     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
7584     unsigned Depth) const {
7585   switch (Op.getOpcode()) {
7586   default:
7587     break;
7588   case RISCVISD::SELECT_CC: {
7589     unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
7590     if (Tmp == 1) return 1;  // Early out.
7591     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
7592     return std::min(Tmp, Tmp2);
7593   }
7594   case RISCVISD::SLLW:
7595   case RISCVISD::SRAW:
7596   case RISCVISD::SRLW:
7597   case RISCVISD::DIVW:
7598   case RISCVISD::DIVUW:
7599   case RISCVISD::REMUW:
7600   case RISCVISD::ROLW:
7601   case RISCVISD::RORW:
7602   case RISCVISD::GREVW:
7603   case RISCVISD::GORCW:
7604   case RISCVISD::FSLW:
7605   case RISCVISD::FSRW:
7606   case RISCVISD::SHFLW:
7607   case RISCVISD::UNSHFLW:
7608   case RISCVISD::BCOMPRESSW:
7609   case RISCVISD::BDECOMPRESSW:
7610   case RISCVISD::FCVT_W_RTZ_RV64:
7611   case RISCVISD::FCVT_WU_RTZ_RV64:
7612     // TODO: As the result is sign-extended, this is conservatively correct. A
7613     // more precise answer could be calculated for SRAW depending on known
7614     // bits in the shift amount.
7615     return 33;
7616   case RISCVISD::SHFL:
7617   case RISCVISD::UNSHFL: {
7618     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
7619     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
7620     // will stay within the upper 32 bits. If there were more than 32 sign bits
7621     // before there will be at least 33 sign bits after.
7622     if (Op.getValueType() == MVT::i64 &&
7623         isa<ConstantSDNode>(Op.getOperand(1)) &&
7624         (Op.getConstantOperandVal(1) & 0x10) == 0) {
7625       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
7626       if (Tmp > 32)
7627         return 33;
7628     }
7629     break;
7630   }
7631   case RISCVISD::VMV_X_S:
7632     // The number of sign bits of the scalar result is computed by obtaining the
7633     // element type of the input vector operand, subtracting its width from the
7634     // XLEN, and then adding one (sign bit within the element type). If the
7635     // element type is wider than XLen, the least-significant XLEN bits are
7636     // taken.
7637     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
7638       return 1;
7639     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
7640   }
7641 
7642   return 1;
7643 }
7644 
7645 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
7646                                                   MachineBasicBlock *BB) {
7647   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
7648 
7649   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
7650   // Should the count have wrapped while it was being read, we need to try
7651   // again.
7652   // ...
7653   // read:
7654   // rdcycleh x3 # load high word of cycle
7655   // rdcycle  x2 # load low word of cycle
7656   // rdcycleh x4 # load high word of cycle
7657   // bne x3, x4, read # check if high word reads match, otherwise try again
7658   // ...
7659 
7660   MachineFunction &MF = *BB->getParent();
7661   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7662   MachineFunction::iterator It = ++BB->getIterator();
7663 
7664   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7665   MF.insert(It, LoopMBB);
7666 
7667   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7668   MF.insert(It, DoneMBB);
7669 
7670   // Transfer the remainder of BB and its successor edges to DoneMBB.
7671   DoneMBB->splice(DoneMBB->begin(), BB,
7672                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
7673   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
7674 
7675   BB->addSuccessor(LoopMBB);
7676 
7677   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7678   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7679   Register LoReg = MI.getOperand(0).getReg();
7680   Register HiReg = MI.getOperand(1).getReg();
7681   DebugLoc DL = MI.getDebugLoc();
7682 
7683   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
7684   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
7685       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7686       .addReg(RISCV::X0);
7687   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
7688       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
7689       .addReg(RISCV::X0);
7690   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
7691       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7692       .addReg(RISCV::X0);
7693 
7694   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
7695       .addReg(HiReg)
7696       .addReg(ReadAgainReg)
7697       .addMBB(LoopMBB);
7698 
7699   LoopMBB->addSuccessor(LoopMBB);
7700   LoopMBB->addSuccessor(DoneMBB);
7701 
7702   MI.eraseFromParent();
7703 
7704   return DoneMBB;
7705 }
7706 
7707 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
7708                                              MachineBasicBlock *BB) {
7709   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
7710 
7711   MachineFunction &MF = *BB->getParent();
7712   DebugLoc DL = MI.getDebugLoc();
7713   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7714   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7715   Register LoReg = MI.getOperand(0).getReg();
7716   Register HiReg = MI.getOperand(1).getReg();
7717   Register SrcReg = MI.getOperand(2).getReg();
7718   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
7719   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7720 
7721   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
7722                           RI);
7723   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7724   MachineMemOperand *MMOLo =
7725       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
7726   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7727       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
7728   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
7729       .addFrameIndex(FI)
7730       .addImm(0)
7731       .addMemOperand(MMOLo);
7732   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
7733       .addFrameIndex(FI)
7734       .addImm(4)
7735       .addMemOperand(MMOHi);
7736   MI.eraseFromParent(); // The pseudo instruction is gone now.
7737   return BB;
7738 }
7739 
7740 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
7741                                                  MachineBasicBlock *BB) {
7742   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
7743          "Unexpected instruction");
7744 
7745   MachineFunction &MF = *BB->getParent();
7746   DebugLoc DL = MI.getDebugLoc();
7747   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7748   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7749   Register DstReg = MI.getOperand(0).getReg();
7750   Register LoReg = MI.getOperand(1).getReg();
7751   Register HiReg = MI.getOperand(2).getReg();
7752   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
7753   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7754 
7755   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7756   MachineMemOperand *MMOLo =
7757       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
7758   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7759       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
7760   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7761       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
7762       .addFrameIndex(FI)
7763       .addImm(0)
7764       .addMemOperand(MMOLo);
7765   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7766       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
7767       .addFrameIndex(FI)
7768       .addImm(4)
7769       .addMemOperand(MMOHi);
7770   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
7771   MI.eraseFromParent(); // The pseudo instruction is gone now.
7772   return BB;
7773 }
7774 
7775 static bool isSelectPseudo(MachineInstr &MI) {
7776   switch (MI.getOpcode()) {
7777   default:
7778     return false;
7779   case RISCV::Select_GPR_Using_CC_GPR:
7780   case RISCV::Select_FPR16_Using_CC_GPR:
7781   case RISCV::Select_FPR32_Using_CC_GPR:
7782   case RISCV::Select_FPR64_Using_CC_GPR:
7783     return true;
7784   }
7785 }
7786 
7787 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
7788                                            MachineBasicBlock *BB,
7789                                            const RISCVSubtarget &Subtarget) {
7790   // To "insert" Select_* instructions, we actually have to insert the triangle
7791   // control-flow pattern.  The incoming instructions know the destination vreg
7792   // to set, the condition code register to branch on, the true/false values to
7793   // select between, and the condcode to use to select the appropriate branch.
7794   //
7795   // We produce the following control flow:
7796   //     HeadMBB
7797   //     |  \
7798   //     |  IfFalseMBB
7799   //     | /
7800   //    TailMBB
7801   //
7802   // When we find a sequence of selects we attempt to optimize their emission
7803   // by sharing the control flow. Currently we only handle cases where we have
7804   // multiple selects with the exact same condition (same LHS, RHS and CC).
7805   // The selects may be interleaved with other instructions if the other
7806   // instructions meet some requirements we deem safe:
7807   // - They are debug instructions. Otherwise,
7808   // - They do not have side-effects, do not access memory and their inputs do
7809   //   not depend on the results of the select pseudo-instructions.
7810   // The TrueV/FalseV operands of the selects cannot depend on the result of
7811   // previous selects in the sequence.
7812   // These conditions could be further relaxed. See the X86 target for a
7813   // related approach and more information.
7814   Register LHS = MI.getOperand(1).getReg();
7815   Register RHS = MI.getOperand(2).getReg();
7816   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
7817 
7818   SmallVector<MachineInstr *, 4> SelectDebugValues;
7819   SmallSet<Register, 4> SelectDests;
7820   SelectDests.insert(MI.getOperand(0).getReg());
7821 
7822   MachineInstr *LastSelectPseudo = &MI;
7823 
7824   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
7825        SequenceMBBI != E; ++SequenceMBBI) {
7826     if (SequenceMBBI->isDebugInstr())
7827       continue;
7828     else if (isSelectPseudo(*SequenceMBBI)) {
7829       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
7830           SequenceMBBI->getOperand(2).getReg() != RHS ||
7831           SequenceMBBI->getOperand(3).getImm() != CC ||
7832           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
7833           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
7834         break;
7835       LastSelectPseudo = &*SequenceMBBI;
7836       SequenceMBBI->collectDebugValues(SelectDebugValues);
7837       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
7838     } else {
7839       if (SequenceMBBI->hasUnmodeledSideEffects() ||
7840           SequenceMBBI->mayLoadOrStore())
7841         break;
7842       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
7843             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
7844           }))
7845         break;
7846     }
7847   }
7848 
7849   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
7850   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7851   DebugLoc DL = MI.getDebugLoc();
7852   MachineFunction::iterator I = ++BB->getIterator();
7853 
7854   MachineBasicBlock *HeadMBB = BB;
7855   MachineFunction *F = BB->getParent();
7856   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
7857   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
7858 
7859   F->insert(I, IfFalseMBB);
7860   F->insert(I, TailMBB);
7861 
7862   // Transfer debug instructions associated with the selects to TailMBB.
7863   for (MachineInstr *DebugInstr : SelectDebugValues) {
7864     TailMBB->push_back(DebugInstr->removeFromParent());
7865   }
7866 
7867   // Move all instructions after the sequence to TailMBB.
7868   TailMBB->splice(TailMBB->end(), HeadMBB,
7869                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
7870   // Update machine-CFG edges by transferring all successors of the current
7871   // block to the new block which will contain the Phi nodes for the selects.
7872   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
7873   // Set the successors for HeadMBB.
7874   HeadMBB->addSuccessor(IfFalseMBB);
7875   HeadMBB->addSuccessor(TailMBB);
7876 
7877   // Insert appropriate branch.
7878   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
7879     .addReg(LHS)
7880     .addReg(RHS)
7881     .addMBB(TailMBB);
7882 
7883   // IfFalseMBB just falls through to TailMBB.
7884   IfFalseMBB->addSuccessor(TailMBB);
7885 
7886   // Create PHIs for all of the select pseudo-instructions.
7887   auto SelectMBBI = MI.getIterator();
7888   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7889   auto InsertionPoint = TailMBB->begin();
7890   while (SelectMBBI != SelectEnd) {
7891     auto Next = std::next(SelectMBBI);
7892     if (isSelectPseudo(*SelectMBBI)) {
7893       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
7894       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
7895               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
7896           .addReg(SelectMBBI->getOperand(4).getReg())
7897           .addMBB(HeadMBB)
7898           .addReg(SelectMBBI->getOperand(5).getReg())
7899           .addMBB(IfFalseMBB);
7900       SelectMBBI->eraseFromParent();
7901     }
7902     SelectMBBI = Next;
7903   }
7904 
7905   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7906   return TailMBB;
7907 }
7908 
7909 MachineBasicBlock *
7910 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
7911                                                  MachineBasicBlock *BB) const {
7912   switch (MI.getOpcode()) {
7913   default:
7914     llvm_unreachable("Unexpected instr type to insert");
7915   case RISCV::ReadCycleWide:
7916     assert(!Subtarget.is64Bit() &&
7917            "ReadCycleWrite is only to be used on riscv32");
7918     return emitReadCycleWidePseudo(MI, BB);
7919   case RISCV::Select_GPR_Using_CC_GPR:
7920   case RISCV::Select_FPR16_Using_CC_GPR:
7921   case RISCV::Select_FPR32_Using_CC_GPR:
7922   case RISCV::Select_FPR64_Using_CC_GPR:
7923     return emitSelectPseudo(MI, BB, Subtarget);
7924   case RISCV::BuildPairF64Pseudo:
7925     return emitBuildPairF64Pseudo(MI, BB);
7926   case RISCV::SplitF64Pseudo:
7927     return emitSplitF64Pseudo(MI, BB);
7928   }
7929 }
7930 
7931 // Calling Convention Implementation.
7932 // The expectations for frontend ABI lowering vary from target to target.
7933 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
7934 // details, but this is a longer term goal. For now, we simply try to keep the
7935 // role of the frontend as simple and well-defined as possible. The rules can
7936 // be summarised as:
7937 // * Never split up large scalar arguments. We handle them here.
7938 // * If a hardfloat calling convention is being used, and the struct may be
7939 // passed in a pair of registers (fp+fp, int+fp), and both registers are
7940 // available, then pass as two separate arguments. If either the GPRs or FPRs
7941 // are exhausted, then pass according to the rule below.
7942 // * If a struct could never be passed in registers or directly in a stack
7943 // slot (as it is larger than 2*XLEN and the floating point rules don't
7944 // apply), then pass it using a pointer with the byval attribute.
7945 // * If a struct is less than 2*XLEN, then coerce to either a two-element
7946 // word-sized array or a 2*XLEN scalar (depending on alignment).
7947 // * The frontend can determine whether a struct is returned by reference or
7948 // not based on its size and fields. If it will be returned by reference, the
7949 // frontend must modify the prototype so a pointer with the sret annotation is
7950 // passed as the first argument. This is not necessary for large scalar
7951 // returns.
7952 // * Struct return values and varargs should be coerced to structs containing
7953 // register-size fields in the same situations they would be for fixed
7954 // arguments.
7955 
7956 static const MCPhysReg ArgGPRs[] = {
7957   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
7958   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
7959 };
7960 static const MCPhysReg ArgFPR16s[] = {
7961   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
7962   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
7963 };
7964 static const MCPhysReg ArgFPR32s[] = {
7965   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
7966   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
7967 };
7968 static const MCPhysReg ArgFPR64s[] = {
7969   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
7970   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
7971 };
7972 // This is an interim calling convention and it may be changed in the future.
7973 static const MCPhysReg ArgVRs[] = {
7974     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
7975     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
7976     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
7977 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
7978                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
7979                                      RISCV::V20M2, RISCV::V22M2};
7980 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
7981                                      RISCV::V20M4};
7982 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
7983 
7984 // Pass a 2*XLEN argument that has been split into two XLEN values through
7985 // registers or the stack as necessary.
7986 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
7987                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
7988                                 MVT ValVT2, MVT LocVT2,
7989                                 ISD::ArgFlagsTy ArgFlags2) {
7990   unsigned XLenInBytes = XLen / 8;
7991   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7992     // At least one half can be passed via register.
7993     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
7994                                      VA1.getLocVT(), CCValAssign::Full));
7995   } else {
7996     // Both halves must be passed on the stack, with proper alignment.
7997     Align StackAlign =
7998         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7999     State.addLoc(
8000         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
8001                             State.AllocateStack(XLenInBytes, StackAlign),
8002                             VA1.getLocVT(), CCValAssign::Full));
8003     State.addLoc(CCValAssign::getMem(
8004         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8005         LocVT2, CCValAssign::Full));
8006     return false;
8007   }
8008 
8009   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8010     // The second half can also be passed via register.
8011     State.addLoc(
8012         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
8013   } else {
8014     // The second half is passed via the stack, without additional alignment.
8015     State.addLoc(CCValAssign::getMem(
8016         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8017         LocVT2, CCValAssign::Full));
8018   }
8019 
8020   return false;
8021 }
8022 
8023 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
8024                                Optional<unsigned> FirstMaskArgument,
8025                                CCState &State, const RISCVTargetLowering &TLI) {
8026   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
8027   if (RC == &RISCV::VRRegClass) {
8028     // Assign the first mask argument to V0.
8029     // This is an interim calling convention and it may be changed in the
8030     // future.
8031     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
8032       return State.AllocateReg(RISCV::V0);
8033     return State.AllocateReg(ArgVRs);
8034   }
8035   if (RC == &RISCV::VRM2RegClass)
8036     return State.AllocateReg(ArgVRM2s);
8037   if (RC == &RISCV::VRM4RegClass)
8038     return State.AllocateReg(ArgVRM4s);
8039   if (RC == &RISCV::VRM8RegClass)
8040     return State.AllocateReg(ArgVRM8s);
8041   llvm_unreachable("Unhandled register class for ValueType");
8042 }
8043 
8044 // Implements the RISC-V calling convention. Returns true upon failure.
8045 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
8046                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
8047                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
8048                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
8049                      Optional<unsigned> FirstMaskArgument) {
8050   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
8051   assert(XLen == 32 || XLen == 64);
8052   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
8053 
8054   // Any return value split in to more than two values can't be returned
8055   // directly. Vectors are returned via the available vector registers.
8056   if (!LocVT.isVector() && IsRet && ValNo > 1)
8057     return true;
8058 
8059   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
8060   // variadic argument, or if no F16/F32 argument registers are available.
8061   bool UseGPRForF16_F32 = true;
8062   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
8063   // variadic argument, or if no F64 argument registers are available.
8064   bool UseGPRForF64 = true;
8065 
8066   switch (ABI) {
8067   default:
8068     llvm_unreachable("Unexpected ABI");
8069   case RISCVABI::ABI_ILP32:
8070   case RISCVABI::ABI_LP64:
8071     break;
8072   case RISCVABI::ABI_ILP32F:
8073   case RISCVABI::ABI_LP64F:
8074     UseGPRForF16_F32 = !IsFixed;
8075     break;
8076   case RISCVABI::ABI_ILP32D:
8077   case RISCVABI::ABI_LP64D:
8078     UseGPRForF16_F32 = !IsFixed;
8079     UseGPRForF64 = !IsFixed;
8080     break;
8081   }
8082 
8083   // FPR16, FPR32, and FPR64 alias each other.
8084   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
8085     UseGPRForF16_F32 = true;
8086     UseGPRForF64 = true;
8087   }
8088 
8089   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
8090   // similar local variables rather than directly checking against the target
8091   // ABI.
8092 
8093   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
8094     LocVT = XLenVT;
8095     LocInfo = CCValAssign::BCvt;
8096   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
8097     LocVT = MVT::i64;
8098     LocInfo = CCValAssign::BCvt;
8099   }
8100 
8101   // If this is a variadic argument, the RISC-V calling convention requires
8102   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
8103   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
8104   // be used regardless of whether the original argument was split during
8105   // legalisation or not. The argument will not be passed by registers if the
8106   // original type is larger than 2*XLEN, so the register alignment rule does
8107   // not apply.
8108   unsigned TwoXLenInBytes = (2 * XLen) / 8;
8109   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
8110       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
8111     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
8112     // Skip 'odd' register if necessary.
8113     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
8114       State.AllocateReg(ArgGPRs);
8115   }
8116 
8117   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
8118   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
8119       State.getPendingArgFlags();
8120 
8121   assert(PendingLocs.size() == PendingArgFlags.size() &&
8122          "PendingLocs and PendingArgFlags out of sync");
8123 
8124   // Handle passing f64 on RV32D with a soft float ABI or when floating point
8125   // registers are exhausted.
8126   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
8127     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
8128            "Can't lower f64 if it is split");
8129     // Depending on available argument GPRS, f64 may be passed in a pair of
8130     // GPRs, split between a GPR and the stack, or passed completely on the
8131     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
8132     // cases.
8133     Register Reg = State.AllocateReg(ArgGPRs);
8134     LocVT = MVT::i32;
8135     if (!Reg) {
8136       unsigned StackOffset = State.AllocateStack(8, Align(8));
8137       State.addLoc(
8138           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8139       return false;
8140     }
8141     if (!State.AllocateReg(ArgGPRs))
8142       State.AllocateStack(4, Align(4));
8143     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8144     return false;
8145   }
8146 
8147   // Fixed-length vectors are located in the corresponding scalable-vector
8148   // container types.
8149   if (ValVT.isFixedLengthVector())
8150     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8151 
8152   // Split arguments might be passed indirectly, so keep track of the pending
8153   // values. Split vectors are passed via a mix of registers and indirectly, so
8154   // treat them as we would any other argument.
8155   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
8156     LocVT = XLenVT;
8157     LocInfo = CCValAssign::Indirect;
8158     PendingLocs.push_back(
8159         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
8160     PendingArgFlags.push_back(ArgFlags);
8161     if (!ArgFlags.isSplitEnd()) {
8162       return false;
8163     }
8164   }
8165 
8166   // If the split argument only had two elements, it should be passed directly
8167   // in registers or on the stack.
8168   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
8169       PendingLocs.size() <= 2) {
8170     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
8171     // Apply the normal calling convention rules to the first half of the
8172     // split argument.
8173     CCValAssign VA = PendingLocs[0];
8174     ISD::ArgFlagsTy AF = PendingArgFlags[0];
8175     PendingLocs.clear();
8176     PendingArgFlags.clear();
8177     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
8178                                ArgFlags);
8179   }
8180 
8181   // Allocate to a register if possible, or else a stack slot.
8182   Register Reg;
8183   unsigned StoreSizeBytes = XLen / 8;
8184   Align StackAlign = Align(XLen / 8);
8185 
8186   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8187     Reg = State.AllocateReg(ArgFPR16s);
8188   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8189     Reg = State.AllocateReg(ArgFPR32s);
8190   else if (ValVT == MVT::f64 && !UseGPRForF64)
8191     Reg = State.AllocateReg(ArgFPR64s);
8192   else if (ValVT.isVector()) {
8193     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8194     if (!Reg) {
8195       // For return values, the vector must be passed fully via registers or
8196       // via the stack.
8197       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8198       // but we're using all of them.
8199       if (IsRet)
8200         return true;
8201       // Try using a GPR to pass the address
8202       if ((Reg = State.AllocateReg(ArgGPRs))) {
8203         LocVT = XLenVT;
8204         LocInfo = CCValAssign::Indirect;
8205       } else if (ValVT.isScalableVector()) {
8206         report_fatal_error("Unable to pass scalable vector types on the stack");
8207       } else {
8208         // Pass fixed-length vectors on the stack.
8209         LocVT = ValVT;
8210         StoreSizeBytes = ValVT.getStoreSize();
8211         // Align vectors to their element sizes, being careful for vXi1
8212         // vectors.
8213         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8214       }
8215     }
8216   } else {
8217     Reg = State.AllocateReg(ArgGPRs);
8218   }
8219 
8220   unsigned StackOffset =
8221       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8222 
8223   // If we reach this point and PendingLocs is non-empty, we must be at the
8224   // end of a split argument that must be passed indirectly.
8225   if (!PendingLocs.empty()) {
8226     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
8227     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
8228 
8229     for (auto &It : PendingLocs) {
8230       if (Reg)
8231         It.convertToReg(Reg);
8232       else
8233         It.convertToMem(StackOffset);
8234       State.addLoc(It);
8235     }
8236     PendingLocs.clear();
8237     PendingArgFlags.clear();
8238     return false;
8239   }
8240 
8241   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
8242           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
8243          "Expected an XLenVT or vector types at this stage");
8244 
8245   if (Reg) {
8246     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8247     return false;
8248   }
8249 
8250   // When a floating-point value is passed on the stack, no bit-conversion is
8251   // needed.
8252   if (ValVT.isFloatingPoint()) {
8253     LocVT = ValVT;
8254     LocInfo = CCValAssign::Full;
8255   }
8256   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8257   return false;
8258 }
8259 
8260 template <typename ArgTy>
8261 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
8262   for (const auto &ArgIdx : enumerate(Args)) {
8263     MVT ArgVT = ArgIdx.value().VT;
8264     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
8265       return ArgIdx.index();
8266   }
8267   return None;
8268 }
8269 
8270 void RISCVTargetLowering::analyzeInputArgs(
8271     MachineFunction &MF, CCState &CCInfo,
8272     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
8273     RISCVCCAssignFn Fn) const {
8274   unsigned NumArgs = Ins.size();
8275   FunctionType *FType = MF.getFunction().getFunctionType();
8276 
8277   Optional<unsigned> FirstMaskArgument;
8278   if (Subtarget.hasVInstructions())
8279     FirstMaskArgument = preAssignMask(Ins);
8280 
8281   for (unsigned i = 0; i != NumArgs; ++i) {
8282     MVT ArgVT = Ins[i].VT;
8283     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
8284 
8285     Type *ArgTy = nullptr;
8286     if (IsRet)
8287       ArgTy = FType->getReturnType();
8288     else if (Ins[i].isOrigArg())
8289       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8290 
8291     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8292     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8293            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
8294            FirstMaskArgument)) {
8295       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
8296                         << EVT(ArgVT).getEVTString() << '\n');
8297       llvm_unreachable(nullptr);
8298     }
8299   }
8300 }
8301 
8302 void RISCVTargetLowering::analyzeOutputArgs(
8303     MachineFunction &MF, CCState &CCInfo,
8304     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
8305     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
8306   unsigned NumArgs = Outs.size();
8307 
8308   Optional<unsigned> FirstMaskArgument;
8309   if (Subtarget.hasVInstructions())
8310     FirstMaskArgument = preAssignMask(Outs);
8311 
8312   for (unsigned i = 0; i != NumArgs; i++) {
8313     MVT ArgVT = Outs[i].VT;
8314     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8315     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8316 
8317     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8318     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8319            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
8320            FirstMaskArgument)) {
8321       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
8322                         << EVT(ArgVT).getEVTString() << "\n");
8323       llvm_unreachable(nullptr);
8324     }
8325   }
8326 }
8327 
8328 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
8329 // values.
8330 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
8331                                    const CCValAssign &VA, const SDLoc &DL,
8332                                    const RISCVSubtarget &Subtarget) {
8333   switch (VA.getLocInfo()) {
8334   default:
8335     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8336   case CCValAssign::Full:
8337     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
8338       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
8339     break;
8340   case CCValAssign::BCvt:
8341     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8342       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
8343     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8344       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
8345     else
8346       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
8347     break;
8348   }
8349   return Val;
8350 }
8351 
8352 // The caller is responsible for loading the full value if the argument is
8353 // passed with CCValAssign::Indirect.
8354 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
8355                                 const CCValAssign &VA, const SDLoc &DL,
8356                                 const RISCVTargetLowering &TLI) {
8357   MachineFunction &MF = DAG.getMachineFunction();
8358   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8359   EVT LocVT = VA.getLocVT();
8360   SDValue Val;
8361   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
8362   Register VReg = RegInfo.createVirtualRegister(RC);
8363   RegInfo.addLiveIn(VA.getLocReg(), VReg);
8364   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
8365 
8366   if (VA.getLocInfo() == CCValAssign::Indirect)
8367     return Val;
8368 
8369   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
8370 }
8371 
8372 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
8373                                    const CCValAssign &VA, const SDLoc &DL,
8374                                    const RISCVSubtarget &Subtarget) {
8375   EVT LocVT = VA.getLocVT();
8376 
8377   switch (VA.getLocInfo()) {
8378   default:
8379     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8380   case CCValAssign::Full:
8381     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
8382       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
8383     break;
8384   case CCValAssign::BCvt:
8385     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8386       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
8387     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8388       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
8389     else
8390       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
8391     break;
8392   }
8393   return Val;
8394 }
8395 
8396 // The caller is responsible for loading the full value if the argument is
8397 // passed with CCValAssign::Indirect.
8398 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
8399                                 const CCValAssign &VA, const SDLoc &DL) {
8400   MachineFunction &MF = DAG.getMachineFunction();
8401   MachineFrameInfo &MFI = MF.getFrameInfo();
8402   EVT LocVT = VA.getLocVT();
8403   EVT ValVT = VA.getValVT();
8404   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
8405   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
8406                                  /*Immutable=*/true);
8407   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
8408   SDValue Val;
8409 
8410   ISD::LoadExtType ExtType;
8411   switch (VA.getLocInfo()) {
8412   default:
8413     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8414   case CCValAssign::Full:
8415   case CCValAssign::Indirect:
8416   case CCValAssign::BCvt:
8417     ExtType = ISD::NON_EXTLOAD;
8418     break;
8419   }
8420   Val = DAG.getExtLoad(
8421       ExtType, DL, LocVT, Chain, FIN,
8422       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
8423   return Val;
8424 }
8425 
8426 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
8427                                        const CCValAssign &VA, const SDLoc &DL) {
8428   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
8429          "Unexpected VA");
8430   MachineFunction &MF = DAG.getMachineFunction();
8431   MachineFrameInfo &MFI = MF.getFrameInfo();
8432   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8433 
8434   if (VA.isMemLoc()) {
8435     // f64 is passed on the stack.
8436     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
8437     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8438     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
8439                        MachinePointerInfo::getFixedStack(MF, FI));
8440   }
8441 
8442   assert(VA.isRegLoc() && "Expected register VA assignment");
8443 
8444   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8445   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
8446   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
8447   SDValue Hi;
8448   if (VA.getLocReg() == RISCV::X17) {
8449     // Second half of f64 is passed on the stack.
8450     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
8451     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8452     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
8453                      MachinePointerInfo::getFixedStack(MF, FI));
8454   } else {
8455     // Second half of f64 is passed in another GPR.
8456     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8457     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
8458     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
8459   }
8460   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
8461 }
8462 
8463 // FastCC has less than 1% performance improvement for some particular
8464 // benchmark. But theoretically, it may has benenfit for some cases.
8465 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
8466                             unsigned ValNo, MVT ValVT, MVT LocVT,
8467                             CCValAssign::LocInfo LocInfo,
8468                             ISD::ArgFlagsTy ArgFlags, CCState &State,
8469                             bool IsFixed, bool IsRet, Type *OrigTy,
8470                             const RISCVTargetLowering &TLI,
8471                             Optional<unsigned> FirstMaskArgument) {
8472 
8473   // X5 and X6 might be used for save-restore libcall.
8474   static const MCPhysReg GPRList[] = {
8475       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
8476       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
8477       RISCV::X29, RISCV::X30, RISCV::X31};
8478 
8479   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8480     if (unsigned Reg = State.AllocateReg(GPRList)) {
8481       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8482       return false;
8483     }
8484   }
8485 
8486   if (LocVT == MVT::f16) {
8487     static const MCPhysReg FPR16List[] = {
8488         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
8489         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
8490         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
8491         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
8492     if (unsigned Reg = State.AllocateReg(FPR16List)) {
8493       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8494       return false;
8495     }
8496   }
8497 
8498   if (LocVT == MVT::f32) {
8499     static const MCPhysReg FPR32List[] = {
8500         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
8501         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
8502         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
8503         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
8504     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8505       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8506       return false;
8507     }
8508   }
8509 
8510   if (LocVT == MVT::f64) {
8511     static const MCPhysReg FPR64List[] = {
8512         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
8513         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
8514         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
8515         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
8516     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8517       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8518       return false;
8519     }
8520   }
8521 
8522   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
8523     unsigned Offset4 = State.AllocateStack(4, Align(4));
8524     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
8525     return false;
8526   }
8527 
8528   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
8529     unsigned Offset5 = State.AllocateStack(8, Align(8));
8530     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
8531     return false;
8532   }
8533 
8534   if (LocVT.isVector()) {
8535     if (unsigned Reg =
8536             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
8537       // Fixed-length vectors are located in the corresponding scalable-vector
8538       // container types.
8539       if (ValVT.isFixedLengthVector())
8540         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8541       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8542     } else {
8543       // Try and pass the address via a "fast" GPR.
8544       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
8545         LocInfo = CCValAssign::Indirect;
8546         LocVT = TLI.getSubtarget().getXLenVT();
8547         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
8548       } else if (ValVT.isFixedLengthVector()) {
8549         auto StackAlign =
8550             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8551         unsigned StackOffset =
8552             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
8553         State.addLoc(
8554             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8555       } else {
8556         // Can't pass scalable vectors on the stack.
8557         return true;
8558       }
8559     }
8560 
8561     return false;
8562   }
8563 
8564   return true; // CC didn't match.
8565 }
8566 
8567 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
8568                          CCValAssign::LocInfo LocInfo,
8569                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
8570 
8571   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8572     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
8573     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
8574     static const MCPhysReg GPRList[] = {
8575         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
8576         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
8577     if (unsigned Reg = State.AllocateReg(GPRList)) {
8578       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8579       return false;
8580     }
8581   }
8582 
8583   if (LocVT == MVT::f32) {
8584     // Pass in STG registers: F1, ..., F6
8585     //                        fs0 ... fs5
8586     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
8587                                           RISCV::F18_F, RISCV::F19_F,
8588                                           RISCV::F20_F, RISCV::F21_F};
8589     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8590       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8591       return false;
8592     }
8593   }
8594 
8595   if (LocVT == MVT::f64) {
8596     // Pass in STG registers: D1, ..., D6
8597     //                        fs6 ... fs11
8598     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
8599                                           RISCV::F24_D, RISCV::F25_D,
8600                                           RISCV::F26_D, RISCV::F27_D};
8601     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8602       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8603       return false;
8604     }
8605   }
8606 
8607   report_fatal_error("No registers left in GHC calling convention");
8608   return true;
8609 }
8610 
8611 // Transform physical registers into virtual registers.
8612 SDValue RISCVTargetLowering::LowerFormalArguments(
8613     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
8614     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
8615     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
8616 
8617   MachineFunction &MF = DAG.getMachineFunction();
8618 
8619   switch (CallConv) {
8620   default:
8621     report_fatal_error("Unsupported calling convention");
8622   case CallingConv::C:
8623   case CallingConv::Fast:
8624     break;
8625   case CallingConv::GHC:
8626     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
8627         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
8628       report_fatal_error(
8629         "GHC calling convention requires the F and D instruction set extensions");
8630   }
8631 
8632   const Function &Func = MF.getFunction();
8633   if (Func.hasFnAttribute("interrupt")) {
8634     if (!Func.arg_empty())
8635       report_fatal_error(
8636         "Functions with the interrupt attribute cannot have arguments!");
8637 
8638     StringRef Kind =
8639       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8640 
8641     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
8642       report_fatal_error(
8643         "Function interrupt attribute argument not supported!");
8644   }
8645 
8646   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8647   MVT XLenVT = Subtarget.getXLenVT();
8648   unsigned XLenInBytes = Subtarget.getXLen() / 8;
8649   // Used with vargs to acumulate store chains.
8650   std::vector<SDValue> OutChains;
8651 
8652   // Assign locations to all of the incoming arguments.
8653   SmallVector<CCValAssign, 16> ArgLocs;
8654   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8655 
8656   if (CallConv == CallingConv::GHC)
8657     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
8658   else
8659     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
8660                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8661                                                    : CC_RISCV);
8662 
8663   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
8664     CCValAssign &VA = ArgLocs[i];
8665     SDValue ArgValue;
8666     // Passing f64 on RV32D with a soft float ABI must be handled as a special
8667     // case.
8668     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
8669       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
8670     else if (VA.isRegLoc())
8671       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
8672     else
8673       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
8674 
8675     if (VA.getLocInfo() == CCValAssign::Indirect) {
8676       // If the original argument was split and passed by reference (e.g. i128
8677       // on RV32), we need to load all parts of it here (using the same
8678       // address). Vectors may be partly split to registers and partly to the
8679       // stack, in which case the base address is partly offset and subsequent
8680       // stores are relative to that.
8681       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
8682                                    MachinePointerInfo()));
8683       unsigned ArgIndex = Ins[i].OrigArgIndex;
8684       unsigned ArgPartOffset = Ins[i].PartOffset;
8685       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8686       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
8687         CCValAssign &PartVA = ArgLocs[i + 1];
8688         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
8689         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8690         if (PartVA.getValVT().isScalableVector())
8691           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8692         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
8693         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
8694                                      MachinePointerInfo()));
8695         ++i;
8696       }
8697       continue;
8698     }
8699     InVals.push_back(ArgValue);
8700   }
8701 
8702   if (IsVarArg) {
8703     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
8704     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
8705     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
8706     MachineFrameInfo &MFI = MF.getFrameInfo();
8707     MachineRegisterInfo &RegInfo = MF.getRegInfo();
8708     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
8709 
8710     // Offset of the first variable argument from stack pointer, and size of
8711     // the vararg save area. For now, the varargs save area is either zero or
8712     // large enough to hold a0-a7.
8713     int VaArgOffset, VarArgsSaveSize;
8714 
8715     // If all registers are allocated, then all varargs must be passed on the
8716     // stack and we don't need to save any argregs.
8717     if (ArgRegs.size() == Idx) {
8718       VaArgOffset = CCInfo.getNextStackOffset();
8719       VarArgsSaveSize = 0;
8720     } else {
8721       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
8722       VaArgOffset = -VarArgsSaveSize;
8723     }
8724 
8725     // Record the frame index of the first variable argument
8726     // which is a value necessary to VASTART.
8727     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8728     RVFI->setVarArgsFrameIndex(FI);
8729 
8730     // If saving an odd number of registers then create an extra stack slot to
8731     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
8732     // offsets to even-numbered registered remain 2*XLEN-aligned.
8733     if (Idx % 2) {
8734       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
8735       VarArgsSaveSize += XLenInBytes;
8736     }
8737 
8738     // Copy the integer registers that may have been used for passing varargs
8739     // to the vararg save area.
8740     for (unsigned I = Idx; I < ArgRegs.size();
8741          ++I, VaArgOffset += XLenInBytes) {
8742       const Register Reg = RegInfo.createVirtualRegister(RC);
8743       RegInfo.addLiveIn(ArgRegs[I], Reg);
8744       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
8745       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8746       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8747       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
8748                                    MachinePointerInfo::getFixedStack(MF, FI));
8749       cast<StoreSDNode>(Store.getNode())
8750           ->getMemOperand()
8751           ->setValue((Value *)nullptr);
8752       OutChains.push_back(Store);
8753     }
8754     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
8755   }
8756 
8757   // All stores are grouped in one node to allow the matching between
8758   // the size of Ins and InVals. This only happens for vararg functions.
8759   if (!OutChains.empty()) {
8760     OutChains.push_back(Chain);
8761     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
8762   }
8763 
8764   return Chain;
8765 }
8766 
8767 /// isEligibleForTailCallOptimization - Check whether the call is eligible
8768 /// for tail call optimization.
8769 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
8770 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
8771     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
8772     const SmallVector<CCValAssign, 16> &ArgLocs) const {
8773 
8774   auto &Callee = CLI.Callee;
8775   auto CalleeCC = CLI.CallConv;
8776   auto &Outs = CLI.Outs;
8777   auto &Caller = MF.getFunction();
8778   auto CallerCC = Caller.getCallingConv();
8779 
8780   // Exception-handling functions need a special set of instructions to
8781   // indicate a return to the hardware. Tail-calling another function would
8782   // probably break this.
8783   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
8784   // should be expanded as new function attributes are introduced.
8785   if (Caller.hasFnAttribute("interrupt"))
8786     return false;
8787 
8788   // Do not tail call opt if the stack is used to pass parameters.
8789   if (CCInfo.getNextStackOffset() != 0)
8790     return false;
8791 
8792   // Do not tail call opt if any parameters need to be passed indirectly.
8793   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
8794   // passed indirectly. So the address of the value will be passed in a
8795   // register, or if not available, then the address is put on the stack. In
8796   // order to pass indirectly, space on the stack often needs to be allocated
8797   // in order to store the value. In this case the CCInfo.getNextStackOffset()
8798   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
8799   // are passed CCValAssign::Indirect.
8800   for (auto &VA : ArgLocs)
8801     if (VA.getLocInfo() == CCValAssign::Indirect)
8802       return false;
8803 
8804   // Do not tail call opt if either caller or callee uses struct return
8805   // semantics.
8806   auto IsCallerStructRet = Caller.hasStructRetAttr();
8807   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
8808   if (IsCallerStructRet || IsCalleeStructRet)
8809     return false;
8810 
8811   // Externally-defined functions with weak linkage should not be
8812   // tail-called. The behaviour of branch instructions in this situation (as
8813   // used for tail calls) is implementation-defined, so we cannot rely on the
8814   // linker replacing the tail call with a return.
8815   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
8816     const GlobalValue *GV = G->getGlobal();
8817     if (GV->hasExternalWeakLinkage())
8818       return false;
8819   }
8820 
8821   // The callee has to preserve all registers the caller needs to preserve.
8822   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
8823   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
8824   if (CalleeCC != CallerCC) {
8825     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
8826     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8827       return false;
8828   }
8829 
8830   // Byval parameters hand the function a pointer directly into the stack area
8831   // we want to reuse during a tail call. Working around this *is* possible
8832   // but less efficient and uglier in LowerCall.
8833   for (auto &Arg : Outs)
8834     if (Arg.Flags.isByVal())
8835       return false;
8836 
8837   return true;
8838 }
8839 
8840 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
8841   return DAG.getDataLayout().getPrefTypeAlign(
8842       VT.getTypeForEVT(*DAG.getContext()));
8843 }
8844 
8845 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
8846 // and output parameter nodes.
8847 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
8848                                        SmallVectorImpl<SDValue> &InVals) const {
8849   SelectionDAG &DAG = CLI.DAG;
8850   SDLoc &DL = CLI.DL;
8851   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
8852   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
8853   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
8854   SDValue Chain = CLI.Chain;
8855   SDValue Callee = CLI.Callee;
8856   bool &IsTailCall = CLI.IsTailCall;
8857   CallingConv::ID CallConv = CLI.CallConv;
8858   bool IsVarArg = CLI.IsVarArg;
8859   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8860   MVT XLenVT = Subtarget.getXLenVT();
8861 
8862   MachineFunction &MF = DAG.getMachineFunction();
8863 
8864   // Analyze the operands of the call, assigning locations to each operand.
8865   SmallVector<CCValAssign, 16> ArgLocs;
8866   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8867 
8868   if (CallConv == CallingConv::GHC)
8869     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
8870   else
8871     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
8872                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8873                                                     : CC_RISCV);
8874 
8875   // Check if it's really possible to do a tail call.
8876   if (IsTailCall)
8877     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8878 
8879   if (IsTailCall)
8880     ++NumTailCalls;
8881   else if (CLI.CB && CLI.CB->isMustTailCall())
8882     report_fatal_error("failed to perform tail call elimination on a call "
8883                        "site marked musttail");
8884 
8885   // Get a count of how many bytes are to be pushed on the stack.
8886   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
8887 
8888   // Create local copies for byval args
8889   SmallVector<SDValue, 8> ByValArgs;
8890   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8891     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8892     if (!Flags.isByVal())
8893       continue;
8894 
8895     SDValue Arg = OutVals[i];
8896     unsigned Size = Flags.getByValSize();
8897     Align Alignment = Flags.getNonZeroByValAlign();
8898 
8899     int FI =
8900         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
8901     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8902     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
8903 
8904     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
8905                           /*IsVolatile=*/false,
8906                           /*AlwaysInline=*/false, IsTailCall,
8907                           MachinePointerInfo(), MachinePointerInfo());
8908     ByValArgs.push_back(FIPtr);
8909   }
8910 
8911   if (!IsTailCall)
8912     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
8913 
8914   // Copy argument values to their designated locations.
8915   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
8916   SmallVector<SDValue, 8> MemOpChains;
8917   SDValue StackPtr;
8918   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
8919     CCValAssign &VA = ArgLocs[i];
8920     SDValue ArgValue = OutVals[i];
8921     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8922 
8923     // Handle passing f64 on RV32D with a soft float ABI as a special case.
8924     bool IsF64OnRV32DSoftABI =
8925         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
8926     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
8927       SDValue SplitF64 = DAG.getNode(
8928           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8929       SDValue Lo = SplitF64.getValue(0);
8930       SDValue Hi = SplitF64.getValue(1);
8931 
8932       Register RegLo = VA.getLocReg();
8933       RegsToPass.push_back(std::make_pair(RegLo, Lo));
8934 
8935       if (RegLo == RISCV::X17) {
8936         // Second half of f64 is passed on the stack.
8937         // Work out the address of the stack slot.
8938         if (!StackPtr.getNode())
8939           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8940         // Emit the store.
8941         MemOpChains.push_back(
8942             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
8943       } else {
8944         // Second half of f64 is passed in another GPR.
8945         assert(RegLo < RISCV::X31 && "Invalid register pair");
8946         Register RegHigh = RegLo + 1;
8947         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8948       }
8949       continue;
8950     }
8951 
8952     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
8953     // as any other MemLoc.
8954 
8955     // Promote the value if needed.
8956     // For now, only handle fully promoted and indirect arguments.
8957     if (VA.getLocInfo() == CCValAssign::Indirect) {
8958       // Store the argument in a stack slot and pass its address.
8959       Align StackAlign =
8960           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
8961                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
8962       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
8963       // If the original argument was split (e.g. i128), we need
8964       // to store the required parts of it here (and pass just one address).
8965       // Vectors may be partly split to registers and partly to the stack, in
8966       // which case the base address is partly offset and subsequent stores are
8967       // relative to that.
8968       unsigned ArgIndex = Outs[i].OrigArgIndex;
8969       unsigned ArgPartOffset = Outs[i].PartOffset;
8970       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8971       // Calculate the total size to store. We don't have access to what we're
8972       // actually storing other than performing the loop and collecting the
8973       // info.
8974       SmallVector<std::pair<SDValue, SDValue>> Parts;
8975       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
8976         SDValue PartValue = OutVals[i + 1];
8977         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
8978         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8979         EVT PartVT = PartValue.getValueType();
8980         if (PartVT.isScalableVector())
8981           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8982         StoredSize += PartVT.getStoreSize();
8983         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
8984         Parts.push_back(std::make_pair(PartValue, Offset));
8985         ++i;
8986       }
8987       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
8988       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
8989       MemOpChains.push_back(
8990           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
8991                        MachinePointerInfo::getFixedStack(MF, FI)));
8992       for (const auto &Part : Parts) {
8993         SDValue PartValue = Part.first;
8994         SDValue PartOffset = Part.second;
8995         SDValue Address =
8996             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
8997         MemOpChains.push_back(
8998             DAG.getStore(Chain, DL, PartValue, Address,
8999                          MachinePointerInfo::getFixedStack(MF, FI)));
9000       }
9001       ArgValue = SpillSlot;
9002     } else {
9003       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
9004     }
9005 
9006     // Use local copy if it is a byval arg.
9007     if (Flags.isByVal())
9008       ArgValue = ByValArgs[j++];
9009 
9010     if (VA.isRegLoc()) {
9011       // Queue up the argument copies and emit them at the end.
9012       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
9013     } else {
9014       assert(VA.isMemLoc() && "Argument not register or memory");
9015       assert(!IsTailCall && "Tail call not allowed if stack is used "
9016                             "for passing parameters");
9017 
9018       // Work out the address of the stack slot.
9019       if (!StackPtr.getNode())
9020         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9021       SDValue Address =
9022           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
9023                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
9024 
9025       // Emit the store.
9026       MemOpChains.push_back(
9027           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
9028     }
9029   }
9030 
9031   // Join the stores, which are independent of one another.
9032   if (!MemOpChains.empty())
9033     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
9034 
9035   SDValue Glue;
9036 
9037   // Build a sequence of copy-to-reg nodes, chained and glued together.
9038   for (auto &Reg : RegsToPass) {
9039     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
9040     Glue = Chain.getValue(1);
9041   }
9042 
9043   // Validate that none of the argument registers have been marked as
9044   // reserved, if so report an error. Do the same for the return address if this
9045   // is not a tailcall.
9046   validateCCReservedRegs(RegsToPass, MF);
9047   if (!IsTailCall &&
9048       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
9049     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9050         MF.getFunction(),
9051         "Return address register required, but has been reserved."});
9052 
9053   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
9054   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
9055   // split it and then direct call can be matched by PseudoCALL.
9056   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
9057     const GlobalValue *GV = S->getGlobal();
9058 
9059     unsigned OpFlags = RISCVII::MO_CALL;
9060     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
9061       OpFlags = RISCVII::MO_PLT;
9062 
9063     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
9064   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
9065     unsigned OpFlags = RISCVII::MO_CALL;
9066 
9067     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
9068                                                  nullptr))
9069       OpFlags = RISCVII::MO_PLT;
9070 
9071     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
9072   }
9073 
9074   // The first call operand is the chain and the second is the target address.
9075   SmallVector<SDValue, 8> Ops;
9076   Ops.push_back(Chain);
9077   Ops.push_back(Callee);
9078 
9079   // Add argument registers to the end of the list so that they are
9080   // known live into the call.
9081   for (auto &Reg : RegsToPass)
9082     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
9083 
9084   if (!IsTailCall) {
9085     // Add a register mask operand representing the call-preserved registers.
9086     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
9087     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
9088     assert(Mask && "Missing call preserved mask for calling convention");
9089     Ops.push_back(DAG.getRegisterMask(Mask));
9090   }
9091 
9092   // Glue the call to the argument copies, if any.
9093   if (Glue.getNode())
9094     Ops.push_back(Glue);
9095 
9096   // Emit the call.
9097   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9098 
9099   if (IsTailCall) {
9100     MF.getFrameInfo().setHasTailCall();
9101     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
9102   }
9103 
9104   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
9105   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
9106   Glue = Chain.getValue(1);
9107 
9108   // Mark the end of the call, which is glued to the call itself.
9109   Chain = DAG.getCALLSEQ_END(Chain,
9110                              DAG.getConstant(NumBytes, DL, PtrVT, true),
9111                              DAG.getConstant(0, DL, PtrVT, true),
9112                              Glue, DL);
9113   Glue = Chain.getValue(1);
9114 
9115   // Assign locations to each value returned by this call.
9116   SmallVector<CCValAssign, 16> RVLocs;
9117   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
9118   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
9119 
9120   // Copy all of the result registers out of their specified physreg.
9121   for (auto &VA : RVLocs) {
9122     // Copy the value out
9123     SDValue RetValue =
9124         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
9125     // Glue the RetValue to the end of the call sequence
9126     Chain = RetValue.getValue(1);
9127     Glue = RetValue.getValue(2);
9128 
9129     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9130       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
9131       SDValue RetValue2 =
9132           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
9133       Chain = RetValue2.getValue(1);
9134       Glue = RetValue2.getValue(2);
9135       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
9136                              RetValue2);
9137     }
9138 
9139     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
9140 
9141     InVals.push_back(RetValue);
9142   }
9143 
9144   return Chain;
9145 }
9146 
9147 bool RISCVTargetLowering::CanLowerReturn(
9148     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
9149     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
9150   SmallVector<CCValAssign, 16> RVLocs;
9151   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9152 
9153   Optional<unsigned> FirstMaskArgument;
9154   if (Subtarget.hasVInstructions())
9155     FirstMaskArgument = preAssignMask(Outs);
9156 
9157   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9158     MVT VT = Outs[i].VT;
9159     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9160     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9161     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
9162                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
9163                  *this, FirstMaskArgument))
9164       return false;
9165   }
9166   return true;
9167 }
9168 
9169 SDValue
9170 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9171                                  bool IsVarArg,
9172                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
9173                                  const SmallVectorImpl<SDValue> &OutVals,
9174                                  const SDLoc &DL, SelectionDAG &DAG) const {
9175   const MachineFunction &MF = DAG.getMachineFunction();
9176   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9177 
9178   // Stores the assignment of the return value to a location.
9179   SmallVector<CCValAssign, 16> RVLocs;
9180 
9181   // Info about the registers and stack slot.
9182   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9183                  *DAG.getContext());
9184 
9185   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9186                     nullptr, CC_RISCV);
9187 
9188   if (CallConv == CallingConv::GHC && !RVLocs.empty())
9189     report_fatal_error("GHC functions return void only");
9190 
9191   SDValue Glue;
9192   SmallVector<SDValue, 4> RetOps(1, Chain);
9193 
9194   // Copy the result values into the output registers.
9195   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9196     SDValue Val = OutVals[i];
9197     CCValAssign &VA = RVLocs[i];
9198     assert(VA.isRegLoc() && "Can only return in registers!");
9199 
9200     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9201       // Handle returning f64 on RV32D with a soft float ABI.
9202       assert(VA.isRegLoc() && "Expected return via registers");
9203       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9204                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
9205       SDValue Lo = SplitF64.getValue(0);
9206       SDValue Hi = SplitF64.getValue(1);
9207       Register RegLo = VA.getLocReg();
9208       assert(RegLo < RISCV::X31 && "Invalid register pair");
9209       Register RegHi = RegLo + 1;
9210 
9211       if (STI.isRegisterReservedByUser(RegLo) ||
9212           STI.isRegisterReservedByUser(RegHi))
9213         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9214             MF.getFunction(),
9215             "Return value register required, but has been reserved."});
9216 
9217       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
9218       Glue = Chain.getValue(1);
9219       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
9220       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
9221       Glue = Chain.getValue(1);
9222       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
9223     } else {
9224       // Handle a 'normal' return.
9225       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
9226       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
9227 
9228       if (STI.isRegisterReservedByUser(VA.getLocReg()))
9229         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9230             MF.getFunction(),
9231             "Return value register required, but has been reserved."});
9232 
9233       // Guarantee that all emitted copies are stuck together.
9234       Glue = Chain.getValue(1);
9235       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
9236     }
9237   }
9238 
9239   RetOps[0] = Chain; // Update chain.
9240 
9241   // Add the glue node if we have it.
9242   if (Glue.getNode()) {
9243     RetOps.push_back(Glue);
9244   }
9245 
9246   unsigned RetOpc = RISCVISD::RET_FLAG;
9247   // Interrupt service routines use different return instructions.
9248   const Function &Func = DAG.getMachineFunction().getFunction();
9249   if (Func.hasFnAttribute("interrupt")) {
9250     if (!Func.getReturnType()->isVoidTy())
9251       report_fatal_error(
9252           "Functions with the interrupt attribute must have void return type!");
9253 
9254     MachineFunction &MF = DAG.getMachineFunction();
9255     StringRef Kind =
9256       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9257 
9258     if (Kind == "user")
9259       RetOpc = RISCVISD::URET_FLAG;
9260     else if (Kind == "supervisor")
9261       RetOpc = RISCVISD::SRET_FLAG;
9262     else
9263       RetOpc = RISCVISD::MRET_FLAG;
9264   }
9265 
9266   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
9267 }
9268 
9269 void RISCVTargetLowering::validateCCReservedRegs(
9270     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
9271     MachineFunction &MF) const {
9272   const Function &F = MF.getFunction();
9273   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9274 
9275   if (llvm::any_of(Regs, [&STI](auto Reg) {
9276         return STI.isRegisterReservedByUser(Reg.first);
9277       }))
9278     F.getContext().diagnose(DiagnosticInfoUnsupported{
9279         F, "Argument register required, but has been reserved."});
9280 }
9281 
9282 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
9283   return CI->isTailCall();
9284 }
9285 
9286 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
9287 #define NODE_NAME_CASE(NODE)                                                   \
9288   case RISCVISD::NODE:                                                         \
9289     return "RISCVISD::" #NODE;
9290   // clang-format off
9291   switch ((RISCVISD::NodeType)Opcode) {
9292   case RISCVISD::FIRST_NUMBER:
9293     break;
9294   NODE_NAME_CASE(RET_FLAG)
9295   NODE_NAME_CASE(URET_FLAG)
9296   NODE_NAME_CASE(SRET_FLAG)
9297   NODE_NAME_CASE(MRET_FLAG)
9298   NODE_NAME_CASE(CALL)
9299   NODE_NAME_CASE(SELECT_CC)
9300   NODE_NAME_CASE(BR_CC)
9301   NODE_NAME_CASE(BuildPairF64)
9302   NODE_NAME_CASE(SplitF64)
9303   NODE_NAME_CASE(TAIL)
9304   NODE_NAME_CASE(MULHSU)
9305   NODE_NAME_CASE(SLLW)
9306   NODE_NAME_CASE(SRAW)
9307   NODE_NAME_CASE(SRLW)
9308   NODE_NAME_CASE(DIVW)
9309   NODE_NAME_CASE(DIVUW)
9310   NODE_NAME_CASE(REMUW)
9311   NODE_NAME_CASE(ROLW)
9312   NODE_NAME_CASE(RORW)
9313   NODE_NAME_CASE(CLZW)
9314   NODE_NAME_CASE(CTZW)
9315   NODE_NAME_CASE(FSLW)
9316   NODE_NAME_CASE(FSRW)
9317   NODE_NAME_CASE(FSL)
9318   NODE_NAME_CASE(FSR)
9319   NODE_NAME_CASE(FMV_H_X)
9320   NODE_NAME_CASE(FMV_X_ANYEXTH)
9321   NODE_NAME_CASE(FMV_W_X_RV64)
9322   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
9323   NODE_NAME_CASE(FCVT_X_RTZ)
9324   NODE_NAME_CASE(FCVT_XU_RTZ)
9325   NODE_NAME_CASE(FCVT_W_RTZ_RV64)
9326   NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
9327   NODE_NAME_CASE(READ_CYCLE_WIDE)
9328   NODE_NAME_CASE(GREV)
9329   NODE_NAME_CASE(GREVW)
9330   NODE_NAME_CASE(GORC)
9331   NODE_NAME_CASE(GORCW)
9332   NODE_NAME_CASE(SHFL)
9333   NODE_NAME_CASE(SHFLW)
9334   NODE_NAME_CASE(UNSHFL)
9335   NODE_NAME_CASE(UNSHFLW)
9336   NODE_NAME_CASE(BCOMPRESS)
9337   NODE_NAME_CASE(BCOMPRESSW)
9338   NODE_NAME_CASE(BDECOMPRESS)
9339   NODE_NAME_CASE(BDECOMPRESSW)
9340   NODE_NAME_CASE(VMV_V_X_VL)
9341   NODE_NAME_CASE(VFMV_V_F_VL)
9342   NODE_NAME_CASE(VMV_X_S)
9343   NODE_NAME_CASE(VMV_S_X_VL)
9344   NODE_NAME_CASE(VFMV_S_F_VL)
9345   NODE_NAME_CASE(SPLAT_VECTOR_I64)
9346   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
9347   NODE_NAME_CASE(READ_VLENB)
9348   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
9349   NODE_NAME_CASE(VSLIDEUP_VL)
9350   NODE_NAME_CASE(VSLIDE1UP_VL)
9351   NODE_NAME_CASE(VSLIDEDOWN_VL)
9352   NODE_NAME_CASE(VSLIDE1DOWN_VL)
9353   NODE_NAME_CASE(VID_VL)
9354   NODE_NAME_CASE(VFNCVT_ROD_VL)
9355   NODE_NAME_CASE(VECREDUCE_ADD_VL)
9356   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
9357   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
9358   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
9359   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
9360   NODE_NAME_CASE(VECREDUCE_AND_VL)
9361   NODE_NAME_CASE(VECREDUCE_OR_VL)
9362   NODE_NAME_CASE(VECREDUCE_XOR_VL)
9363   NODE_NAME_CASE(VECREDUCE_FADD_VL)
9364   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
9365   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
9366   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
9367   NODE_NAME_CASE(ADD_VL)
9368   NODE_NAME_CASE(AND_VL)
9369   NODE_NAME_CASE(MUL_VL)
9370   NODE_NAME_CASE(OR_VL)
9371   NODE_NAME_CASE(SDIV_VL)
9372   NODE_NAME_CASE(SHL_VL)
9373   NODE_NAME_CASE(SREM_VL)
9374   NODE_NAME_CASE(SRA_VL)
9375   NODE_NAME_CASE(SRL_VL)
9376   NODE_NAME_CASE(SUB_VL)
9377   NODE_NAME_CASE(UDIV_VL)
9378   NODE_NAME_CASE(UREM_VL)
9379   NODE_NAME_CASE(XOR_VL)
9380   NODE_NAME_CASE(SADDSAT_VL)
9381   NODE_NAME_CASE(UADDSAT_VL)
9382   NODE_NAME_CASE(SSUBSAT_VL)
9383   NODE_NAME_CASE(USUBSAT_VL)
9384   NODE_NAME_CASE(FADD_VL)
9385   NODE_NAME_CASE(FSUB_VL)
9386   NODE_NAME_CASE(FMUL_VL)
9387   NODE_NAME_CASE(FDIV_VL)
9388   NODE_NAME_CASE(FNEG_VL)
9389   NODE_NAME_CASE(FABS_VL)
9390   NODE_NAME_CASE(FSQRT_VL)
9391   NODE_NAME_CASE(FMA_VL)
9392   NODE_NAME_CASE(FCOPYSIGN_VL)
9393   NODE_NAME_CASE(SMIN_VL)
9394   NODE_NAME_CASE(SMAX_VL)
9395   NODE_NAME_CASE(UMIN_VL)
9396   NODE_NAME_CASE(UMAX_VL)
9397   NODE_NAME_CASE(FMINNUM_VL)
9398   NODE_NAME_CASE(FMAXNUM_VL)
9399   NODE_NAME_CASE(MULHS_VL)
9400   NODE_NAME_CASE(MULHU_VL)
9401   NODE_NAME_CASE(FP_TO_SINT_VL)
9402   NODE_NAME_CASE(FP_TO_UINT_VL)
9403   NODE_NAME_CASE(SINT_TO_FP_VL)
9404   NODE_NAME_CASE(UINT_TO_FP_VL)
9405   NODE_NAME_CASE(FP_EXTEND_VL)
9406   NODE_NAME_CASE(FP_ROUND_VL)
9407   NODE_NAME_CASE(VWMUL_VL)
9408   NODE_NAME_CASE(VWMULU_VL)
9409   NODE_NAME_CASE(SETCC_VL)
9410   NODE_NAME_CASE(VSELECT_VL)
9411   NODE_NAME_CASE(VMAND_VL)
9412   NODE_NAME_CASE(VMOR_VL)
9413   NODE_NAME_CASE(VMXOR_VL)
9414   NODE_NAME_CASE(VMCLR_VL)
9415   NODE_NAME_CASE(VMSET_VL)
9416   NODE_NAME_CASE(VRGATHER_VX_VL)
9417   NODE_NAME_CASE(VRGATHER_VV_VL)
9418   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
9419   NODE_NAME_CASE(VSEXT_VL)
9420   NODE_NAME_CASE(VZEXT_VL)
9421   NODE_NAME_CASE(VCPOP_VL)
9422   NODE_NAME_CASE(VLE_VL)
9423   NODE_NAME_CASE(VSE_VL)
9424   NODE_NAME_CASE(READ_CSR)
9425   NODE_NAME_CASE(WRITE_CSR)
9426   NODE_NAME_CASE(SWAP_CSR)
9427   }
9428   // clang-format on
9429   return nullptr;
9430 #undef NODE_NAME_CASE
9431 }
9432 
9433 /// getConstraintType - Given a constraint letter, return the type of
9434 /// constraint it is for this target.
9435 RISCVTargetLowering::ConstraintType
9436 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
9437   if (Constraint.size() == 1) {
9438     switch (Constraint[0]) {
9439     default:
9440       break;
9441     case 'f':
9442       return C_RegisterClass;
9443     case 'I':
9444     case 'J':
9445     case 'K':
9446       return C_Immediate;
9447     case 'A':
9448       return C_Memory;
9449     case 'S': // A symbolic address
9450       return C_Other;
9451     }
9452   } else {
9453     if (Constraint == "vr" || Constraint == "vm")
9454       return C_RegisterClass;
9455   }
9456   return TargetLowering::getConstraintType(Constraint);
9457 }
9458 
9459 std::pair<unsigned, const TargetRegisterClass *>
9460 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9461                                                   StringRef Constraint,
9462                                                   MVT VT) const {
9463   // First, see if this is a constraint that directly corresponds to a
9464   // RISCV register class.
9465   if (Constraint.size() == 1) {
9466     switch (Constraint[0]) {
9467     case 'r':
9468       return std::make_pair(0U, &RISCV::GPRRegClass);
9469     case 'f':
9470       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
9471         return std::make_pair(0U, &RISCV::FPR16RegClass);
9472       if (Subtarget.hasStdExtF() && VT == MVT::f32)
9473         return std::make_pair(0U, &RISCV::FPR32RegClass);
9474       if (Subtarget.hasStdExtD() && VT == MVT::f64)
9475         return std::make_pair(0U, &RISCV::FPR64RegClass);
9476       break;
9477     default:
9478       break;
9479     }
9480   } else {
9481     if (Constraint == "vr") {
9482       for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
9483                              &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9484         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
9485           return std::make_pair(0U, RC);
9486       }
9487     } else if (Constraint == "vm") {
9488       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9489         return std::make_pair(0U, &RISCV::VMRegClass);
9490     }
9491   }
9492 
9493   // Clang will correctly decode the usage of register name aliases into their
9494   // official names. However, other frontends like `rustc` do not. This allows
9495   // users of these frontends to use the ABI names for registers in LLVM-style
9496   // register constraints.
9497   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
9498                                .Case("{zero}", RISCV::X0)
9499                                .Case("{ra}", RISCV::X1)
9500                                .Case("{sp}", RISCV::X2)
9501                                .Case("{gp}", RISCV::X3)
9502                                .Case("{tp}", RISCV::X4)
9503                                .Case("{t0}", RISCV::X5)
9504                                .Case("{t1}", RISCV::X6)
9505                                .Case("{t2}", RISCV::X7)
9506                                .Cases("{s0}", "{fp}", RISCV::X8)
9507                                .Case("{s1}", RISCV::X9)
9508                                .Case("{a0}", RISCV::X10)
9509                                .Case("{a1}", RISCV::X11)
9510                                .Case("{a2}", RISCV::X12)
9511                                .Case("{a3}", RISCV::X13)
9512                                .Case("{a4}", RISCV::X14)
9513                                .Case("{a5}", RISCV::X15)
9514                                .Case("{a6}", RISCV::X16)
9515                                .Case("{a7}", RISCV::X17)
9516                                .Case("{s2}", RISCV::X18)
9517                                .Case("{s3}", RISCV::X19)
9518                                .Case("{s4}", RISCV::X20)
9519                                .Case("{s5}", RISCV::X21)
9520                                .Case("{s6}", RISCV::X22)
9521                                .Case("{s7}", RISCV::X23)
9522                                .Case("{s8}", RISCV::X24)
9523                                .Case("{s9}", RISCV::X25)
9524                                .Case("{s10}", RISCV::X26)
9525                                .Case("{s11}", RISCV::X27)
9526                                .Case("{t3}", RISCV::X28)
9527                                .Case("{t4}", RISCV::X29)
9528                                .Case("{t5}", RISCV::X30)
9529                                .Case("{t6}", RISCV::X31)
9530                                .Default(RISCV::NoRegister);
9531   if (XRegFromAlias != RISCV::NoRegister)
9532     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
9533 
9534   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
9535   // TableGen record rather than the AsmName to choose registers for InlineAsm
9536   // constraints, plus we want to match those names to the widest floating point
9537   // register type available, manually select floating point registers here.
9538   //
9539   // The second case is the ABI name of the register, so that frontends can also
9540   // use the ABI names in register constraint lists.
9541   if (Subtarget.hasStdExtF()) {
9542     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
9543                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
9544                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
9545                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
9546                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
9547                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
9548                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
9549                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
9550                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
9551                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
9552                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
9553                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
9554                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
9555                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
9556                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
9557                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
9558                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
9559                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
9560                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
9561                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
9562                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
9563                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
9564                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
9565                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
9566                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
9567                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
9568                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
9569                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
9570                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
9571                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
9572                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
9573                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
9574                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
9575                         .Default(RISCV::NoRegister);
9576     if (FReg != RISCV::NoRegister) {
9577       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
9578       if (Subtarget.hasStdExtD()) {
9579         unsigned RegNo = FReg - RISCV::F0_F;
9580         unsigned DReg = RISCV::F0_D + RegNo;
9581         return std::make_pair(DReg, &RISCV::FPR64RegClass);
9582       }
9583       return std::make_pair(FReg, &RISCV::FPR32RegClass);
9584     }
9585   }
9586 
9587   if (Subtarget.hasVInstructions()) {
9588     Register VReg = StringSwitch<Register>(Constraint.lower())
9589                         .Case("{v0}", RISCV::V0)
9590                         .Case("{v1}", RISCV::V1)
9591                         .Case("{v2}", RISCV::V2)
9592                         .Case("{v3}", RISCV::V3)
9593                         .Case("{v4}", RISCV::V4)
9594                         .Case("{v5}", RISCV::V5)
9595                         .Case("{v6}", RISCV::V6)
9596                         .Case("{v7}", RISCV::V7)
9597                         .Case("{v8}", RISCV::V8)
9598                         .Case("{v9}", RISCV::V9)
9599                         .Case("{v10}", RISCV::V10)
9600                         .Case("{v11}", RISCV::V11)
9601                         .Case("{v12}", RISCV::V12)
9602                         .Case("{v13}", RISCV::V13)
9603                         .Case("{v14}", RISCV::V14)
9604                         .Case("{v15}", RISCV::V15)
9605                         .Case("{v16}", RISCV::V16)
9606                         .Case("{v17}", RISCV::V17)
9607                         .Case("{v18}", RISCV::V18)
9608                         .Case("{v19}", RISCV::V19)
9609                         .Case("{v20}", RISCV::V20)
9610                         .Case("{v21}", RISCV::V21)
9611                         .Case("{v22}", RISCV::V22)
9612                         .Case("{v23}", RISCV::V23)
9613                         .Case("{v24}", RISCV::V24)
9614                         .Case("{v25}", RISCV::V25)
9615                         .Case("{v26}", RISCV::V26)
9616                         .Case("{v27}", RISCV::V27)
9617                         .Case("{v28}", RISCV::V28)
9618                         .Case("{v29}", RISCV::V29)
9619                         .Case("{v30}", RISCV::V30)
9620                         .Case("{v31}", RISCV::V31)
9621                         .Default(RISCV::NoRegister);
9622     if (VReg != RISCV::NoRegister) {
9623       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9624         return std::make_pair(VReg, &RISCV::VMRegClass);
9625       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
9626         return std::make_pair(VReg, &RISCV::VRRegClass);
9627       for (const auto *RC :
9628            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9629         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
9630           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
9631           return std::make_pair(VReg, RC);
9632         }
9633       }
9634     }
9635   }
9636 
9637   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9638 }
9639 
9640 unsigned
9641 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
9642   // Currently only support length 1 constraints.
9643   if (ConstraintCode.size() == 1) {
9644     switch (ConstraintCode[0]) {
9645     case 'A':
9646       return InlineAsm::Constraint_A;
9647     default:
9648       break;
9649     }
9650   }
9651 
9652   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
9653 }
9654 
9655 void RISCVTargetLowering::LowerAsmOperandForConstraint(
9656     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9657     SelectionDAG &DAG) const {
9658   // Currently only support length 1 constraints.
9659   if (Constraint.length() == 1) {
9660     switch (Constraint[0]) {
9661     case 'I':
9662       // Validate & create a 12-bit signed immediate operand.
9663       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9664         uint64_t CVal = C->getSExtValue();
9665         if (isInt<12>(CVal))
9666           Ops.push_back(
9667               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9668       }
9669       return;
9670     case 'J':
9671       // Validate & create an integer zero operand.
9672       if (auto *C = dyn_cast<ConstantSDNode>(Op))
9673         if (C->getZExtValue() == 0)
9674           Ops.push_back(
9675               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
9676       return;
9677     case 'K':
9678       // Validate & create a 5-bit unsigned immediate operand.
9679       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9680         uint64_t CVal = C->getZExtValue();
9681         if (isUInt<5>(CVal))
9682           Ops.push_back(
9683               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9684       }
9685       return;
9686     case 'S':
9687       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9688         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9689                                                  GA->getValueType(0)));
9690       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
9691         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
9692                                                 BA->getValueType(0)));
9693       }
9694       return;
9695     default:
9696       break;
9697     }
9698   }
9699   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9700 }
9701 
9702 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
9703                                                    Instruction *Inst,
9704                                                    AtomicOrdering Ord) const {
9705   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
9706     return Builder.CreateFence(Ord);
9707   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
9708     return Builder.CreateFence(AtomicOrdering::Release);
9709   return nullptr;
9710 }
9711 
9712 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
9713                                                     Instruction *Inst,
9714                                                     AtomicOrdering Ord) const {
9715   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
9716     return Builder.CreateFence(AtomicOrdering::Acquire);
9717   return nullptr;
9718 }
9719 
9720 TargetLowering::AtomicExpansionKind
9721 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
9722   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
9723   // point operations can't be used in an lr/sc sequence without breaking the
9724   // forward-progress guarantee.
9725   if (AI->isFloatingPointOperation())
9726     return AtomicExpansionKind::CmpXChg;
9727 
9728   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
9729   if (Size == 8 || Size == 16)
9730     return AtomicExpansionKind::MaskedIntrinsic;
9731   return AtomicExpansionKind::None;
9732 }
9733 
9734 static Intrinsic::ID
9735 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
9736   if (XLen == 32) {
9737     switch (BinOp) {
9738     default:
9739       llvm_unreachable("Unexpected AtomicRMW BinOp");
9740     case AtomicRMWInst::Xchg:
9741       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
9742     case AtomicRMWInst::Add:
9743       return Intrinsic::riscv_masked_atomicrmw_add_i32;
9744     case AtomicRMWInst::Sub:
9745       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
9746     case AtomicRMWInst::Nand:
9747       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
9748     case AtomicRMWInst::Max:
9749       return Intrinsic::riscv_masked_atomicrmw_max_i32;
9750     case AtomicRMWInst::Min:
9751       return Intrinsic::riscv_masked_atomicrmw_min_i32;
9752     case AtomicRMWInst::UMax:
9753       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
9754     case AtomicRMWInst::UMin:
9755       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
9756     }
9757   }
9758 
9759   if (XLen == 64) {
9760     switch (BinOp) {
9761     default:
9762       llvm_unreachable("Unexpected AtomicRMW BinOp");
9763     case AtomicRMWInst::Xchg:
9764       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
9765     case AtomicRMWInst::Add:
9766       return Intrinsic::riscv_masked_atomicrmw_add_i64;
9767     case AtomicRMWInst::Sub:
9768       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
9769     case AtomicRMWInst::Nand:
9770       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
9771     case AtomicRMWInst::Max:
9772       return Intrinsic::riscv_masked_atomicrmw_max_i64;
9773     case AtomicRMWInst::Min:
9774       return Intrinsic::riscv_masked_atomicrmw_min_i64;
9775     case AtomicRMWInst::UMax:
9776       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
9777     case AtomicRMWInst::UMin:
9778       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
9779     }
9780   }
9781 
9782   llvm_unreachable("Unexpected XLen\n");
9783 }
9784 
9785 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
9786     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
9787     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
9788   unsigned XLen = Subtarget.getXLen();
9789   Value *Ordering =
9790       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
9791   Type *Tys[] = {AlignedAddr->getType()};
9792   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
9793       AI->getModule(),
9794       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
9795 
9796   if (XLen == 64) {
9797     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
9798     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9799     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
9800   }
9801 
9802   Value *Result;
9803 
9804   // Must pass the shift amount needed to sign extend the loaded value prior
9805   // to performing a signed comparison for min/max. ShiftAmt is the number of
9806   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
9807   // is the number of bits to left+right shift the value in order to
9808   // sign-extend.
9809   if (AI->getOperation() == AtomicRMWInst::Min ||
9810       AI->getOperation() == AtomicRMWInst::Max) {
9811     const DataLayout &DL = AI->getModule()->getDataLayout();
9812     unsigned ValWidth =
9813         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
9814     Value *SextShamt =
9815         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
9816     Result = Builder.CreateCall(LrwOpScwLoop,
9817                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
9818   } else {
9819     Result =
9820         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
9821   }
9822 
9823   if (XLen == 64)
9824     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9825   return Result;
9826 }
9827 
9828 TargetLowering::AtomicExpansionKind
9829 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
9830     AtomicCmpXchgInst *CI) const {
9831   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
9832   if (Size == 8 || Size == 16)
9833     return AtomicExpansionKind::MaskedIntrinsic;
9834   return AtomicExpansionKind::None;
9835 }
9836 
9837 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
9838     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
9839     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
9840   unsigned XLen = Subtarget.getXLen();
9841   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
9842   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
9843   if (XLen == 64) {
9844     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
9845     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
9846     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9847     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
9848   }
9849   Type *Tys[] = {AlignedAddr->getType()};
9850   Function *MaskedCmpXchg =
9851       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
9852   Value *Result = Builder.CreateCall(
9853       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
9854   if (XLen == 64)
9855     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9856   return Result;
9857 }
9858 
9859 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
9860   return false;
9861 }
9862 
9863 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
9864                                                      EVT VT) const {
9865   VT = VT.getScalarType();
9866 
9867   if (!VT.isSimple())
9868     return false;
9869 
9870   switch (VT.getSimpleVT().SimpleTy) {
9871   case MVT::f16:
9872     return Subtarget.hasStdExtZfh();
9873   case MVT::f32:
9874     return Subtarget.hasStdExtF();
9875   case MVT::f64:
9876     return Subtarget.hasStdExtD();
9877   default:
9878     break;
9879   }
9880 
9881   return false;
9882 }
9883 
9884 Register RISCVTargetLowering::getExceptionPointerRegister(
9885     const Constant *PersonalityFn) const {
9886   return RISCV::X10;
9887 }
9888 
9889 Register RISCVTargetLowering::getExceptionSelectorRegister(
9890     const Constant *PersonalityFn) const {
9891   return RISCV::X11;
9892 }
9893 
9894 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
9895   // Return false to suppress the unnecessary extensions if the LibCall
9896   // arguments or return value is f32 type for LP64 ABI.
9897   RISCVABI::ABI ABI = Subtarget.getTargetABI();
9898   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
9899     return false;
9900 
9901   return true;
9902 }
9903 
9904 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
9905   if (Subtarget.is64Bit() && Type == MVT::i32)
9906     return true;
9907 
9908   return IsSigned;
9909 }
9910 
9911 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
9912                                                  SDValue C) const {
9913   // Check integral scalar types.
9914   if (VT.isScalarInteger()) {
9915     // Omit the optimization if the sub target has the M extension and the data
9916     // size exceeds XLen.
9917     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
9918       return false;
9919     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
9920       // Break the MUL to a SLLI and an ADD/SUB.
9921       const APInt &Imm = ConstNode->getAPIntValue();
9922       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
9923           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
9924         return true;
9925       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
9926       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
9927           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
9928            (Imm - 8).isPowerOf2()))
9929         return true;
9930       // Omit the following optimization if the sub target has the M extension
9931       // and the data size >= XLen.
9932       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
9933         return false;
9934       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
9935       // a pair of LUI/ADDI.
9936       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
9937         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
9938         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
9939             (1 - ImmS).isPowerOf2())
9940         return true;
9941       }
9942     }
9943   }
9944 
9945   return false;
9946 }
9947 
9948 bool RISCVTargetLowering::isMulAddWithConstProfitable(
9949     const SDValue &AddNode, const SDValue &ConstNode) const {
9950   // Let the DAGCombiner decide for vectors.
9951   EVT VT = AddNode.getValueType();
9952   if (VT.isVector())
9953     return true;
9954 
9955   // Let the DAGCombiner decide for larger types.
9956   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
9957     return true;
9958 
9959   // It is worse if c1 is simm12 while c1*c2 is not.
9960   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
9961   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
9962   const APInt &C1 = C1Node->getAPIntValue();
9963   const APInt &C2 = C2Node->getAPIntValue();
9964   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
9965     return false;
9966 
9967   // Default to true and let the DAGCombiner decide.
9968   return true;
9969 }
9970 
9971 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
9972     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
9973     bool *Fast) const {
9974   if (!VT.isVector())
9975     return false;
9976 
9977   EVT ElemVT = VT.getVectorElementType();
9978   if (Alignment >= ElemVT.getStoreSize()) {
9979     if (Fast)
9980       *Fast = true;
9981     return true;
9982   }
9983 
9984   return false;
9985 }
9986 
9987 bool RISCVTargetLowering::splitValueIntoRegisterParts(
9988     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
9989     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
9990   bool IsABIRegCopy = CC.hasValue();
9991   EVT ValueVT = Val.getValueType();
9992   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
9993     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
9994     // and cast to f32.
9995     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
9996     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
9997     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
9998                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
9999     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
10000     Parts[0] = Val;
10001     return true;
10002   }
10003 
10004   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10005     LLVMContext &Context = *DAG.getContext();
10006     EVT ValueEltVT = ValueVT.getVectorElementType();
10007     EVT PartEltVT = PartVT.getVectorElementType();
10008     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10009     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10010     if (PartVTBitSize % ValueVTBitSize == 0) {
10011       // If the element types are different, bitcast to the same element type of
10012       // PartVT first.
10013       if (ValueEltVT != PartEltVT) {
10014         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
10015         assert(Count != 0 && "The number of element should not be zero.");
10016         EVT SameEltTypeVT =
10017             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
10018         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
10019       }
10020       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
10021                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
10022       Parts[0] = Val;
10023       return true;
10024     }
10025   }
10026   return false;
10027 }
10028 
10029 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
10030     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
10031     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
10032   bool IsABIRegCopy = CC.hasValue();
10033   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10034     SDValue Val = Parts[0];
10035 
10036     // Cast the f32 to i32, truncate to i16, and cast back to f16.
10037     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
10038     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
10039     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
10040     return Val;
10041   }
10042 
10043   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10044     LLVMContext &Context = *DAG.getContext();
10045     SDValue Val = Parts[0];
10046     EVT ValueEltVT = ValueVT.getVectorElementType();
10047     EVT PartEltVT = PartVT.getVectorElementType();
10048     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10049     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10050     if (PartVTBitSize % ValueVTBitSize == 0) {
10051       EVT SameEltTypeVT = ValueVT;
10052       // If the element types are different, convert it to the same element type
10053       // of PartVT.
10054       if (ValueEltVT != PartEltVT) {
10055         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
10056         assert(Count != 0 && "The number of element should not be zero.");
10057         SameEltTypeVT =
10058             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
10059       }
10060       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
10061                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
10062       if (ValueEltVT != PartEltVT)
10063         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
10064       return Val;
10065     }
10066   }
10067   return SDValue();
10068 }
10069 
10070 #define GET_REGISTER_MATCHER
10071 #include "RISCVGenAsmMatcher.inc"
10072 
10073 Register
10074 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
10075                                        const MachineFunction &MF) const {
10076   Register Reg = MatchRegisterAltName(RegName);
10077   if (Reg == RISCV::NoRegister)
10078     Reg = MatchRegisterName(RegName);
10079   if (Reg == RISCV::NoRegister)
10080     report_fatal_error(
10081         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
10082   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
10083   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
10084     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
10085                              StringRef(RegName) + "\"."));
10086   return Reg;
10087 }
10088 
10089 namespace llvm {
10090 namespace RISCVVIntrinsicsTable {
10091 
10092 #define GET_RISCVVIntrinsicsTable_IMPL
10093 #include "RISCVGenSearchableTables.inc"
10094 
10095 } // namespace RISCVVIntrinsicsTable
10096 
10097 } // namespace llvm
10098