1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/IntrinsicsRISCV.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/KnownBits.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "riscv-lower"
44 
45 STATISTIC(NumTailCalls, "Number of tail calls");
46 
47 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
48                                          const RISCVSubtarget &STI)
49     : TargetLowering(TM), Subtarget(STI) {
50 
51   if (Subtarget.isRV32E())
52     report_fatal_error("Codegen not yet implemented for RV32E");
53 
54   RISCVABI::ABI ABI = Subtarget.getTargetABI();
55   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
56 
57   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
58       !Subtarget.hasStdExtF()) {
59     errs() << "Hard-float 'f' ABI can't be used for a target that "
60                 "doesn't support the F instruction set extension (ignoring "
61                           "target-abi)\n";
62     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
63   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
64              !Subtarget.hasStdExtD()) {
65     errs() << "Hard-float 'd' ABI can't be used for a target that "
66               "doesn't support the D instruction set extension (ignoring "
67               "target-abi)\n";
68     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
69   }
70 
71   switch (ABI) {
72   default:
73     report_fatal_error("Don't know how to lower this ABI");
74   case RISCVABI::ABI_ILP32:
75   case RISCVABI::ABI_ILP32F:
76   case RISCVABI::ABI_ILP32D:
77   case RISCVABI::ABI_LP64:
78   case RISCVABI::ABI_LP64F:
79   case RISCVABI::ABI_LP64D:
80     break;
81   }
82 
83   MVT XLenVT = Subtarget.getXLenVT();
84 
85   // Set up the register classes.
86   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
87 
88   if (Subtarget.hasStdExtZfh())
89     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
90   if (Subtarget.hasStdExtF())
91     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
92   if (Subtarget.hasStdExtD())
93     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
94 
95   static const MVT::SimpleValueType BoolVecVTs[] = {
96       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
97       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
98   static const MVT::SimpleValueType IntVecVTs[] = {
99       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
100       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
101       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
102       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
103       MVT::nxv4i64, MVT::nxv8i64};
104   static const MVT::SimpleValueType F16VecVTs[] = {
105       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
106       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
107   static const MVT::SimpleValueType F32VecVTs[] = {
108       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
109   static const MVT::SimpleValueType F64VecVTs[] = {
110       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
111 
112   if (Subtarget.hasVInstructions()) {
113     auto addRegClassForRVV = [this](MVT VT) {
114       unsigned Size = VT.getSizeInBits().getKnownMinValue();
115       assert(Size <= 512 && isPowerOf2_32(Size));
116       const TargetRegisterClass *RC;
117       if (Size <= 64)
118         RC = &RISCV::VRRegClass;
119       else if (Size == 128)
120         RC = &RISCV::VRM2RegClass;
121       else if (Size == 256)
122         RC = &RISCV::VRM4RegClass;
123       else
124         RC = &RISCV::VRM8RegClass;
125 
126       addRegisterClass(VT, RC);
127     };
128 
129     for (MVT VT : BoolVecVTs)
130       addRegClassForRVV(VT);
131     for (MVT VT : IntVecVTs) {
132       if (VT.getVectorElementType() == MVT::i64 &&
133           !Subtarget.hasVInstructionsI64())
134         continue;
135       addRegClassForRVV(VT);
136     }
137 
138     if (Subtarget.hasVInstructionsF16())
139       for (MVT VT : F16VecVTs)
140         addRegClassForRVV(VT);
141 
142     if (Subtarget.hasVInstructionsF32())
143       for (MVT VT : F32VecVTs)
144         addRegClassForRVV(VT);
145 
146     if (Subtarget.hasVInstructionsF64())
147       for (MVT VT : F64VecVTs)
148         addRegClassForRVV(VT);
149 
150     if (Subtarget.useRVVForFixedLengthVectors()) {
151       auto addRegClassForFixedVectors = [this](MVT VT) {
152         MVT ContainerVT = getContainerForFixedLengthVector(VT);
153         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
154         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
155         addRegisterClass(VT, TRI.getRegClass(RCID));
156       };
157       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
158         if (useRVVForFixedLengthVectorVT(VT))
159           addRegClassForFixedVectors(VT);
160 
161       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164     }
165   }
166 
167   // Compute derived properties from the register classes.
168   computeRegisterProperties(STI.getRegisterInfo());
169 
170   setStackPointerRegisterToSaveRestore(RISCV::X2);
171 
172   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
173     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
174 
175   // TODO: add all necessary setOperationAction calls.
176   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
177 
178   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
179   setOperationAction(ISD::BR_CC, XLenVT, Expand);
180   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
181   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
182 
183   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
184   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction(ISD::VAARG, MVT::Other, Expand);
188   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
189   setOperationAction(ISD::VAEND, MVT::Other, Expand);
190 
191   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
192   if (!Subtarget.hasStdExtZbb()) {
193     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
195   }
196 
197   if (Subtarget.is64Bit()) {
198     setOperationAction(ISD::ADD, MVT::i32, Custom);
199     setOperationAction(ISD::SUB, MVT::i32, Custom);
200     setOperationAction(ISD::SHL, MVT::i32, Custom);
201     setOperationAction(ISD::SRA, MVT::i32, Custom);
202     setOperationAction(ISD::SRL, MVT::i32, Custom);
203 
204     setOperationAction(ISD::UADDO, MVT::i32, Custom);
205     setOperationAction(ISD::USUBO, MVT::i32, Custom);
206     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
207     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
208   } else {
209     setLibcallName(RTLIB::SHL_I128, nullptr);
210     setLibcallName(RTLIB::SRL_I128, nullptr);
211     setLibcallName(RTLIB::SRA_I128, nullptr);
212     setLibcallName(RTLIB::MUL_I128, nullptr);
213     setLibcallName(RTLIB::MULO_I64, nullptr);
214   }
215 
216   if (!Subtarget.hasStdExtM()) {
217     setOperationAction(ISD::MUL, XLenVT, Expand);
218     setOperationAction(ISD::MULHS, XLenVT, Expand);
219     setOperationAction(ISD::MULHU, XLenVT, Expand);
220     setOperationAction(ISD::SDIV, XLenVT, Expand);
221     setOperationAction(ISD::UDIV, XLenVT, Expand);
222     setOperationAction(ISD::SREM, XLenVT, Expand);
223     setOperationAction(ISD::UREM, XLenVT, Expand);
224   } else {
225     if (Subtarget.is64Bit()) {
226       setOperationAction(ISD::MUL, MVT::i32, Custom);
227       setOperationAction(ISD::MUL, MVT::i128, Custom);
228 
229       setOperationAction(ISD::SDIV, MVT::i8, Custom);
230       setOperationAction(ISD::UDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UREM, MVT::i8, Custom);
232       setOperationAction(ISD::SDIV, MVT::i16, Custom);
233       setOperationAction(ISD::UDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UREM, MVT::i16, Custom);
235       setOperationAction(ISD::SDIV, MVT::i32, Custom);
236       setOperationAction(ISD::UDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UREM, MVT::i32, Custom);
238     } else {
239       setOperationAction(ISD::MUL, MVT::i64, Custom);
240     }
241   }
242 
243   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
244   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
246   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
247 
248   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
249   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
251 
252   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
253     if (Subtarget.is64Bit()) {
254       setOperationAction(ISD::ROTL, MVT::i32, Custom);
255       setOperationAction(ISD::ROTR, MVT::i32, Custom);
256     }
257   } else {
258     setOperationAction(ISD::ROTL, XLenVT, Expand);
259     setOperationAction(ISD::ROTR, XLenVT, Expand);
260   }
261 
262   if (Subtarget.hasStdExtZbp()) {
263     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
264     // more combining.
265     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
266     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
267     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
268     // BSWAP i8 doesn't exist.
269     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
270     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
271 
272     if (Subtarget.is64Bit()) {
273       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
274       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
275     }
276   } else {
277     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
278     // pattern match it directly in isel.
279     setOperationAction(ISD::BSWAP, XLenVT,
280                        Subtarget.hasStdExtZbb() ? Legal : Expand);
281   }
282 
283   if (Subtarget.hasStdExtZbb()) {
284     setOperationAction(ISD::SMIN, XLenVT, Legal);
285     setOperationAction(ISD::SMAX, XLenVT, Legal);
286     setOperationAction(ISD::UMIN, XLenVT, Legal);
287     setOperationAction(ISD::UMAX, XLenVT, Legal);
288 
289     if (Subtarget.is64Bit()) {
290       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
291       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
292       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
293       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
294     }
295   } else {
296     setOperationAction(ISD::CTTZ, XLenVT, Expand);
297     setOperationAction(ISD::CTLZ, XLenVT, Expand);
298     setOperationAction(ISD::CTPOP, XLenVT, Expand);
299   }
300 
301   if (Subtarget.hasStdExtZbt()) {
302     setOperationAction(ISD::FSHL, XLenVT, Custom);
303     setOperationAction(ISD::FSHR, XLenVT, Custom);
304     setOperationAction(ISD::SELECT, XLenVT, Legal);
305 
306     if (Subtarget.is64Bit()) {
307       setOperationAction(ISD::FSHL, MVT::i32, Custom);
308       setOperationAction(ISD::FSHR, MVT::i32, Custom);
309     }
310   } else {
311     setOperationAction(ISD::SELECT, XLenVT, Custom);
312   }
313 
314   static const ISD::CondCode FPCCToExpand[] = {
315       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
316       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
317       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
318 
319   static const ISD::NodeType FPOpToExpand[] = {
320       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
321       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
322 
323   if (Subtarget.hasStdExtZfh())
324     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
325 
326   if (Subtarget.hasStdExtZfh()) {
327     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
328     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
329     setOperationAction(ISD::LRINT, MVT::f16, Legal);
330     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
331     setOperationAction(ISD::LROUND, MVT::f16, Legal);
332     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
333     for (auto CC : FPCCToExpand)
334       setCondCodeAction(CC, MVT::f16, Expand);
335     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
336     setOperationAction(ISD::SELECT, MVT::f16, Custom);
337     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
338     for (auto Op : FPOpToExpand)
339       setOperationAction(Op, MVT::f16, Expand);
340   }
341 
342   if (Subtarget.hasStdExtF()) {
343     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
344     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
345     setOperationAction(ISD::LRINT, MVT::f32, Legal);
346     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
347     setOperationAction(ISD::LROUND, MVT::f32, Legal);
348     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
349     for (auto CC : FPCCToExpand)
350       setCondCodeAction(CC, MVT::f32, Expand);
351     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
352     setOperationAction(ISD::SELECT, MVT::f32, Custom);
353     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
354     for (auto Op : FPOpToExpand)
355       setOperationAction(Op, MVT::f32, Expand);
356     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
357     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
358   }
359 
360   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
361     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
362 
363   if (Subtarget.hasStdExtD()) {
364     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
365     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
366     setOperationAction(ISD::LRINT, MVT::f64, Legal);
367     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
368     setOperationAction(ISD::LROUND, MVT::f64, Legal);
369     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
370     for (auto CC : FPCCToExpand)
371       setCondCodeAction(CC, MVT::f64, Expand);
372     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
373     setOperationAction(ISD::SELECT, MVT::f64, Custom);
374     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
375     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
376     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
377     for (auto Op : FPOpToExpand)
378       setOperationAction(Op, MVT::f64, Expand);
379     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
380     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
381   }
382 
383   if (Subtarget.is64Bit()) {
384     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
385     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
386     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
387     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
388   }
389 
390   if (Subtarget.hasStdExtF()) {
391     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
392     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
393 
394     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
395     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
396   }
397 
398   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
399   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
400   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
401   setOperationAction(ISD::JumpTable, XLenVT, Custom);
402 
403   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
404 
405   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
406   // Unfortunately this can't be determined just from the ISA naming string.
407   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
408                      Subtarget.is64Bit() ? Legal : Custom);
409 
410   setOperationAction(ISD::TRAP, MVT::Other, Legal);
411   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
412   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
413   if (Subtarget.is64Bit())
414     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
415 
416   if (Subtarget.hasStdExtA()) {
417     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
418     setMinCmpXchgSizeInBits(32);
419   } else {
420     setMaxAtomicSizeInBitsSupported(0);
421   }
422 
423   setBooleanContents(ZeroOrOneBooleanContent);
424 
425   if (Subtarget.hasVInstructions()) {
426     setBooleanVectorContents(ZeroOrOneBooleanContent);
427 
428     setOperationAction(ISD::VSCALE, XLenVT, Custom);
429 
430     // RVV intrinsics may have illegal operands.
431     // We also need to custom legalize vmv.x.s.
432     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
433     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
434     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
435     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
436     if (Subtarget.is64Bit()) {
437       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
438     } else {
439       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
440       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
441     }
442 
443     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
444     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
445 
446     static const unsigned IntegerVPOps[] = {
447         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
448         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
449         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
450         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
451         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
452         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
453         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN};
454 
455     static const unsigned FloatingPointVPOps[] = {
456         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
457         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
458         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX};
459 
460     if (!Subtarget.is64Bit()) {
461       // We must custom-lower certain vXi64 operations on RV32 due to the vector
462       // element type being illegal.
463       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
464       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
465 
466       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
467       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
468       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
469       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
470       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
471       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
472       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
473       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
474 
475       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
476       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
477       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
478       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
479       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
480       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
481       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
482       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
483     }
484 
485     for (MVT VT : BoolVecVTs) {
486       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
487 
488       // Mask VTs are custom-expanded into a series of standard nodes
489       setOperationAction(ISD::TRUNCATE, VT, Custom);
490       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
491       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
492       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
493 
494       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
495       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
496 
497       setOperationAction(ISD::SELECT, VT, Custom);
498       setOperationAction(ISD::SELECT_CC, VT, Expand);
499       setOperationAction(ISD::VSELECT, VT, Expand);
500 
501       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
502       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
503       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
504 
505       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
506       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
507       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
508 
509       // RVV has native int->float & float->int conversions where the
510       // element type sizes are within one power-of-two of each other. Any
511       // wider distances between type sizes have to be lowered as sequences
512       // which progressively narrow the gap in stages.
513       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
514       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
515       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
516       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
517 
518       // Expand all extending loads to types larger than this, and truncating
519       // stores from types larger than this.
520       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
521         setTruncStoreAction(OtherVT, VT, Expand);
522         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
523         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
524         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
525       }
526     }
527 
528     for (MVT VT : IntVecVTs) {
529       if (VT.getVectorElementType() == MVT::i64 &&
530           !Subtarget.hasVInstructionsI64())
531         continue;
532 
533       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
534       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
535 
536       setOperationAction(ISD::SMIN, VT, Legal);
537       setOperationAction(ISD::SMAX, VT, Legal);
538       setOperationAction(ISD::UMIN, VT, Legal);
539       setOperationAction(ISD::UMAX, VT, Legal);
540 
541       setOperationAction(ISD::ROTL, VT, Expand);
542       setOperationAction(ISD::ROTR, VT, Expand);
543 
544       setOperationAction(ISD::CTTZ, VT, Expand);
545       setOperationAction(ISD::CTLZ, VT, Expand);
546       setOperationAction(ISD::CTPOP, VT, Expand);
547 
548       setOperationAction(ISD::BSWAP, VT, Expand);
549 
550       // Custom-lower extensions and truncations from/to mask types.
551       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
552       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
553       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
554 
555       // RVV has native int->float & float->int conversions where the
556       // element type sizes are within one power-of-two of each other. Any
557       // wider distances between type sizes have to be lowered as sequences
558       // which progressively narrow the gap in stages.
559       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
560       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
561       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
562       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
563 
564       setOperationAction(ISD::SADDSAT, VT, Legal);
565       setOperationAction(ISD::UADDSAT, VT, Legal);
566       setOperationAction(ISD::SSUBSAT, VT, Legal);
567       setOperationAction(ISD::USUBSAT, VT, Legal);
568 
569       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
570       // nodes which truncate by one power of two at a time.
571       setOperationAction(ISD::TRUNCATE, VT, Custom);
572 
573       // Custom-lower insert/extract operations to simplify patterns.
574       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
575       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
576 
577       // Custom-lower reduction operations to set up the corresponding custom
578       // nodes' operands.
579       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
580       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
581       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
582       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
583       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
584       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
585       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
586       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
587 
588       for (unsigned VPOpc : IntegerVPOps)
589         setOperationAction(VPOpc, VT, Custom);
590 
591       setOperationAction(ISD::LOAD, VT, Custom);
592       setOperationAction(ISD::STORE, VT, Custom);
593 
594       setOperationAction(ISD::MLOAD, VT, Custom);
595       setOperationAction(ISD::MSTORE, VT, Custom);
596       setOperationAction(ISD::MGATHER, VT, Custom);
597       setOperationAction(ISD::MSCATTER, VT, Custom);
598 
599       setOperationAction(ISD::VP_LOAD, VT, Custom);
600       setOperationAction(ISD::VP_STORE, VT, Custom);
601       setOperationAction(ISD::VP_GATHER, VT, Custom);
602       setOperationAction(ISD::VP_SCATTER, VT, Custom);
603 
604       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
605       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
606       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
607 
608       setOperationAction(ISD::SELECT, VT, Custom);
609       setOperationAction(ISD::SELECT_CC, VT, Expand);
610 
611       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
612       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
613 
614       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
615         setTruncStoreAction(VT, OtherVT, Expand);
616         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
617         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
618         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
619       }
620     }
621 
622     // Expand various CCs to best match the RVV ISA, which natively supports UNE
623     // but no other unordered comparisons, and supports all ordered comparisons
624     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
625     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
626     // and we pattern-match those back to the "original", swapping operands once
627     // more. This way we catch both operations and both "vf" and "fv" forms with
628     // fewer patterns.
629     static const ISD::CondCode VFPCCToExpand[] = {
630         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
631         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
632         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
633     };
634 
635     // Sets common operation actions on RVV floating-point vector types.
636     const auto SetCommonVFPActions = [&](MVT VT) {
637       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
638       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
639       // sizes are within one power-of-two of each other. Therefore conversions
640       // between vXf16 and vXf64 must be lowered as sequences which convert via
641       // vXf32.
642       setOperationAction(ISD::FP_ROUND, VT, Custom);
643       setOperationAction(ISD::FP_EXTEND, VT, Custom);
644       // Custom-lower insert/extract operations to simplify patterns.
645       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
646       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
647       // Expand various condition codes (explained above).
648       for (auto CC : VFPCCToExpand)
649         setCondCodeAction(CC, VT, Expand);
650 
651       setOperationAction(ISD::FMINNUM, VT, Legal);
652       setOperationAction(ISD::FMAXNUM, VT, Legal);
653 
654       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
655       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
656       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
657       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
658 
659       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
660 
661       setOperationAction(ISD::LOAD, VT, Custom);
662       setOperationAction(ISD::STORE, VT, Custom);
663 
664       setOperationAction(ISD::MLOAD, VT, Custom);
665       setOperationAction(ISD::MSTORE, VT, Custom);
666       setOperationAction(ISD::MGATHER, VT, Custom);
667       setOperationAction(ISD::MSCATTER, VT, Custom);
668 
669       setOperationAction(ISD::VP_LOAD, VT, Custom);
670       setOperationAction(ISD::VP_STORE, VT, Custom);
671       setOperationAction(ISD::VP_GATHER, VT, Custom);
672       setOperationAction(ISD::VP_SCATTER, VT, Custom);
673 
674       setOperationAction(ISD::SELECT, VT, Custom);
675       setOperationAction(ISD::SELECT_CC, VT, Expand);
676 
677       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
678       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
679       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
680 
681       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
682 
683       for (unsigned VPOpc : FloatingPointVPOps)
684         setOperationAction(VPOpc, VT, Custom);
685     };
686 
687     // Sets common extload/truncstore actions on RVV floating-point vector
688     // types.
689     const auto SetCommonVFPExtLoadTruncStoreActions =
690         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
691           for (auto SmallVT : SmallerVTs) {
692             setTruncStoreAction(VT, SmallVT, Expand);
693             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
694           }
695         };
696 
697     if (Subtarget.hasVInstructionsF16())
698       for (MVT VT : F16VecVTs)
699         SetCommonVFPActions(VT);
700 
701     for (MVT VT : F32VecVTs) {
702       if (Subtarget.hasVInstructionsF32())
703         SetCommonVFPActions(VT);
704       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
705     }
706 
707     for (MVT VT : F64VecVTs) {
708       if (Subtarget.hasVInstructionsF64())
709         SetCommonVFPActions(VT);
710       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
711       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
712     }
713 
714     if (Subtarget.useRVVForFixedLengthVectors()) {
715       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
716         if (!useRVVForFixedLengthVectorVT(VT))
717           continue;
718 
719         // By default everything must be expanded.
720         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
721           setOperationAction(Op, VT, Expand);
722         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
723           setTruncStoreAction(VT, OtherVT, Expand);
724           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
725           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
726           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
727         }
728 
729         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
730         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
731         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
732 
733         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
734         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
735 
736         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
737         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
738 
739         setOperationAction(ISD::LOAD, VT, Custom);
740         setOperationAction(ISD::STORE, VT, Custom);
741 
742         setOperationAction(ISD::SETCC, VT, Custom);
743 
744         setOperationAction(ISD::SELECT, VT, Custom);
745 
746         setOperationAction(ISD::TRUNCATE, VT, Custom);
747 
748         setOperationAction(ISD::BITCAST, VT, Custom);
749 
750         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
751         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
752         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
753 
754         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
755         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
756         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
757 
758         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
759         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
760         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
761         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
762 
763         // Operations below are different for between masks and other vectors.
764         if (VT.getVectorElementType() == MVT::i1) {
765           setOperationAction(ISD::AND, VT, Custom);
766           setOperationAction(ISD::OR, VT, Custom);
767           setOperationAction(ISD::XOR, VT, Custom);
768           continue;
769         }
770 
771         // Use SPLAT_VECTOR to prevent type legalization from destroying the
772         // splats when type legalizing i64 scalar on RV32.
773         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
774         // improvements first.
775         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
776           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
777           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
778         }
779 
780         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
781         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
782 
783         setOperationAction(ISD::MLOAD, VT, Custom);
784         setOperationAction(ISD::MSTORE, VT, Custom);
785         setOperationAction(ISD::MGATHER, VT, Custom);
786         setOperationAction(ISD::MSCATTER, VT, Custom);
787 
788         setOperationAction(ISD::VP_LOAD, VT, Custom);
789         setOperationAction(ISD::VP_STORE, VT, Custom);
790         setOperationAction(ISD::VP_GATHER, VT, Custom);
791         setOperationAction(ISD::VP_SCATTER, VT, Custom);
792 
793         setOperationAction(ISD::ADD, VT, Custom);
794         setOperationAction(ISD::MUL, VT, Custom);
795         setOperationAction(ISD::SUB, VT, Custom);
796         setOperationAction(ISD::AND, VT, Custom);
797         setOperationAction(ISD::OR, VT, Custom);
798         setOperationAction(ISD::XOR, VT, Custom);
799         setOperationAction(ISD::SDIV, VT, Custom);
800         setOperationAction(ISD::SREM, VT, Custom);
801         setOperationAction(ISD::UDIV, VT, Custom);
802         setOperationAction(ISD::UREM, VT, Custom);
803         setOperationAction(ISD::SHL, VT, Custom);
804         setOperationAction(ISD::SRA, VT, Custom);
805         setOperationAction(ISD::SRL, VT, Custom);
806 
807         setOperationAction(ISD::SMIN, VT, Custom);
808         setOperationAction(ISD::SMAX, VT, Custom);
809         setOperationAction(ISD::UMIN, VT, Custom);
810         setOperationAction(ISD::UMAX, VT, Custom);
811         setOperationAction(ISD::ABS,  VT, Custom);
812 
813         setOperationAction(ISD::MULHS, VT, Custom);
814         setOperationAction(ISD::MULHU, VT, Custom);
815 
816         setOperationAction(ISD::SADDSAT, VT, Custom);
817         setOperationAction(ISD::UADDSAT, VT, Custom);
818         setOperationAction(ISD::SSUBSAT, VT, Custom);
819         setOperationAction(ISD::USUBSAT, VT, Custom);
820 
821         setOperationAction(ISD::VSELECT, VT, Custom);
822         setOperationAction(ISD::SELECT_CC, VT, Expand);
823 
824         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
825         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
826         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
827 
828         // Custom-lower reduction operations to set up the corresponding custom
829         // nodes' operands.
830         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
831         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
832         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
833         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
834         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
835 
836         for (unsigned VPOpc : IntegerVPOps)
837           setOperationAction(VPOpc, VT, Custom);
838       }
839 
840       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
841         if (!useRVVForFixedLengthVectorVT(VT))
842           continue;
843 
844         // By default everything must be expanded.
845         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
846           setOperationAction(Op, VT, Expand);
847         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
848           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
849           setTruncStoreAction(VT, OtherVT, Expand);
850         }
851 
852         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
853         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
854         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
855 
856         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
857         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
858         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
859         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
860         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
861 
862         setOperationAction(ISD::LOAD, VT, Custom);
863         setOperationAction(ISD::STORE, VT, Custom);
864         setOperationAction(ISD::MLOAD, VT, Custom);
865         setOperationAction(ISD::MSTORE, VT, Custom);
866         setOperationAction(ISD::MGATHER, VT, Custom);
867         setOperationAction(ISD::MSCATTER, VT, Custom);
868 
869         setOperationAction(ISD::VP_LOAD, VT, Custom);
870         setOperationAction(ISD::VP_STORE, VT, Custom);
871         setOperationAction(ISD::VP_GATHER, VT, Custom);
872         setOperationAction(ISD::VP_SCATTER, VT, Custom);
873 
874         setOperationAction(ISD::FADD, VT, Custom);
875         setOperationAction(ISD::FSUB, VT, Custom);
876         setOperationAction(ISD::FMUL, VT, Custom);
877         setOperationAction(ISD::FDIV, VT, Custom);
878         setOperationAction(ISD::FNEG, VT, Custom);
879         setOperationAction(ISD::FABS, VT, Custom);
880         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
881         setOperationAction(ISD::FSQRT, VT, Custom);
882         setOperationAction(ISD::FMA, VT, Custom);
883         setOperationAction(ISD::FMINNUM, VT, Custom);
884         setOperationAction(ISD::FMAXNUM, VT, Custom);
885 
886         setOperationAction(ISD::FP_ROUND, VT, Custom);
887         setOperationAction(ISD::FP_EXTEND, VT, Custom);
888 
889         for (auto CC : VFPCCToExpand)
890           setCondCodeAction(CC, VT, Expand);
891 
892         setOperationAction(ISD::VSELECT, VT, Custom);
893         setOperationAction(ISD::SELECT, VT, Custom);
894         setOperationAction(ISD::SELECT_CC, VT, Expand);
895 
896         setOperationAction(ISD::BITCAST, VT, Custom);
897 
898         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
899         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
900         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
901         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
902 
903         for (unsigned VPOpc : FloatingPointVPOps)
904           setOperationAction(VPOpc, VT, Custom);
905       }
906 
907       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
908       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
909       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
910       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
911       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
912       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
913       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
914       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
915     }
916   }
917 
918   // Function alignments.
919   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
920   setMinFunctionAlignment(FunctionAlignment);
921   setPrefFunctionAlignment(FunctionAlignment);
922 
923   setMinimumJumpTableEntries(5);
924 
925   // Jumps are expensive, compared to logic
926   setJumpIsExpensive();
927 
928   // We can use any register for comparisons
929   setHasMultipleConditionRegisters();
930 
931   setTargetDAGCombine(ISD::ADD);
932   setTargetDAGCombine(ISD::SUB);
933   setTargetDAGCombine(ISD::AND);
934   setTargetDAGCombine(ISD::OR);
935   setTargetDAGCombine(ISD::XOR);
936   setTargetDAGCombine(ISD::ANY_EXTEND);
937   setTargetDAGCombine(ISD::ZERO_EXTEND);
938   if (Subtarget.hasVInstructions()) {
939     setTargetDAGCombine(ISD::FCOPYSIGN);
940     setTargetDAGCombine(ISD::MGATHER);
941     setTargetDAGCombine(ISD::MSCATTER);
942     setTargetDAGCombine(ISD::VP_GATHER);
943     setTargetDAGCombine(ISD::VP_SCATTER);
944     setTargetDAGCombine(ISD::SRA);
945     setTargetDAGCombine(ISD::SRL);
946     setTargetDAGCombine(ISD::SHL);
947     setTargetDAGCombine(ISD::STORE);
948   }
949 }
950 
951 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
952                                             LLVMContext &Context,
953                                             EVT VT) const {
954   if (!VT.isVector())
955     return getPointerTy(DL);
956   if (Subtarget.hasVInstructions() &&
957       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
958     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
959   return VT.changeVectorElementTypeToInteger();
960 }
961 
962 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
963   return Subtarget.getXLenVT();
964 }
965 
966 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
967                                              const CallInst &I,
968                                              MachineFunction &MF,
969                                              unsigned Intrinsic) const {
970   auto &DL = I.getModule()->getDataLayout();
971   switch (Intrinsic) {
972   default:
973     return false;
974   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
975   case Intrinsic::riscv_masked_atomicrmw_add_i32:
976   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
977   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
978   case Intrinsic::riscv_masked_atomicrmw_max_i32:
979   case Intrinsic::riscv_masked_atomicrmw_min_i32:
980   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
981   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
982   case Intrinsic::riscv_masked_cmpxchg_i32: {
983     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
984     Info.opc = ISD::INTRINSIC_W_CHAIN;
985     Info.memVT = MVT::getVT(PtrTy->getElementType());
986     Info.ptrVal = I.getArgOperand(0);
987     Info.offset = 0;
988     Info.align = Align(4);
989     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
990                  MachineMemOperand::MOVolatile;
991     return true;
992   }
993   case Intrinsic::riscv_masked_strided_load:
994     Info.opc = ISD::INTRINSIC_W_CHAIN;
995     Info.ptrVal = I.getArgOperand(1);
996     Info.memVT = getValueType(DL, I.getType()->getScalarType());
997     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
998     Info.size = MemoryLocation::UnknownSize;
999     Info.flags |= MachineMemOperand::MOLoad;
1000     return true;
1001   case Intrinsic::riscv_masked_strided_store:
1002     Info.opc = ISD::INTRINSIC_VOID;
1003     Info.ptrVal = I.getArgOperand(1);
1004     Info.memVT =
1005         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1006     Info.align = Align(
1007         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1008         8);
1009     Info.size = MemoryLocation::UnknownSize;
1010     Info.flags |= MachineMemOperand::MOStore;
1011     return true;
1012   }
1013 }
1014 
1015 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1016                                                 const AddrMode &AM, Type *Ty,
1017                                                 unsigned AS,
1018                                                 Instruction *I) const {
1019   // No global is ever allowed as a base.
1020   if (AM.BaseGV)
1021     return false;
1022 
1023   // Require a 12-bit signed offset.
1024   if (!isInt<12>(AM.BaseOffs))
1025     return false;
1026 
1027   switch (AM.Scale) {
1028   case 0: // "r+i" or just "i", depending on HasBaseReg.
1029     break;
1030   case 1:
1031     if (!AM.HasBaseReg) // allow "r+i".
1032       break;
1033     return false; // disallow "r+r" or "r+r+i".
1034   default:
1035     return false;
1036   }
1037 
1038   return true;
1039 }
1040 
1041 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1042   return isInt<12>(Imm);
1043 }
1044 
1045 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1046   return isInt<12>(Imm);
1047 }
1048 
1049 // On RV32, 64-bit integers are split into their high and low parts and held
1050 // in two different registers, so the trunc is free since the low register can
1051 // just be used.
1052 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1053   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1054     return false;
1055   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1056   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1057   return (SrcBits == 64 && DestBits == 32);
1058 }
1059 
1060 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1061   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1062       !SrcVT.isInteger() || !DstVT.isInteger())
1063     return false;
1064   unsigned SrcBits = SrcVT.getSizeInBits();
1065   unsigned DestBits = DstVT.getSizeInBits();
1066   return (SrcBits == 64 && DestBits == 32);
1067 }
1068 
1069 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1070   // Zexts are free if they can be combined with a load.
1071   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1072     EVT MemVT = LD->getMemoryVT();
1073     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
1074          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
1075         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1076          LD->getExtensionType() == ISD::ZEXTLOAD))
1077       return true;
1078   }
1079 
1080   return TargetLowering::isZExtFree(Val, VT2);
1081 }
1082 
1083 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1084   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1085 }
1086 
1087 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1088   return Subtarget.hasStdExtZbb();
1089 }
1090 
1091 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1092   return Subtarget.hasStdExtZbb();
1093 }
1094 
1095 /// Check if sinking \p I's operands to I's basic block is profitable, because
1096 /// the operands can be folded into a target instruction, e.g.
1097 /// splats of scalars can fold into vector instructions.
1098 bool RISCVTargetLowering::shouldSinkOperands(
1099     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1100   using namespace llvm::PatternMatch;
1101 
1102   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1103     return false;
1104 
1105   auto IsSinker = [&](Instruction *I, int Operand) {
1106     switch (I->getOpcode()) {
1107     case Instruction::Add:
1108     case Instruction::Sub:
1109     case Instruction::Mul:
1110     case Instruction::And:
1111     case Instruction::Or:
1112     case Instruction::Xor:
1113     case Instruction::FAdd:
1114     case Instruction::FSub:
1115     case Instruction::FMul:
1116     case Instruction::FDiv:
1117     case Instruction::ICmp:
1118     case Instruction::FCmp:
1119       return true;
1120     case Instruction::Shl:
1121     case Instruction::LShr:
1122     case Instruction::AShr:
1123       return Operand == 1;
1124     case Instruction::Call:
1125       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1126         switch (II->getIntrinsicID()) {
1127         case Intrinsic::fma:
1128           return Operand == 0 || Operand == 1;
1129         default:
1130           return false;
1131         }
1132       }
1133       return false;
1134     default:
1135       return false;
1136     }
1137   };
1138 
1139   for (auto OpIdx : enumerate(I->operands())) {
1140     if (!IsSinker(I, OpIdx.index()))
1141       continue;
1142 
1143     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1144     // Make sure we are not already sinking this operand
1145     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1146       continue;
1147 
1148     // We are looking for a splat that can be sunk.
1149     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1150                              m_Undef(), m_ZeroMask())))
1151       continue;
1152 
1153     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1154     // and vector registers
1155     for (Use &U : Op->uses()) {
1156       Instruction *Insn = cast<Instruction>(U.getUser());
1157       if (!IsSinker(Insn, U.getOperandNo()))
1158         return false;
1159     }
1160 
1161     Ops.push_back(&Op->getOperandUse(0));
1162     Ops.push_back(&OpIdx.value());
1163   }
1164   return true;
1165 }
1166 
1167 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1168                                        bool ForCodeSize) const {
1169   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1170     return false;
1171   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1172     return false;
1173   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1174     return false;
1175   if (Imm.isNegZero())
1176     return false;
1177   return Imm.isZero();
1178 }
1179 
1180 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1181   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1182          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1183          (VT == MVT::f64 && Subtarget.hasStdExtD());
1184 }
1185 
1186 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1187                                                       CallingConv::ID CC,
1188                                                       EVT VT) const {
1189   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1190   // end up using a GPR but that will be decided based on ABI.
1191   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1192     return MVT::f32;
1193 
1194   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1195 }
1196 
1197 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1198                                                            CallingConv::ID CC,
1199                                                            EVT VT) const {
1200   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1201   // end up using a GPR but that will be decided based on ABI.
1202   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1203     return 1;
1204 
1205   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1206 }
1207 
1208 // Changes the condition code and swaps operands if necessary, so the SetCC
1209 // operation matches one of the comparisons supported directly by branches
1210 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1211 // with 1/-1.
1212 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1213                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1214   // Convert X > -1 to X >= 0.
1215   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1216     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1217     CC = ISD::SETGE;
1218     return;
1219   }
1220   // Convert X < 1 to 0 >= X.
1221   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1222     RHS = LHS;
1223     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1224     CC = ISD::SETGE;
1225     return;
1226   }
1227 
1228   switch (CC) {
1229   default:
1230     break;
1231   case ISD::SETGT:
1232   case ISD::SETLE:
1233   case ISD::SETUGT:
1234   case ISD::SETULE:
1235     CC = ISD::getSetCCSwappedOperands(CC);
1236     std::swap(LHS, RHS);
1237     break;
1238   }
1239 }
1240 
1241 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1242   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1243   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1244   if (VT.getVectorElementType() == MVT::i1)
1245     KnownSize *= 8;
1246 
1247   switch (KnownSize) {
1248   default:
1249     llvm_unreachable("Invalid LMUL.");
1250   case 8:
1251     return RISCVII::VLMUL::LMUL_F8;
1252   case 16:
1253     return RISCVII::VLMUL::LMUL_F4;
1254   case 32:
1255     return RISCVII::VLMUL::LMUL_F2;
1256   case 64:
1257     return RISCVII::VLMUL::LMUL_1;
1258   case 128:
1259     return RISCVII::VLMUL::LMUL_2;
1260   case 256:
1261     return RISCVII::VLMUL::LMUL_4;
1262   case 512:
1263     return RISCVII::VLMUL::LMUL_8;
1264   }
1265 }
1266 
1267 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1268   switch (LMul) {
1269   default:
1270     llvm_unreachable("Invalid LMUL.");
1271   case RISCVII::VLMUL::LMUL_F8:
1272   case RISCVII::VLMUL::LMUL_F4:
1273   case RISCVII::VLMUL::LMUL_F2:
1274   case RISCVII::VLMUL::LMUL_1:
1275     return RISCV::VRRegClassID;
1276   case RISCVII::VLMUL::LMUL_2:
1277     return RISCV::VRM2RegClassID;
1278   case RISCVII::VLMUL::LMUL_4:
1279     return RISCV::VRM4RegClassID;
1280   case RISCVII::VLMUL::LMUL_8:
1281     return RISCV::VRM8RegClassID;
1282   }
1283 }
1284 
1285 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1286   RISCVII::VLMUL LMUL = getLMUL(VT);
1287   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1288       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1289       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1290       LMUL == RISCVII::VLMUL::LMUL_1) {
1291     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1292                   "Unexpected subreg numbering");
1293     return RISCV::sub_vrm1_0 + Index;
1294   }
1295   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1296     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1297                   "Unexpected subreg numbering");
1298     return RISCV::sub_vrm2_0 + Index;
1299   }
1300   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1301     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1302                   "Unexpected subreg numbering");
1303     return RISCV::sub_vrm4_0 + Index;
1304   }
1305   llvm_unreachable("Invalid vector type.");
1306 }
1307 
1308 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1309   if (VT.getVectorElementType() == MVT::i1)
1310     return RISCV::VRRegClassID;
1311   return getRegClassIDForLMUL(getLMUL(VT));
1312 }
1313 
1314 // Attempt to decompose a subvector insert/extract between VecVT and
1315 // SubVecVT via subregister indices. Returns the subregister index that
1316 // can perform the subvector insert/extract with the given element index, as
1317 // well as the index corresponding to any leftover subvectors that must be
1318 // further inserted/extracted within the register class for SubVecVT.
1319 std::pair<unsigned, unsigned>
1320 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1321     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1322     const RISCVRegisterInfo *TRI) {
1323   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1324                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1325                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1326                 "Register classes not ordered");
1327   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1328   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1329   // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
1332   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1333   // Note that this is not guaranteed to find a subregister index, such as
1334   // when we are extracting from one VR type to another.
1335   unsigned SubRegIdx = RISCV::NoSubRegister;
1336   for (const unsigned RCID :
1337        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1338     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1339       VecVT = VecVT.getHalfNumVectorElementsVT();
1340       bool IsHi =
1341           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1342       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1343                                             getSubregIndexByMVT(VecVT, IsHi));
1344       if (IsHi)
1345         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1346     }
1347   return {SubRegIdx, InsertExtractIdx};
1348 }
1349 
1350 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1351 // stores for those types.
1352 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1353   return !Subtarget.useRVVForFixedLengthVectors() ||
1354          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1355 }
1356 
1357 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1358   if (ScalarTy->isPointerTy())
1359     return true;
1360 
1361   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1362       ScalarTy->isIntegerTy(32))
1363     return true;
1364 
1365   if (ScalarTy->isIntegerTy(64))
1366     return Subtarget.hasVInstructionsI64();
1367 
1368   if (ScalarTy->isHalfTy())
1369     return Subtarget.hasVInstructionsF16();
1370   if (ScalarTy->isFloatTy())
1371     return Subtarget.hasVInstructionsF32();
1372   if (ScalarTy->isDoubleTy())
1373     return Subtarget.hasVInstructionsF64();
1374 
1375   return false;
1376 }
1377 
1378 static bool useRVVForFixedLengthVectorVT(MVT VT,
1379                                          const RISCVSubtarget &Subtarget) {
1380   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1381   if (!Subtarget.useRVVForFixedLengthVectors())
1382     return false;
1383 
1384   // We only support a set of vector types with a consistent maximum fixed size
1385   // across all supported vector element types to avoid legalization issues.
1386   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1387   // fixed-length vector type we support is 1024 bytes.
1388   if (VT.getFixedSizeInBits() > 1024 * 8)
1389     return false;
1390 
1391   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1392 
1393   MVT EltVT = VT.getVectorElementType();
1394 
1395   // Don't use RVV for vectors we cannot scalarize if required.
  switch (EltVT.SimpleTy) {
  default:
    return false;
  // i1 is supported but has different rules.
  case MVT::i1:
1401     // Masks can only use a single register.
1402     if (VT.getVectorNumElements() > MinVLen)
1403       return false;
1404     MinVLen /= 8;
1405     break;
1406   case MVT::i8:
1407   case MVT::i16:
1408   case MVT::i32:
1409     break;
1410   case MVT::i64:
1411     if (!Subtarget.hasVInstructionsI64())
1412       return false;
1413     break;
1414   case MVT::f16:
1415     if (!Subtarget.hasVInstructionsF16())
1416       return false;
1417     break;
1418   case MVT::f32:
1419     if (!Subtarget.hasVInstructionsF32())
1420       return false;
1421     break;
1422   case MVT::f64:
1423     if (!Subtarget.hasVInstructionsF64())
1424       return false;
1425     break;
1426   }
1427 
1428   // Reject elements larger than ELEN.
1429   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1430     return false;
1431 
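  // For example, v8i32 (256 fixed bits) with MinVLen=128 computes an LMUL of
  // 2. For i1 masks MinVLen was divided by 8 above, presumably so that a mask
  // is costed like the byte vector that might produce it: v64i1 with
  // MinVLen=128 computes an LMUL of 4, matching v64i8.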
1432   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1433   // Don't use RVV for types that don't fit.
1434   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1435     return false;
1436 
1437   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1438   // the base fixed length RVV support in place.
1439   if (!VT.isPow2VectorType())
1440     return false;
1441 
1442   return true;
1443 }
1444 
1445 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1446   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1447 }
1448 
// Return the smallest legal scalable vector type capable of holding a
// fixed-length vector of type VT, preserving VT's element type.
1450 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1451                                             const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
1453   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1454           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1455          "Expected legal fixed length vector!");
1456 
1457   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1458   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1459 
1460   MVT EltVT = VT.getVectorElementType();
1461   switch (EltVT.SimpleTy) {
1462   default:
1463     llvm_unreachable("unexpected element type for RVV container");
1464   case MVT::i1:
1465   case MVT::i8:
1466   case MVT::i16:
1467   case MVT::i32:
1468   case MVT::i64:
1469   case MVT::f16:
1470   case MVT::f32:
1471   case MVT::f64: {
1472     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1473     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1474     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
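    // For example, with MinVLen=128: v4i32 (one full VLEN) maps to nxv2i32,
    // which is LMUL=1, while v2i32 maps to nxv1i32, a fractional LMUL of 1/2.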
1475     unsigned NumElts =
1476         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1477     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1478     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1479     return MVT::getScalableVectorVT(EltVT, NumElts);
1480   }
1481   }
1482 }
1483 
1484 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1485                                             const RISCVSubtarget &Subtarget) {
1486   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1487                                           Subtarget);
1488 }
1489 
1490 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1491   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1492 }
1493 
1494 // Grow V to consume an entire RVV register.
1495 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1496                                        const RISCVSubtarget &Subtarget) {
1497   assert(VT.isScalableVector() &&
1498          "Expected to convert into a scalable vector!");
1499   assert(V.getValueType().isFixedLengthVector() &&
1500          "Expected a fixed length vector operand!");
1501   SDLoc DL(V);
1502   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1503   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1504 }
1505 
1506 // Shrink V so it's just big enough to maintain a VT's worth of data.
1507 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1508                                          const RISCVSubtarget &Subtarget) {
1509   assert(VT.isFixedLengthVector() &&
1510          "Expected to convert into a fixed length vector!");
1511   assert(V.getValueType().isScalableVector() &&
1512          "Expected a scalable vector operand!");
1513   SDLoc DL(V);
1514   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1515   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1516 }
1517 
1518 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1519 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1520 // the vector type that it is contained in.
1521 static std::pair<SDValue, SDValue>
1522 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1523                 const RISCVSubtarget &Subtarget) {
1524   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1525   MVT XLenVT = Subtarget.getXLenVT();
1526   SDValue VL = VecVT.isFixedLengthVector()
1527                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1528                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1529   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1530   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1531   return {Mask, VL};
1532 }
1533 
1534 // As above but assuming the given type is a scalable vector type.
1535 static std::pair<SDValue, SDValue>
1536 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1537                         const RISCVSubtarget &Subtarget) {
1538   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1539   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1540 }
1541 
// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// cases of either are (currently) supported. This can get us into an infinite
// loop where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a
// BUILD_VECTOR as a ..., etc.
1546 // Until either (or both) of these can reliably lower any node, reporting that
1547 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1548 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1549 // which is not desirable.
1550 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1551     EVT VT, unsigned DefinedValues) const {
1552   return false;
1553 }
1554 
1555 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1556   // Only splats are currently supported.
1557   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1558     return true;
1559 
1560   return false;
1561 }
1562 
1563 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
1564   // RISCV FP-to-int conversions saturate to the destination register size, but
1565   // don't produce 0 for nan. We can use a conversion instruction and fix the
1566   // nan case with a compare and a select.
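  // The (setuo Src, Src) condition below is true exactly when Src is NaN, in
  // which case the select returns 0 rather than the converted value.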
1567   SDValue Src = Op.getOperand(0);
1568 
1569   EVT DstVT = Op.getValueType();
1570   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1571 
1572   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1573   unsigned Opc;
1574   if (SatVT == DstVT)
1575     Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
1576   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1577     Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
1578   else
1579     return SDValue();
1580   // FIXME: Support other SatVTs by clamping before or after the conversion.
1581 
1582   SDLoc DL(Op);
1583   SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);
1584 
1585   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1586   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1587 }
1588 
1589 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1590                                  const RISCVSubtarget &Subtarget) {
1591   MVT VT = Op.getSimpleValueType();
1592   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1593 
1594   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1595 
1596   SDLoc DL(Op);
1597   SDValue Mask, VL;
1598   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1599 
1600   unsigned Opc =
1601       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1602   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1603   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1604 }
1605 
1606 struct VIDSequence {
1607   int64_t StepNumerator;
1608   unsigned StepDenominator;
1609   int64_t Addend;
1610 };
1611 
1612 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
1614 // RVV sequence (VID * S) + X, for example.
1615 // The step S is represented as an integer numerator divided by a positive
1616 // denominator. Note that the implementation currently only identifies
1617 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1618 // cannot detect 2/3, for example.
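// For example, <i32 1, i32 1, i32 2, i32 2> is matched as StepNumerator=1,
// StepDenominator=2, Addend=1, i.e. (VID / 2) + 1.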
1619 // Note that this method will also match potentially unappealing index
1620 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1621 // determine whether this is worth generating code for.
1622 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1623   unsigned NumElts = Op.getNumOperands();
1624   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1625   if (!Op.getValueType().isInteger())
1626     return None;
1627 
1628   Optional<unsigned> SeqStepDenom;
1629   Optional<int64_t> SeqStepNum, SeqAddend;
1630   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1631   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1632   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1633     // Assume undef elements match the sequence; we just have to be careful
1634     // when interpolating across them.
1635     if (Op.getOperand(Idx).isUndef())
1636       continue;
1637     // The BUILD_VECTOR must be all constants.
1638     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1639       return None;
1640 
1641     uint64_t Val = Op.getConstantOperandVal(Idx) &
1642                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1643 
1644     if (PrevElt) {
1645       // Calculate the step since the last non-undef element, and ensure
1646       // it's consistent across the entire sequence.
1647       unsigned IdxDiff = Idx - PrevElt->second;
1648       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1649 
      // A zero value difference means that we're somewhere in the middle
1651       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1652       // step change before evaluating the sequence.
1653       if (ValDiff != 0) {
1654         int64_t Remainder = ValDiff % IdxDiff;
1655         // Normalize the step if it's greater than 1.
1656         if (Remainder != ValDiff) {
1657           // The difference must cleanly divide the element span.
1658           if (Remainder != 0)
1659             return None;
1660           ValDiff /= IdxDiff;
1661           IdxDiff = 1;
1662         }
1663 
1664         if (!SeqStepNum)
1665           SeqStepNum = ValDiff;
1666         else if (ValDiff != SeqStepNum)
1667           return None;
1668 
1669         if (!SeqStepDenom)
1670           SeqStepDenom = IdxDiff;
1671         else if (IdxDiff != *SeqStepDenom)
1672           return None;
1673       }
1674     }
1675 
1676     // Record and/or check any addend.
1677     if (SeqStepNum && SeqStepDenom) {
1678       uint64_t ExpectedVal =
1679           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1680       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1681       if (!SeqAddend)
1682         SeqAddend = Addend;
1683       else if (SeqAddend != Addend)
1684         return None;
1685     }
1686 
1687     // Record this non-undef element for later.
1688     if (!PrevElt || PrevElt->first != Val)
1689       PrevElt = std::make_pair(Val, Idx);
1690   }
1691   // We need to have logged both a step and an addend for this to count as
1692   // a legal index sequence.
1693   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1694     return None;
1695 
1696   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1697 }
1698 
1699 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1700                                  const RISCVSubtarget &Subtarget) {
1701   MVT VT = Op.getSimpleValueType();
1702   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1703 
1704   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1705 
1706   SDLoc DL(Op);
1707   SDValue Mask, VL;
1708   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1709 
1710   MVT XLenVT = Subtarget.getXLenVT();
1711   unsigned NumElts = Op.getNumOperands();
1712 
1713   if (VT.getVectorElementType() == MVT::i1) {
1714     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1715       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1716       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1717     }
1718 
1719     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1720       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1721       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1722     }
1723 
1724     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1725     // scalar integer chunks whose bit-width depends on the number of mask
1726     // bits and XLEN.
1727     // First, determine the most appropriate scalar integer type to use. This
1728     // is at most XLenVT, but may be shrunk to a smaller vector element type
1729     // according to the size of the final vector - use i8 chunks rather than
1730     // XLenVT if we're producing a v8i1. This results in more consistent
1731     // codegen across RV32 and RV64.
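    // For example, a constant v16i1 mask is accumulated into a single i16
    // chunk (a v1i16 integer vector), while v64i1 on RV32 uses two i32 chunks
    // (v2i32).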
1732     unsigned NumViaIntegerBits =
1733         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1734     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. A load from a constant pool can be used instead.
1738       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1739         return SDValue();
1740       // Now we can create our integer vector type. Note that it may be larger
1741       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1742       MVT IntegerViaVecVT =
1743           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1744                            divideCeil(NumElts, NumViaIntegerBits));
1745 
1746       uint64_t Bits = 0;
1747       unsigned BitPos = 0, IntegerEltIdx = 0;
1748       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1749 
1750       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1751         // Once we accumulate enough bits to fill our scalar type, insert into
1752         // our vector and clear our accumulated data.
1753         if (I != 0 && I % NumViaIntegerBits == 0) {
1754           if (NumViaIntegerBits <= 32)
1755             Bits = SignExtend64(Bits, 32);
1756           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1757           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1758                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1759           Bits = 0;
1760           BitPos = 0;
1761           IntegerEltIdx++;
1762         }
1763         SDValue V = Op.getOperand(I);
1764         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1765         Bits |= ((uint64_t)BitValue << BitPos);
1766       }
1767 
1768       // Insert the (remaining) scalar value into position in our integer
1769       // vector type.
1770       if (NumViaIntegerBits <= 32)
1771         Bits = SignExtend64(Bits, 32);
1772       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1773       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1774                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1775 
1776       if (NumElts < NumViaIntegerBits) {
1777         // If we're producing a smaller vector than our minimum legal integer
1778         // type, bitcast to the equivalent (known-legal) mask type, and extract
1779         // our final mask.
1780         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1781         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1782         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1783                           DAG.getConstant(0, DL, XLenVT));
1784       } else {
1785         // Else we must have produced an integer type with the same size as the
1786         // mask type; bitcast for the final result.
1787         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1788         Vec = DAG.getBitcast(VT, Vec);
1789       }
1790 
1791       return Vec;
1792     }
1793 
1794     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1795     // vector type, we have a legal equivalently-sized i8 type, so we can use
1796     // that.
1797     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1798     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1799 
1800     SDValue WideVec;
1801     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1802       // For a splat, perform a scalar truncate before creating the wider
1803       // vector.
1804       assert(Splat.getValueType() == XLenVT &&
1805              "Unexpected type for i1 splat value");
1806       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1807                           DAG.getConstant(1, DL, XLenVT));
1808       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1809     } else {
1810       SmallVector<SDValue, 8> Ops(Op->op_values());
1811       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1812       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1813       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1814     }
1815 
1816     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1817   }
1818 
1819   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1820     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1821                                         : RISCVISD::VMV_V_X_VL;
1822     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1823     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1824   }
1825 
1826   // Try and match index sequences, which we can lower to the vid instruction
1827   // with optional modifications. An all-undef vector is matched by
1828   // getSplatValue, above.
1829   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1830     int64_t StepNumerator = SimpleVID->StepNumerator;
1831     unsigned StepDenominator = SimpleVID->StepDenominator;
1832     int64_t Addend = SimpleVID->Addend;
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
1835     if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
1836         isInt<5>(Addend)) {
1837       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1838       // Convert right out of the scalable type so we can use standard ISD
1839       // nodes for the rest of the computation. If we used scalable types with
1840       // these, we'd lose the fixed-length vector info and generate worse
1841       // vsetvli code.
1842       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1843       assert(StepNumerator != 0 && "Invalid step");
1844       bool Negate = false;
1845       if (StepNumerator != 1) {
1846         int64_t SplatStepVal = StepNumerator;
1847         unsigned Opcode = ISD::MUL;
1848         if (isPowerOf2_64(std::abs(StepNumerator))) {
1849           Negate = StepNumerator < 0;
1850           Opcode = ISD::SHL;
1851           SplatStepVal = Log2_64(std::abs(StepNumerator));
1852         }
1853         SDValue SplatStep = DAG.getSplatVector(
1854             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1855         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1856       }
1857       if (StepDenominator != 1) {
1858         SDValue SplatStep = DAG.getSplatVector(
1859             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
1860         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
1861       }
1862       if (Addend != 0 || Negate) {
1863         SDValue SplatAddend =
1864             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
1865         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
1866       }
1867       return VID;
1868     }
1869   }
1870 
1871   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1872   // when re-interpreted as a vector with a larger element type. For example,
1873   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1874   // could be instead splat as
1875   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1876   // TODO: This optimization could also work on non-constant splats, but it
1877   // would require bit-manipulation instructions to construct the splat value.
1878   SmallVector<SDValue> Sequence;
1879   unsigned EltBitSize = VT.getScalarSizeInBits();
1880   const auto *BV = cast<BuildVectorSDNode>(Op);
1881   if (VT.isInteger() && EltBitSize < 64 &&
1882       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1883       BV->getRepeatedSequence(Sequence) &&
1884       (Sequence.size() * EltBitSize) <= 64) {
1885     unsigned SeqLen = Sequence.size();
1886     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1887     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1888     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1889             ViaIntVT == MVT::i64) &&
1890            "Unexpected sequence type");
1891 
1892     unsigned EltIdx = 0;
1893     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1894     uint64_t SplatValue = 0;
1895     // Construct the amalgamated value which can be splatted as this larger
1896     // vector type.
1897     for (const auto &SeqV : Sequence) {
1898       if (!SeqV.isUndef())
1899         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1900                        << (EltIdx * EltBitSize));
1901       EltIdx++;
1902     }
1903 
    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1906     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1907       SplatValue = SignExtend64(SplatValue, 32);
1908 
1909     // Since we can't introduce illegal i64 types at this stage, we can only
1910     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1911     // way we can use RVV instructions to splat.
1912     assert((ViaIntVT.bitsLE(XLenVT) ||
1913             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1914            "Unexpected bitcast sequence");
1915     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1916       SDValue ViaVL =
1917           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1918       MVT ViaContainerVT =
1919           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1920       SDValue Splat =
1921           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1922                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1923       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1924       return DAG.getBitcast(VT, Splat);
1925     }
1926   }
1927 
1928   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1929   // which constitute a large proportion of the elements. In such cases we can
1930   // splat a vector with the dominant element and make up the shortfall with
1931   // INSERT_VECTOR_ELTs.
  // Note that two-element vectors are always covered by this heuristic. The
  // upper-most element is the "dominant" one, allowing us to use a splat to
  // "insert" the upper element, and an insert of the lower element at position
  // 0, which improves codegen.
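  // For example, <2, 2, 2, 2, 3, 2, 2, 2> is lowered as a splat of 2 followed
  // by a single INSERT_VECTOR_ELT of 3 at index 4.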
1936   SDValue DominantValue;
1937   unsigned MostCommonCount = 0;
1938   DenseMap<SDValue, unsigned> ValueCounts;
1939   unsigned NumUndefElts =
1940       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1941 
  // Track the number of scalar loads we know we'd be inserting, counting each
  // non-zero floating-point constant as one load. Other kinds of element are
  // either already in registers or are materialized on demand. The threshold
  // at which a vector load is more desirable than several scalar
  // materializations and vector insertions is not known.
1947   unsigned NumScalarLoads = 0;
1948 
1949   for (SDValue V : Op->op_values()) {
1950     if (V.isUndef())
1951       continue;
1952 
1953     ValueCounts.insert(std::make_pair(V, 0));
1954     unsigned &Count = ValueCounts[V];
1955 
1956     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
1957       NumScalarLoads += !CFP->isExactlyValue(+0.0);
1958 
1959     // Is this value dominant? In case of a tie, prefer the highest element as
1960     // it's cheaper to insert near the beginning of a vector than it is at the
1961     // end.
1962     if (++Count >= MostCommonCount) {
1963       DominantValue = V;
1964       MostCommonCount = Count;
1965     }
1966   }
1967 
1968   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1969   unsigned NumDefElts = NumElts - NumUndefElts;
1970   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1971 
1972   // Don't perform this optimization when optimizing for size, since
1973   // materializing elements and inserting them tends to cause code bloat.
1974   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
1975       ((MostCommonCount > DominantValueCountThreshold) ||
1976        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1977     // Start by splatting the most common element.
1978     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1979 
1980     DenseSet<SDValue> Processed{DominantValue};
1981     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1982     for (const auto &OpIdx : enumerate(Op->ops())) {
1983       const SDValue &V = OpIdx.value();
1984       if (V.isUndef() || !Processed.insert(V).second)
1985         continue;
1986       if (ValueCounts[V] == 1) {
1987         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1988                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1989       } else {
1990         // Blend in all instances of this value using a VSELECT, using a
1991         // mask where each bit signals whether that element is the one
1992         // we're after.
1993         SmallVector<SDValue> Ops;
1994         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1995           return DAG.getConstant(V == V1, DL, XLenVT);
1996         });
1997         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1998                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1999                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2000       }
2001     }
2002 
2003     return Vec;
2004   }
2005 
2006   return SDValue();
2007 }
2008 
2009 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2010                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2011   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2012     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2013     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi is the sign-extension of Lo (every bit of Hi equals Lo's sign
    // bit), a single vmv.v.x of Lo suffices, since the scalar operand is
    // sign-extended from XLEN to SEW. Lower this as a custom node in order to
    // try and match RVV vector/scalar instructions.
2016     if ((LoC >> 31) == HiC)
2017       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2018   }
2019 
2020   // Fall back to a stack store and stride x0 vector load.
2021   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2022 }
2023 
2024 // Called by type legalization to handle splat of i64 on RV32.
2025 // FIXME: We can optimize this when the type has sign or zero bits in one
2026 // of the halves.
2027 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2028                                    SDValue VL, SelectionDAG &DAG) {
2029   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2030   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2031                            DAG.getConstant(0, DL, MVT::i32));
2032   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2033                            DAG.getConstant(1, DL, MVT::i32));
2034   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2035 }
2036 
2037 // This function lowers a splat of a scalar operand Splat with the vector
2038 // length VL. It ensures the final sequence is type legal, which is useful when
2039 // lowering a splat after type legalization.
2040 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2041                                 SelectionDAG &DAG,
2042                                 const RISCVSubtarget &Subtarget) {
2043   if (VT.isFloatingPoint())
2044     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2045 
2046   MVT XLenVT = Subtarget.getXLenVT();
2047 
2048   // Simplest case is that the operand needs to be promoted to XLenVT.
2049   if (Scalar.getValueType().bitsLE(XLenVT)) {
2050     // If the operand is a constant, sign extend to increase our chances
2051     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
2053     // FIXME: Should we ignore the upper bits in isel instead?
2054     unsigned ExtOpc =
2055         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2056     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2057     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2058   }
2059 
2060   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2061          "Unexpected scalar for splat lowering!");
2062 
2063   // Otherwise use the more complicated splatting algorithm.
2064   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2065 }
2066 
2067 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2068                                    const RISCVSubtarget &Subtarget) {
2069   SDValue V1 = Op.getOperand(0);
2070   SDValue V2 = Op.getOperand(1);
2071   SDLoc DL(Op);
2072   MVT XLenVT = Subtarget.getXLenVT();
2073   MVT VT = Op.getSimpleValueType();
2074   unsigned NumElts = VT.getVectorNumElements();
2075   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2076 
2077   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2078 
2079   SDValue TrueMask, VL;
2080   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2081 
2082   if (SVN->isSplat()) {
2083     const int Lane = SVN->getSplatIndex();
2084     if (Lane >= 0) {
2085       MVT SVT = VT.getVectorElementType();
2086 
2087       // Turn splatted vector load into a strided load with an X0 stride.
2088       SDValue V = V1;
2089       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2090       // with undef.
2091       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2092       int Offset = Lane;
2093       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2094         int OpElements =
2095             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2096         V = V.getOperand(Offset / OpElements);
2097         Offset %= OpElements;
2098       }
2099 
2100       // We need to ensure the load isn't atomic or volatile.
2101       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2102         auto *Ld = cast<LoadSDNode>(V);
2103         Offset *= SVT.getStoreSize();
2104         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2105                                                    TypeSize::Fixed(Offset), DL);
2106 
2107         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2108         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2109           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2110           SDValue IntID =
2111               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2112           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
2113                            DAG.getRegister(RISCV::X0, XLenVT), VL};
2114           SDValue NewLoad = DAG.getMemIntrinsicNode(
2115               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2116               DAG.getMachineFunction().getMachineMemOperand(
2117                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2118           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2119           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2120         }
2121 
2122         // Otherwise use a scalar load and splat. This will give the best
2123         // opportunity to fold a splat into the operation. ISel can turn it into
2124         // the x0 strided load if we aren't able to fold away the select.
2125         if (SVT.isFloatingPoint())
2126           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2127                           Ld->getPointerInfo().getWithOffset(Offset),
2128                           Ld->getOriginalAlign(),
2129                           Ld->getMemOperand()->getFlags());
2130         else
2131           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2132                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2133                              Ld->getOriginalAlign(),
2134                              Ld->getMemOperand()->getFlags());
2135         DAG.makeEquivalentMemoryOrdering(Ld, V);
2136 
2137         unsigned Opc =
2138             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2139         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2140         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2141       }
2142 
2143       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2144       assert(Lane < (int)NumElts && "Unexpected lane!");
2145       SDValue Gather =
2146           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2147                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2148       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2149     }
2150   }
2151 
2152   // Detect shuffles which can be re-expressed as vector selects; these are
2153   // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
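  // For example, on v4iN the mask <0, 5, 2, 7> is such a select: each result
  // element i comes from index i of either V1 (indices 0 and 2) or V2
  // (indices 5 % 4 == 1 and 7 % 4 == 3).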
2155   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
2156     int MaskIndex = MaskIdx.value();
2157     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2158   });
2159 
2160   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2161 
2162   SmallVector<SDValue> MaskVals;
2163   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2164   // merged with a second vrgather.
2165   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2166 
2167   // By default we preserve the original operand order, and use a mask to
2168   // select LHS as true and RHS as false. However, since RVV vector selects may
2169   // feature splats but only on the LHS, we may choose to invert our mask and
2170   // instead select between RHS and LHS.
2171   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2172   bool InvertMask = IsSelect == SwapOps;
2173 
  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
2175   // half.
2176   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2177 
2178   // Now construct the mask that will be used by the vselect or blended
2179   // vrgather operation. For vrgathers, construct the appropriate indices into
2180   // each vector.
2181   for (int MaskIndex : SVN->getMask()) {
2182     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2183     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2184     if (!IsSelect) {
2185       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2186       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2187                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2188                                      : DAG.getUNDEF(XLenVT));
2189       GatherIndicesRHS.push_back(
2190           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2191                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2192       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2193         ++LHSIndexCounts[MaskIndex];
2194       if (!IsLHSOrUndefIndex)
2195         ++RHSIndexCounts[MaskIndex - NumElts];
2196     }
2197   }
2198 
2199   if (SwapOps) {
2200     std::swap(V1, V2);
2201     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2202   }
2203 
2204   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2205   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2206   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2207 
2208   if (IsSelect)
2209     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2210 
2211   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2212     // On such a large vector we're unable to use i8 as the index type.
2213     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2214     // may involve vector splitting if we're already at LMUL=8, or our
2215     // user-supplied maximum fixed-length LMUL.
2216     return SDValue();
2217   }
2218 
2219   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2220   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2221   MVT IndexVT = VT.changeTypeToInteger();
2222   // Since we can't introduce illegal index types at this stage, use i16 and
2223   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2224   // than XLenVT.
2225   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2226     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2227     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2228   }
2229 
2230   MVT IndexContainerVT =
2231       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2232 
2233   SDValue Gather;
2234   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2235   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2236   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2237     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2238   } else {
2239     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2240     // If only one index is used, we can use a "splat" vrgather.
2241     // TODO: We can splat the most-common index and fix-up any stragglers, if
2242     // that's beneficial.
2243     if (LHSIndexCounts.size() == 1) {
2244       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2245       Gather =
2246           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2247                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2248     } else {
2249       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2250       LHSIndices =
2251           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2252 
2253       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2254                            TrueMask, VL);
2255     }
2256   }
2257 
2258   // If a second vector operand is used by this shuffle, blend it in with an
2259   // additional vrgather.
2260   if (!V2.isUndef()) {
2261     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2262     // If only one index is used, we can use a "splat" vrgather.
2263     // TODO: We can splat the most-common index and fix-up any stragglers, if
2264     // that's beneficial.
2265     if (RHSIndexCounts.size() == 1) {
2266       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2267       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2268                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2269     } else {
2270       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2271       RHSIndices =
2272           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2273       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2274                        VL);
2275     }
2276 
2277     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2278     SelectMask =
2279         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2280 
2281     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2282                          Gather, VL);
2283   }
2284 
2285   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2286 }
2287 
2288 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2289                                      SDLoc DL, SelectionDAG &DAG,
2290                                      const RISCVSubtarget &Subtarget) {
2291   if (VT.isScalableVector())
2292     return DAG.getFPExtendOrRound(Op, DL, VT);
2293   assert(VT.isFixedLengthVector() &&
2294          "Unexpected value type for RVV FP extend/round lowering");
2295   SDValue Mask, VL;
2296   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2297   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2298                         ? RISCVISD::FP_EXTEND_VL
2299                         : RISCVISD::FP_ROUND_VL;
2300   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2301 }
2302 
2303 // While RVV has alignment restrictions, we should always be able to load as a
2304 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2306 // the load is already correctly-aligned, it returns SDValue().
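// For example, an underaligned load of nxv2i32 is re-expressed as a load of
// nxv8i8 (four i8s per i32 element) followed by a bitcast back to nxv2i32.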
2307 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2308                                                     SelectionDAG &DAG) const {
2309   auto *Load = cast<LoadSDNode>(Op);
2310   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2311 
2312   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2313                                      Load->getMemoryVT(),
2314                                      *Load->getMemOperand()))
2315     return SDValue();
2316 
2317   SDLoc DL(Op);
2318   MVT VT = Op.getSimpleValueType();
2319   unsigned EltSizeBits = VT.getScalarSizeInBits();
2320   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2321          "Unexpected unaligned RVV load type");
2322   MVT NewVT =
2323       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2324   assert(NewVT.isValid() &&
2325          "Expecting equally-sized RVV vector types to be legal");
2326   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2327                           Load->getPointerInfo(), Load->getOriginalAlign(),
2328                           Load->getMemOperand()->getFlags());
2329   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2330 }
2331 
2332 // While RVV has alignment restrictions, we should always be able to store as a
2333 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
2335 // returns SDValue() if the store is already correctly aligned.
2336 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2337                                                      SelectionDAG &DAG) const {
2338   auto *Store = cast<StoreSDNode>(Op);
2339   assert(Store && Store->getValue().getValueType().isVector() &&
2340          "Expected vector store");
2341 
2342   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2343                                      Store->getMemoryVT(),
2344                                      *Store->getMemOperand()))
2345     return SDValue();
2346 
2347   SDLoc DL(Op);
2348   SDValue StoredVal = Store->getValue();
2349   MVT VT = StoredVal.getSimpleValueType();
2350   unsigned EltSizeBits = VT.getScalarSizeInBits();
2351   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2352          "Unexpected unaligned RVV store type");
2353   MVT NewVT =
2354       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2355   assert(NewVT.isValid() &&
2356          "Expecting equally-sized RVV vector types to be legal");
2357   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2358   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2359                       Store->getPointerInfo(), Store->getOriginalAlign(),
2360                       Store->getMemOperand()->getFlags());
2361 }
2362 
2363 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2364                                             SelectionDAG &DAG) const {
2365   switch (Op.getOpcode()) {
2366   default:
2367     report_fatal_error("unimplemented operand");
2368   case ISD::GlobalAddress:
2369     return lowerGlobalAddress(Op, DAG);
2370   case ISD::BlockAddress:
2371     return lowerBlockAddress(Op, DAG);
2372   case ISD::ConstantPool:
2373     return lowerConstantPool(Op, DAG);
2374   case ISD::JumpTable:
2375     return lowerJumpTable(Op, DAG);
2376   case ISD::GlobalTLSAddress:
2377     return lowerGlobalTLSAddress(Op, DAG);
2378   case ISD::SELECT:
2379     return lowerSELECT(Op, DAG);
2380   case ISD::BRCOND:
2381     return lowerBRCOND(Op, DAG);
2382   case ISD::VASTART:
2383     return lowerVASTART(Op, DAG);
2384   case ISD::FRAMEADDR:
2385     return lowerFRAMEADDR(Op, DAG);
2386   case ISD::RETURNADDR:
2387     return lowerRETURNADDR(Op, DAG);
2388   case ISD::SHL_PARTS:
2389     return lowerShiftLeftParts(Op, DAG);
2390   case ISD::SRA_PARTS:
2391     return lowerShiftRightParts(Op, DAG, true);
2392   case ISD::SRL_PARTS:
2393     return lowerShiftRightParts(Op, DAG, false);
2394   case ISD::BITCAST: {
2395     SDLoc DL(Op);
2396     EVT VT = Op.getValueType();
2397     SDValue Op0 = Op.getOperand(0);
2398     EVT Op0VT = Op0.getValueType();
2399     MVT XLenVT = Subtarget.getXLenVT();
2400     if (VT.isFixedLengthVector()) {
2401       // We can handle fixed length vector bitcasts with a simple replacement
2402       // in isel.
2403       if (Op0VT.isFixedLengthVector())
2404         return Op;
2405       // When bitcasting from scalar to fixed-length vector, insert the scalar
2406       // into a one-element vector of the result type, and perform a vector
2407       // bitcast.
2408       if (!Op0VT.isVector()) {
2409         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2410         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2411                                               DAG.getUNDEF(BVT), Op0,
2412                                               DAG.getConstant(0, DL, XLenVT)));
2413       }
2414       return SDValue();
2415     }
2416     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2417     // thus: bitcast the vector to a one-element vector type whose element type
2418     // is the same as the result type, and extract the first element.
2419     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2420       LLVMContext &Context = *DAG.getContext();
2421       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2422       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2423                          DAG.getConstant(0, DL, XLenVT));
2424     }
2425     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2426       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2427       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2428       return FPConv;
2429     }
2430     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2431         Subtarget.hasStdExtF()) {
2432       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2433       SDValue FPConv =
2434           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2435       return FPConv;
2436     }
2437     return SDValue();
2438   }
2439   case ISD::INTRINSIC_WO_CHAIN:
2440     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2441   case ISD::INTRINSIC_W_CHAIN:
2442     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2443   case ISD::INTRINSIC_VOID:
2444     return LowerINTRINSIC_VOID(Op, DAG);
2445   case ISD::BSWAP:
2446   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2448     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2449     MVT VT = Op.getSimpleValueType();
2450     SDLoc DL(Op);
2451     // Start with the maximum immediate value which is the bitwidth - 1.
2452     unsigned Imm = VT.getSizeInBits() - 1;
2453     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2454     if (Op.getOpcode() == ISD::BSWAP)
2455       Imm &= ~0x7U;
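    // For example, for i32: BITREVERSE keeps Imm == 31, while BSWAP clears
    // the low bits to give Imm == 24, a pure byte swap.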
2456     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2457                        DAG.getConstant(Imm, DL, VT));
2458   }
2459   case ISD::FSHL:
2460   case ISD::FSHR: {
2461     MVT VT = Op.getSimpleValueType();
2462     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2463     SDLoc DL(Op);
2464     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2465       return Op;
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
2468     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2469     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2470                                 DAG.getConstant(ShAmtWidth, DL, VT));
2471     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2472     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2473   }
2474   case ISD::TRUNCATE: {
2475     SDLoc DL(Op);
2476     MVT VT = Op.getSimpleValueType();
2477     // Only custom-lower vector truncates
2478     if (!VT.isVector())
2479       return Op;
2480 
2481     // Truncates to mask types are handled differently
2482     if (VT.getVectorElementType() == MVT::i1)
2483       return lowerVectorMaskTrunc(Op, DAG);
2484 
2485     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2486     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2487     // truncate by one power of two at a time.
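    // For example, truncating an i64 element type down to i8 emits three
    // nodes: i64 -> i32 -> i16 -> i8.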
2488     MVT DstEltVT = VT.getVectorElementType();
2489 
2490     SDValue Src = Op.getOperand(0);
2491     MVT SrcVT = Src.getSimpleValueType();
2492     MVT SrcEltVT = SrcVT.getVectorElementType();
2493 
2494     assert(DstEltVT.bitsLT(SrcEltVT) &&
2495            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2496            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2497            "Unexpected vector truncate lowering");
2498 
2499     MVT ContainerVT = SrcVT;
2500     if (SrcVT.isFixedLengthVector()) {
2501       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2502       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2503     }
2504 
2505     SDValue Result = Src;
2506     SDValue Mask, VL;
2507     std::tie(Mask, VL) =
2508         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2509     LLVMContext &Context = *DAG.getContext();
2510     const ElementCount Count = ContainerVT.getVectorElementCount();
2511     do {
2512       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2513       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2514       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2515                            Mask, VL);
2516     } while (SrcEltVT != DstEltVT);
2517 
2518     if (SrcVT.isFixedLengthVector())
2519       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2520 
2521     return Result;
2522   }
2523   case ISD::ANY_EXTEND:
2524   case ISD::ZERO_EXTEND:
2525     if (Op.getOperand(0).getValueType().isVector() &&
2526         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2527       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2528     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2529   case ISD::SIGN_EXTEND:
2530     if (Op.getOperand(0).getValueType().isVector() &&
2531         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2532       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2533     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2534   case ISD::SPLAT_VECTOR_PARTS:
2535     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2536   case ISD::INSERT_VECTOR_ELT:
2537     return lowerINSERT_VECTOR_ELT(Op, DAG);
2538   case ISD::EXTRACT_VECTOR_ELT:
2539     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2540   case ISD::VSCALE: {
2541     MVT VT = Op.getSimpleValueType();
2542     SDLoc DL(Op);
2543     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for LMUL=1 to use a 64-bit known
    // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we
    // calculate vscale as VLENB / 8.
2547     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2548     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2549       // We assume VLENB is a multiple of 8. We manually choose the best shift
2550       // here because SimplifyDemandedBits isn't always able to simplify it.
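      // For example, (vscale * 4) becomes (VLENB >> 1) and (vscale * 16)
      // becomes (VLENB << 1), since vscale itself is (VLENB >> 3).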
2551       uint64_t Val = Op.getConstantOperandVal(0);
2552       if (isPowerOf2_64(Val)) {
2553         uint64_t Log2 = Log2_64(Val);
2554         if (Log2 < 3)
2555           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2556                              DAG.getConstant(3 - Log2, DL, VT));
2557         if (Log2 > 3)
2558           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2559                              DAG.getConstant(Log2 - 3, DL, VT));
2560         return VLENB;
2561       }
2562       // If the multiplier is a multiple of 8, scale it down to avoid needing
2563       // to shift the VLENB value.
2564       if ((Val % 8) == 0)
2565         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2566                            DAG.getConstant(Val / 8, DL, VT));
2567     }
2568 
2569     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2570                                  DAG.getConstant(3, DL, VT));
2571     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2572   }
2573   case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
    // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
    // via f32.
2577     SDLoc DL(Op);
2578     MVT VT = Op.getSimpleValueType();
2579     SDValue Src = Op.getOperand(0);
2580     MVT SrcVT = Src.getSimpleValueType();
2581 
2582     // Prepare any fixed-length vector operands.
2583     MVT ContainerVT = VT;
2584     if (SrcVT.isFixedLengthVector()) {
2585       ContainerVT = getContainerForFixedLengthVector(VT);
2586       MVT SrcContainerVT =
2587           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2588       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2589     }
2590 
2591     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2592         SrcVT.getVectorElementType() != MVT::f16) {
2593       // For scalable vectors, we only need to close the gap between
2594       // vXf16->vXf64.
2595       if (!VT.isFixedLengthVector())
2596         return Op;
2597       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2598       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2599       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2600     }
2601 
2602     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2603     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2604     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2605         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2606 
2607     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2608                                            DL, DAG, Subtarget);
2609     if (VT.isFixedLengthVector())
2610       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2611     return Extend;
2612   }
2613   case ISD::FP_ROUND: {
2614     // RVV can only do fp_round to types half the size as the source. We
2615     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2616     // conversion instruction.
2617     SDLoc DL(Op);
2618     MVT VT = Op.getSimpleValueType();
2619     SDValue Src = Op.getOperand(0);
2620     MVT SrcVT = Src.getSimpleValueType();
2621 
2622     // Prepare any fixed-length vector operands.
2623     MVT ContainerVT = VT;
2624     if (VT.isFixedLengthVector()) {
2625       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2626       ContainerVT =
2627           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2628       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2629     }
2630 
2631     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2632         SrcVT.getVectorElementType() != MVT::f64) {
      // For scalable vectors, we only need to close the gap between
      // vXf64->vXf16.
2635       if (!VT.isFixedLengthVector())
2636         return Op;
2637       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2638       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2639       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2640     }
2641 
2642     SDValue Mask, VL;
2643     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2644 
2645     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2646     SDValue IntermediateRound =
2647         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2648     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2649                                           DL, DAG, Subtarget);
2650 
2651     if (VT.isFixedLengthVector())
2652       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2653     return Round;
2654   }
2655   case ISD::FP_TO_SINT:
2656   case ISD::FP_TO_UINT:
2657   case ISD::SINT_TO_FP:
2658   case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that would otherwise need
    // two hops into explicit two-step sequences.
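    // For example, an i8 -> f64 conversion is lowered as an i8 -> i64
    // sign/zero extension followed by an equally-sized i64 -> f64 conversion.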
2662     MVT VT = Op.getSimpleValueType();
2663     if (!VT.isVector())
2664       return Op;
2665     SDLoc DL(Op);
2666     SDValue Src = Op.getOperand(0);
2667     MVT EltVT = VT.getVectorElementType();
2668     MVT SrcVT = Src.getSimpleValueType();
2669     MVT SrcEltVT = SrcVT.getVectorElementType();
2670     unsigned EltSize = EltVT.getSizeInBits();
2671     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2672     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2673            "Unexpected vector element types");
2674 
2675     bool IsInt2FP = SrcEltVT.isInteger();
2676     // Widening conversions
2677     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2678       if (IsInt2FP) {
2679         // Do a regular integer sign/zero extension then convert to float.
2680         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2681                                       VT.getVectorElementCount());
2682         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2683                                  ? ISD::ZERO_EXTEND
2684                                  : ISD::SIGN_EXTEND;
2685         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2686         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2687       }
2688       // FP2Int
2689       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2690       // Do one doubling fp_extend then complete the operation by converting
2691       // to int.
2692       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2693       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2694       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2695     }
2696 
2697     // Narrowing conversions
2698     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2699       if (IsInt2FP) {
2700         // One narrowing int_to_fp, then an fp_round.
2701         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2702         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2703         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2704         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2705       }
2706       // FP2Int
2707       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2708       // representable by the integer, the result is poison.
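      // For example, an f64 -> i8 conversion becomes a narrowing f64 -> i32
      // fp_to_int followed by an i32 -> i8 truncate.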
2709       MVT IVecVT =
2710           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2711                            VT.getVectorElementCount());
2712       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2713       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2714     }
2715 
    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions as well as the halving/doubling ones.
2718     if (!VT.isFixedLengthVector())
2719       return Op;
2720 
2721     // For fixed-length vectors we lower to a custom "VL" node.
2722     unsigned RVVOpc = 0;
2723     switch (Op.getOpcode()) {
2724     default:
2725       llvm_unreachable("Impossible opcode");
2726     case ISD::FP_TO_SINT:
2727       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2728       break;
2729     case ISD::FP_TO_UINT:
2730       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2731       break;
2732     case ISD::SINT_TO_FP:
2733       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2734       break;
2735     case ISD::UINT_TO_FP:
2736       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2737       break;
2738     }
2739 
2740     MVT ContainerVT, SrcContainerVT;
2741     // Derive the reference container type from the larger vector type.
2742     if (SrcEltSize > EltSize) {
2743       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2744       ContainerVT =
2745           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2746     } else {
2747       ContainerVT = getContainerForFixedLengthVector(VT);
2748       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2749     }
2750 
2751     SDValue Mask, VL;
2752     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2753 
2754     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2755     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2756     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2757   }
2758   case ISD::FP_TO_SINT_SAT:
2759   case ISD::FP_TO_UINT_SAT:
2760     return lowerFP_TO_INT_SAT(Op, DAG);
2761   case ISD::VECREDUCE_ADD:
2762   case ISD::VECREDUCE_UMAX:
2763   case ISD::VECREDUCE_SMAX:
2764   case ISD::VECREDUCE_UMIN:
2765   case ISD::VECREDUCE_SMIN:
2766     return lowerVECREDUCE(Op, DAG);
2767   case ISD::VECREDUCE_AND:
2768   case ISD::VECREDUCE_OR:
2769   case ISD::VECREDUCE_XOR:
2770     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2771       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
2772     return lowerVECREDUCE(Op, DAG);
2773   case ISD::VECREDUCE_FADD:
2774   case ISD::VECREDUCE_SEQ_FADD:
2775   case ISD::VECREDUCE_FMIN:
2776   case ISD::VECREDUCE_FMAX:
2777     return lowerFPVECREDUCE(Op, DAG);
2778   case ISD::VP_REDUCE_ADD:
2779   case ISD::VP_REDUCE_UMAX:
2780   case ISD::VP_REDUCE_SMAX:
2781   case ISD::VP_REDUCE_UMIN:
2782   case ISD::VP_REDUCE_SMIN:
2783   case ISD::VP_REDUCE_FADD:
2784   case ISD::VP_REDUCE_SEQ_FADD:
2785   case ISD::VP_REDUCE_FMIN:
2786   case ISD::VP_REDUCE_FMAX:
2787     return lowerVPREDUCE(Op, DAG);
2788   case ISD::VP_REDUCE_AND:
2789   case ISD::VP_REDUCE_OR:
2790   case ISD::VP_REDUCE_XOR:
2791     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
2792       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
2793     return lowerVPREDUCE(Op, DAG);
2794   case ISD::INSERT_SUBVECTOR:
2795     return lowerINSERT_SUBVECTOR(Op, DAG);
2796   case ISD::EXTRACT_SUBVECTOR:
2797     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2798   case ISD::STEP_VECTOR:
2799     return lowerSTEP_VECTOR(Op, DAG);
2800   case ISD::VECTOR_REVERSE:
2801     return lowerVECTOR_REVERSE(Op, DAG);
2802   case ISD::BUILD_VECTOR:
2803     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2804   case ISD::SPLAT_VECTOR:
2805     if (Op.getValueType().getVectorElementType() == MVT::i1)
2806       return lowerVectorMaskSplat(Op, DAG);
2807     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2808   case ISD::VECTOR_SHUFFLE:
2809     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2810   case ISD::CONCAT_VECTORS: {
2811     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2812     // better than going through the stack, as the default expansion does.
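    // For example, concatenating two v4i32 operands into a v8i32 result emits
    // two INSERT_SUBVECTOR nodes at element indices 0 and 4.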
2813     SDLoc DL(Op);
2814     MVT VT = Op.getSimpleValueType();
2815     unsigned NumOpElts =
2816         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2817     SDValue Vec = DAG.getUNDEF(VT);
2818     for (const auto &OpIdx : enumerate(Op->ops()))
2819       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2820                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2821     return Vec;
2822   }
2823   case ISD::LOAD:
2824     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2825       return V;
2826     if (Op.getValueType().isFixedLengthVector())
2827       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2828     return Op;
2829   case ISD::STORE:
2830     if (auto V = expandUnalignedRVVStore(Op, DAG))
2831       return V;
2832     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2833       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2834     return Op;
2835   case ISD::MLOAD:
2836   case ISD::VP_LOAD:
2837     return lowerMaskedLoad(Op, DAG);
2838   case ISD::MSTORE:
2839   case ISD::VP_STORE:
2840     return lowerMaskedStore(Op, DAG);
2841   case ISD::SETCC:
2842     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2843   case ISD::ADD:
2844     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2845   case ISD::SUB:
2846     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2847   case ISD::MUL:
2848     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2849   case ISD::MULHS:
2850     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2851   case ISD::MULHU:
2852     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2853   case ISD::AND:
2854     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2855                                               RISCVISD::AND_VL);
2856   case ISD::OR:
2857     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2858                                               RISCVISD::OR_VL);
2859   case ISD::XOR:
2860     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2861                                               RISCVISD::XOR_VL);
2862   case ISD::SDIV:
2863     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2864   case ISD::SREM:
2865     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2866   case ISD::UDIV:
2867     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2868   case ISD::UREM:
2869     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2870   case ISD::SHL:
2871   case ISD::SRA:
2872   case ISD::SRL:
2873     if (Op.getSimpleValueType().isFixedLengthVector())
2874       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2875     // This can be called for an i32 shift amount that needs to be promoted.
2876     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2877            "Unexpected custom legalisation");
2878     return SDValue();
2879   case ISD::SADDSAT:
2880     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
2881   case ISD::UADDSAT:
2882     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
2883   case ISD::SSUBSAT:
2884     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
2885   case ISD::USUBSAT:
2886     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
2887   case ISD::FADD:
2888     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2889   case ISD::FSUB:
2890     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2891   case ISD::FMUL:
2892     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2893   case ISD::FDIV:
2894     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2895   case ISD::FNEG:
2896     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2897   case ISD::FABS:
2898     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2899   case ISD::FSQRT:
2900     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2901   case ISD::FMA:
2902     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2903   case ISD::SMIN:
2904     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2905   case ISD::SMAX:
2906     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2907   case ISD::UMIN:
2908     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2909   case ISD::UMAX:
2910     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2911   case ISD::FMINNUM:
2912     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2913   case ISD::FMAXNUM:
2914     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2915   case ISD::ABS:
2916     return lowerABS(Op, DAG);
2917   case ISD::VSELECT:
2918     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2919   case ISD::FCOPYSIGN:
2920     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2921   case ISD::MGATHER:
2922   case ISD::VP_GATHER:
2923     return lowerMaskedGather(Op, DAG);
2924   case ISD::MSCATTER:
2925   case ISD::VP_SCATTER:
2926     return lowerMaskedScatter(Op, DAG);
2927   case ISD::FLT_ROUNDS_:
2928     return lowerGET_ROUNDING(Op, DAG);
2929   case ISD::SET_ROUNDING:
2930     return lowerSET_ROUNDING(Op, DAG);
2931   case ISD::VP_ADD:
2932     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2933   case ISD::VP_SUB:
2934     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2935   case ISD::VP_MUL:
2936     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2937   case ISD::VP_SDIV:
2938     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2939   case ISD::VP_UDIV:
2940     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2941   case ISD::VP_SREM:
2942     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2943   case ISD::VP_UREM:
2944     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2945   case ISD::VP_AND:
2946     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2947   case ISD::VP_OR:
2948     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2949   case ISD::VP_XOR:
2950     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2951   case ISD::VP_ASHR:
2952     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2953   case ISD::VP_LSHR:
2954     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2955   case ISD::VP_SHL:
2956     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2957   case ISD::VP_FADD:
2958     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2959   case ISD::VP_FSUB:
2960     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2961   case ISD::VP_FMUL:
2962     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2963   case ISD::VP_FDIV:
2964     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2965   }
2966 }
2967 
2968 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2969                              SelectionDAG &DAG, unsigned Flags) {
2970   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2971 }
2972 
2973 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2974                              SelectionDAG &DAG, unsigned Flags) {
2975   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2976                                    Flags);
2977 }
2978 
2979 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2980                              SelectionDAG &DAG, unsigned Flags) {
2981   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2982                                    N->getOffset(), Flags);
2983 }
2984 
2985 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2986                              SelectionDAG &DAG, unsigned Flags) {
2987   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2988 }
2989 
2990 template <class NodeTy>
2991 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2992                                      bool IsLocal) const {
2993   SDLoc DL(N);
2994   EVT Ty = getPointerTy(DAG.getDataLayout());
2995 
2996   if (isPositionIndependent()) {
2997     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2998     if (IsLocal)
2999       // Use PC-relative addressing to access the symbol. This generates the
3000       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3001       // %pcrel_lo(auipc)).
3002       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3003 
3004     // Use PC-relative addressing to access the GOT for this symbol, then load
3005     // the address from the GOT. This generates the pattern (PseudoLA sym),
3006     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3007     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3008   }
3009 
3010   switch (getTargetMachine().getCodeModel()) {
3011   default:
3012     report_fatal_error("Unsupported code model for lowering");
3013   case CodeModel::Small: {
3014     // Generate a sequence for accessing addresses within the first 2 GiB of
3015     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3016     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3017     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3018     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3019     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3020   }
3021   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3025     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3026     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3027   }
3028   }
3029 }
3030 
3031 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3032                                                 SelectionDAG &DAG) const {
3033   SDLoc DL(Op);
3034   EVT Ty = Op.getValueType();
3035   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3036   int64_t Offset = N->getOffset();
3037   MVT XLenVT = Subtarget.getXLenVT();
3038 
3039   const GlobalValue *GV = N->getGlobal();
3040   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3041   SDValue Addr = getAddr(N, DAG, IsLocal);
3042 
3043   // In order to maximise the opportunity for common subexpression elimination,
3044   // emit a separate ADD node for the global address offset instead of folding
3045   // it in the global address node. Later peephole optimisations may choose to
3046   // fold it back in when profitable.
3047   if (Offset != 0)
3048     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3049                        DAG.getConstant(Offset, DL, XLenVT));
3050   return Addr;
3051 }
3052 
3053 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3054                                                SelectionDAG &DAG) const {
3055   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3056 
3057   return getAddr(N, DAG);
3058 }
3059 
3060 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3061                                                SelectionDAG &DAG) const {
3062   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3063 
3064   return getAddr(N, DAG);
3065 }
3066 
3067 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3068                                             SelectionDAG &DAG) const {
3069   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3070 
3071   return getAddr(N, DAG);
3072 }
3073 
3074 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3075                                               SelectionDAG &DAG,
3076                                               bool UseGOT) const {
3077   SDLoc DL(N);
3078   EVT Ty = getPointerTy(DAG.getDataLayout());
3079   const GlobalValue *GV = N->getGlobal();
3080   MVT XLenVT = Subtarget.getXLenVT();
3081 
3082   if (UseGOT) {
3083     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3084     // load the address from the GOT and add the thread pointer. This generates
3085     // the pattern (PseudoLA_TLS_IE sym), which expands to
3086     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3087     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3088     SDValue Load =
3089         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3090 
3091     // Add the thread pointer.
3092     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3093     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3094   }
3095 
3096   // Generate a sequence for accessing the address relative to the thread
3097   // pointer, with the appropriate adjustment for the thread pointer offset.
3098   // This generates the pattern
3099   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3100   SDValue AddrHi =
3101       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3102   SDValue AddrAdd =
3103       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3104   SDValue AddrLo =
3105       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3106 
3107   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3108   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3109   SDValue MNAdd = SDValue(
3110       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3111       0);
3112   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3113 }
3114 
3115 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3116                                                SelectionDAG &DAG) const {
3117   SDLoc DL(N);
3118   EVT Ty = getPointerTy(DAG.getDataLayout());
3119   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3120   const GlobalValue *GV = N->getGlobal();
3121 
3122   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3123   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3124   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3125   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3126   SDValue Load =
3127       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3128 
3129   // Prepare argument list to generate call.
3130   ArgListTy Args;
3131   ArgListEntry Entry;
3132   Entry.Node = Load;
3133   Entry.Ty = CallTy;
3134   Args.push_back(Entry);
3135 
  // Set up a call to __tls_get_addr.
3137   TargetLowering::CallLoweringInfo CLI(DAG);
3138   CLI.setDebugLoc(DL)
3139       .setChain(DAG.getEntryNode())
3140       .setLibCallee(CallingConv::C, CallTy,
3141                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3142                     std::move(Args));
3143 
3144   return LowerCallTo(CLI).first;
3145 }
3146 
3147 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3148                                                    SelectionDAG &DAG) const {
3149   SDLoc DL(Op);
3150   EVT Ty = Op.getValueType();
3151   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3152   int64_t Offset = N->getOffset();
3153   MVT XLenVT = Subtarget.getXLenVT();
3154 
3155   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3156 
3157   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3158       CallingConv::GHC)
3159     report_fatal_error("In GHC calling convention TLS is not supported");
3160 
3161   SDValue Addr;
3162   switch (Model) {
3163   case TLSModel::LocalExec:
3164     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3165     break;
3166   case TLSModel::InitialExec:
3167     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3168     break;
3169   case TLSModel::LocalDynamic:
3170   case TLSModel::GeneralDynamic:
3171     Addr = getDynamicTLSAddr(N, DAG);
3172     break;
3173   }
3174 
3175   // In order to maximise the opportunity for common subexpression elimination,
3176   // emit a separate ADD node for the global address offset instead of folding
3177   // it in the global address node. Later peephole optimisations may choose to
3178   // fold it back in when profitable.
3179   if (Offset != 0)
3180     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3181                        DAG.getConstant(Offset, DL, XLenVT));
3182   return Addr;
3183 }
3184 
3185 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3186   SDValue CondV = Op.getOperand(0);
3187   SDValue TrueV = Op.getOperand(1);
3188   SDValue FalseV = Op.getOperand(2);
3189   SDLoc DL(Op);
3190   MVT VT = Op.getSimpleValueType();
3191   MVT XLenVT = Subtarget.getXLenVT();
3192 
3193   // Lower vector SELECTs to VSELECTs by splatting the condition.
3194   if (VT.isVector()) {
3195     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3196     SDValue CondSplat = VT.isScalableVector()
3197                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3198                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3199     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3200   }
3201 
3202   // If the result type is XLenVT and CondV is the output of a SETCC node
3203   // which also operated on XLenVT inputs, then merge the SETCC node into the
3204   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3205   // compare+branch instructions. i.e.:
3206   // (select (setcc lhs, rhs, cc), truev, falsev)
3207   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3208   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3209       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3210     SDValue LHS = CondV.getOperand(0);
3211     SDValue RHS = CondV.getOperand(1);
3212     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3213     ISD::CondCode CCVal = CC->get();
3214 
    // Special case for a select of 2 constants that have a difference of 1.
3216     // Normally this is done by DAGCombine, but if the select is introduced by
3217     // type legalization or op legalization, we miss it. Restricting to SETLT
3218     // case for now because that is what signed saturating add/sub need.
3219     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3220     // but we would probably want to swap the true/false values if the condition
3221     // is SETGE/SETLE to avoid an XORI.
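    // For example, (select (setlt lhs, rhs), 5, 4) becomes
    // (add (setlt lhs, rhs), 4), and (select (setlt lhs, rhs), 4, 5) becomes
    // (sub 5, (setlt lhs, rhs)).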
3222     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3223         CCVal == ISD::SETLT) {
3224       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3225       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3226       if (TrueVal - 1 == FalseVal)
3227         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3228       if (TrueVal + 1 == FalseVal)
3229         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3230     }
3231 
3232     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3233 
3234     SDValue TargetCC = DAG.getCondCode(CCVal);
3235     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3236     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3237   }
3238 
3239   // Otherwise:
3240   // (select condv, truev, falsev)
3241   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3242   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3243   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3244 
3245   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3246 
3247   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3248 }
3249 
3250 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3251   SDValue CondV = Op.getOperand(1);
3252   SDLoc DL(Op);
3253   MVT XLenVT = Subtarget.getXLenVT();
3254 
3255   if (CondV.getOpcode() == ISD::SETCC &&
3256       CondV.getOperand(0).getValueType() == XLenVT) {
3257     SDValue LHS = CondV.getOperand(0);
3258     SDValue RHS = CondV.getOperand(1);
3259     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3260 
3261     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3262 
3263     SDValue TargetCC = DAG.getCondCode(CCVal);
3264     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3265                        LHS, RHS, TargetCC, Op.getOperand(2));
3266   }
3267 
3268   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3269                      CondV, DAG.getConstant(0, DL, XLenVT),
3270                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3271 }
3272 
3273 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3274   MachineFunction &MF = DAG.getMachineFunction();
3275   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3276 
3277   SDLoc DL(Op);
3278   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3279                                  getPointerTy(MF.getDataLayout()));
3280 
3281   // vastart just stores the address of the VarArgsFrameIndex slot into the
3282   // memory location argument.
3283   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3284   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3285                       MachinePointerInfo(SV));
3286 }
3287 
3288 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3289                                             SelectionDAG &DAG) const {
3290   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3291   MachineFunction &MF = DAG.getMachineFunction();
3292   MachineFrameInfo &MFI = MF.getFrameInfo();
3293   MFI.setFrameAddressIsTaken(true);
3294   Register FrameReg = RI.getFrameRegister(MF);
3295   int XLenInBytes = Subtarget.getXLen() / 8;
3296 
3297   EVT VT = Op.getValueType();
3298   SDLoc DL(Op);
3299   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3300   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3301   while (Depth--) {
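    // The previous frame pointer is stored at a fixed offset of
    // -2*XLenInBytes from the current frame address, so load it once per
    // level of depth.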
3302     int Offset = -(XLenInBytes * 2);
3303     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3304                               DAG.getIntPtrConstant(Offset, DL));
3305     FrameAddr =
3306         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3307   }
3308   return FrameAddr;
3309 }
3310 
3311 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3312                                              SelectionDAG &DAG) const {
3313   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3314   MachineFunction &MF = DAG.getMachineFunction();
3315   MachineFrameInfo &MFI = MF.getFrameInfo();
3316   MFI.setReturnAddressIsTaken(true);
3317   MVT XLenVT = Subtarget.getXLenVT();
3318   int XLenInBytes = Subtarget.getXLen() / 8;
3319 
3320   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3321     return SDValue();
3322 
3323   EVT VT = Op.getValueType();
3324   SDLoc DL(Op);
3325   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3326   if (Depth) {
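    // The return address of a parent frame is stored at offset -XLenInBytes
    // from that frame's frame pointer, so compute the frame address at the
    // requested depth and load from there.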
3327     int Off = -XLenInBytes;
3328     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3329     SDValue Offset = DAG.getConstant(Off, DL, VT);
3330     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3331                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3332                        MachinePointerInfo());
3333   }
3334 
3335   // Return the value of the return address register, marking it an implicit
3336   // live-in.
3337   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3338   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3339 }
3340 
3341 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3342                                                  SelectionDAG &DAG) const {
3343   SDLoc DL(Op);
3344   SDValue Lo = Op.getOperand(0);
3345   SDValue Hi = Op.getOperand(1);
3346   SDValue Shamt = Op.getOperand(2);
3347   EVT VT = Lo.getValueType();
3348 
  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Hi = Lo << (Shamt-XLEN)
  //   Lo = 0
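  //
  // This is used, for example, on RV32 to expand an i64 shift-left into i32
  // operations, with the SETLT select choosing between the two cases.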
3355 
3356   SDValue Zero = DAG.getConstant(0, DL, VT);
3357   SDValue One = DAG.getConstant(1, DL, VT);
3358   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3359   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3360   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3361   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3362 
3363   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3364   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3365   SDValue ShiftRightLo =
3366       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3367   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3368   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3369   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3370 
3371   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3372 
3373   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3374   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3375 
3376   SDValue Parts[2] = {Lo, Hi};
3377   return DAG.getMergeValues(Parts, DL);
3378 }
3379 
3380 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3381                                                   bool IsSRA) const {
3382   SDLoc DL(Op);
3383   SDValue Lo = Op.getOperand(0);
3384   SDValue Hi = Op.getOperand(1);
3385   SDValue Shamt = Op.getOperand(2);
3386   EVT VT = Lo.getValueType();
3387 
3388   // SRA expansion:
3389   //   if Shamt-XLEN < 0: // Shamt < XLEN
3390   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3391   //     Hi = Hi >>s Shamt
3392   //   else:
3393   //     Lo = Hi >>s (Shamt-XLEN);
3394   //     Hi = Hi >>s (XLEN-1)
3395   //
3396   // SRL expansion:
3397   //   if Shamt-XLEN < 0: // Shamt < XLEN
3398   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3399   //     Hi = Hi >>u Shamt
3400   //   else:
3401   //     Lo = Hi >>u (Shamt-XLEN);
3402   //     Hi = 0;
3403 
3404   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3405 
3406   SDValue Zero = DAG.getConstant(0, DL, VT);
3407   SDValue One = DAG.getConstant(1, DL, VT);
3408   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3409   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3410   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3411   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3412 
3413   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3414   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3415   SDValue ShiftLeftHi =
3416       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3417   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3418   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3419   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3420   SDValue HiFalse =
3421       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3422 
3423   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3424 
3425   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3426   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3427 
3428   SDValue Parts[2] = {Lo, Hi};
3429   return DAG.getMergeValues(Parts, DL);
3430 }
3431 
3432 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3433 // legal equivalently-sized i8 type, so we can use that as a go-between.
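// For example, a non-constant splat (vXi1 (splat c)) is lowered as
// (setcc (vXi8 (splat (and c, 1))), (vXi8 (splat 0)), ne).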
3434 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3435                                                   SelectionDAG &DAG) const {
3436   SDLoc DL(Op);
3437   MVT VT = Op.getSimpleValueType();
3438   SDValue SplatVal = Op.getOperand(0);
3439   // All-zeros or all-ones splats are handled specially.
3440   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3441     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3442     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3443   }
3444   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3445     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3446     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3447   }
3448   MVT XLenVT = Subtarget.getXLenVT();
3449   assert(SplatVal.getValueType() == XLenVT &&
3450          "Unexpected type for i1 splat value");
3451   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3452   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3453                          DAG.getConstant(1, DL, XLenVT));
3454   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3455   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3456   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3457 }
3458 
// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type
// is illegal (currently only vXi64 on RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to SPLAT_VECTOR_I64.
3463 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3464                                                      SelectionDAG &DAG) const {
3465   SDLoc DL(Op);
3466   MVT VecVT = Op.getSimpleValueType();
3467   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3468          "Unexpected SPLAT_VECTOR_PARTS lowering");
3469 
3470   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3471   SDValue Lo = Op.getOperand(0);
3472   SDValue Hi = Op.getOperand(1);
3473 
3474   if (VecVT.isFixedLengthVector()) {
3475     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3476     SDLoc DL(Op);
3477     SDValue Mask, VL;
3478     std::tie(Mask, VL) =
3479         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3480 
3481     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3482     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3483   }
3484 
3485   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3486     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3487     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign extension of Lo (e.g. Lo = -2,
    // Hi = -1), lower this as a custom node in order to try to match RVV
    // vector/scalar instructions.
3491       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3492   }
3493 
  // Detect cases where Hi is (SRA Lo, 31), which means Hi is the sign
  // extension of Lo.
3495   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3496       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3497       Hi.getConstantOperandVal(1) == 31)
3498     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3499 
  // Fall back to a stack store and a stride-x0 vector load. Use X0 as VL.
3501   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3502                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3503 }
3504 
3505 // Custom-lower extensions from mask vectors by using a vselect either with 1
3506 // for zero/any-extension or -1 for sign-extension:
3507 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3508 // Note that any-extension is lowered identically to zero-extension.
3509 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3510                                                 int64_t ExtTrueVal) const {
3511   SDLoc DL(Op);
3512   MVT VecVT = Op.getSimpleValueType();
3513   SDValue Src = Op.getOperand(0);
3514   // Only custom-lower extensions from mask types
3515   assert(Src.getValueType().isVector() &&
3516          Src.getValueType().getVectorElementType() == MVT::i1);
3517 
3518   MVT XLenVT = Subtarget.getXLenVT();
3519   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3520   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3521 
3522   if (VecVT.isScalableVector()) {
    // Be careful not to introduce illegal scalar types at this stage, and be
    // careful about splatting constants: on RV32, vXi64 SPLAT_VECTOR is
    // illegal and must be expanded. Since we know that the constants are
    // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3527     bool IsRV32E64 =
3528         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3529 
3530     if (!IsRV32E64) {
3531       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3532       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3533     } else {
3534       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3535       SplatTrueVal =
3536           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3537     }
3538 
3539     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3540   }
3541 
3542   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3543   MVT I1ContainerVT =
3544       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3545 
3546   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3547 
3548   SDValue Mask, VL;
3549   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3550 
3551   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3552   SplatTrueVal =
3553       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3554   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3555                                SplatTrueVal, SplatZero, VL);
3556 
3557   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3558 }
3559 
3560 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3561     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3562   MVT ExtVT = Op.getSimpleValueType();
3563   // Only custom-lower extensions from fixed-length vector types.
3564   if (!ExtVT.isFixedLengthVector())
3565     return Op;
3566   MVT VT = Op.getOperand(0).getSimpleValueType();
3567   // Grab the canonical container type for the extended type. Infer the smaller
3568   // type from that to ensure the same number of vector elements, as we know
3569   // the LMUL will be sufficient to hold the smaller type.
3570   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Build the smaller container type manually to ensure the same number of
  // vector elements between source and dest.
3573   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3574                                      ContainerExtVT.getVectorElementCount());
3575 
3576   SDValue Op1 =
3577       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3578 
3579   SDLoc DL(Op);
3580   SDValue Mask, VL;
3581   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3582 
3583   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3584 
3585   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3586 }
3587 
3588 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3589 // setcc operation:
3590 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3591 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3592                                                   SelectionDAG &DAG) const {
3593   SDLoc DL(Op);
3594   EVT MaskVT = Op.getValueType();
3595   // Only expect to custom-lower truncations to mask types
3596   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3597          "Unexpected type for vector mask lowering");
3598   SDValue Src = Op.getOperand(0);
3599   MVT VecVT = Src.getSimpleValueType();
3600 
3601   // If this is a fixed vector, we need to convert it to a scalable vector.
3602   MVT ContainerVT = VecVT;
3603   if (VecVT.isFixedLengthVector()) {
3604     ContainerVT = getContainerForFixedLengthVector(VecVT);
3605     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3606   }
3607 
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());

  // VMV_V_X_VL takes an explicit VL operand, so splat with the default VL
  // for the container type.
  SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne, VL);
  SplatZero =
      DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);

  if (VecVT.isScalableVector()) {
    SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
    return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
  }
3621 
3622   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3623   SDValue Trunc =
3624       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3625   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3626                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3627   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3628 }
3629 
3630 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3631 // first position of a vector, and that vector is slid up to the insert index.
3632 // By limiting the active vector length to index+1 and merging with the
3633 // original vector (with an undisturbed tail policy for elements >= VL), we
3634 // achieve the desired result of leaving all elements untouched except the one
3635 // at VL-1, which is replaced with the desired value.
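// For example, an insert at index 2 of a v4i32 vector places the new value
// at element 0 of a temporary vector, then performs a vslideup by 2 with
// VL=3, merging into the original vector so that only element 2 changes.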
3636 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3637                                                     SelectionDAG &DAG) const {
3638   SDLoc DL(Op);
3639   MVT VecVT = Op.getSimpleValueType();
3640   SDValue Vec = Op.getOperand(0);
3641   SDValue Val = Op.getOperand(1);
3642   SDValue Idx = Op.getOperand(2);
3643 
3644   if (VecVT.getVectorElementType() == MVT::i1) {
3645     // FIXME: For now we just promote to an i8 vector and insert into that,
3646     // but this is probably not optimal.
3647     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3648     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3649     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3650     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3651   }
3652 
3653   MVT ContainerVT = VecVT;
3654   // If the operand is a fixed-length vector, convert to a scalable one.
3655   if (VecVT.isFixedLengthVector()) {
3656     ContainerVT = getContainerForFixedLengthVector(VecVT);
3657     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3658   }
3659 
3660   MVT XLenVT = Subtarget.getXLenVT();
3661 
3662   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3663   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3664   // Even i64-element vectors on RV32 can be lowered without scalar
3665   // legalization if the most-significant 32 bits of the value are not affected
3666   // by the sign-extension of the lower 32 bits.
3667   // TODO: We could also catch sign extensions of a 32-bit value.
3668   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3669     const auto *CVal = cast<ConstantSDNode>(Val);
3670     if (isInt<32>(CVal->getSExtValue())) {
3671       IsLegalInsert = true;
3672       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3673     }
3674   }
3675 
3676   SDValue Mask, VL;
3677   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3678 
3679   SDValue ValInVec;
3680 
3681   if (IsLegalInsert) {
3682     unsigned Opc =
3683         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3684     if (isNullConstant(Idx)) {
3685       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3686       if (!VecVT.isFixedLengthVector())
3687         return Vec;
3688       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3689     }
3690     ValInVec =
3691         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3692   } else {
3693     // On RV32, i64-element vectors must be specially handled to place the
3694     // value at element 0, by using two vslide1up instructions in sequence on
3695     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3696     // this.
3697     SDValue One = DAG.getConstant(1, DL, XLenVT);
3698     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3699     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3700     MVT I32ContainerVT =
3701         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3702     SDValue I32Mask =
3703         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3704     // Limit the active VL to two.
3705     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
    // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3708     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3709                            InsertI64VL);
3710     // First slide in the hi value, then the lo in underneath it.
3711     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3712                            ValHi, I32Mask, InsertI64VL);
3713     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3714                            ValLo, I32Mask, InsertI64VL);
3715     // Bitcast back to the right container type.
3716     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3717   }
3718 
3719   // Now that the value is in a vector, slide it into position.
3720   SDValue InsertVL =
3721       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3722   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3723                                 ValInVec, Idx, Mask, InsertVL);
3724   if (!VecVT.isFixedLengthVector())
3725     return Slideup;
3726   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3727 }
3728 
3729 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3730 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3731 // types this is done using VMV_X_S to allow us to glean information about the
3732 // sign bits of the result.
3733 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3734                                                      SelectionDAG &DAG) const {
3735   SDLoc DL(Op);
3736   SDValue Idx = Op.getOperand(1);
3737   SDValue Vec = Op.getOperand(0);
3738   EVT EltVT = Op.getValueType();
3739   MVT VecVT = Vec.getSimpleValueType();
3740   MVT XLenVT = Subtarget.getXLenVT();
3741 
3742   if (VecVT.getVectorElementType() == MVT::i1) {
3743     // FIXME: For now we just promote to an i8 vector and extract from that,
3744     // but this is probably not optimal.
3745     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3746     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3747     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3748   }
3749 
3750   // If this is a fixed vector, we need to convert it to a scalable vector.
3751   MVT ContainerVT = VecVT;
3752   if (VecVT.isFixedLengthVector()) {
3753     ContainerVT = getContainerForFixedLengthVector(VecVT);
3754     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3755   }
3756 
3757   // If the index is 0, the vector is already in the right position.
3758   if (!isNullConstant(Idx)) {
3759     // Use a VL of 1 to avoid processing more elements than we need.
3760     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3761     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3762     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3763     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3764                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3765   }
3766 
3767   if (!EltVT.isInteger()) {
3768     // Floating-point extracts are handled in TableGen.
3769     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3770                        DAG.getConstant(0, DL, XLenVT));
3771   }
3772 
3773   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3774   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3775 }
3776 
3777 // Some RVV intrinsics may claim that they want an integer operand to be
3778 // promoted or expanded.
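// For example, an i8 scalar operand is promoted to XLenVT, while an i64
// scalar on RV32 is either truncated (if it is a sign-extended constant) or
// split and splatted into a vector via splatSplitI64WithVL before the
// intrinsic is rebuilt with the vector operand.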
3779 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3780                                           const RISCVSubtarget &Subtarget) {
3781   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3782           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3783          "Unexpected opcode");
3784 
3785   if (!Subtarget.hasVInstructions())
3786     return SDValue();
3787 
3788   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3789   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3790   SDLoc DL(Op);
3791 
3792   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3793       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3794   if (!II || !II->SplatOperand)
3795     return SDValue();
3796 
3797   unsigned SplatOp = II->SplatOperand + HasChain;
3798   assert(SplatOp < Op.getNumOperands());
3799 
3800   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3801   SDValue &ScalarOp = Operands[SplatOp];
3802   MVT OpVT = ScalarOp.getSimpleValueType();
3803   MVT XLenVT = Subtarget.getXLenVT();
3804 
  // If this isn't a scalar, or its type is already XLenVT, we're done.
3806   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3807     return SDValue();
3808 
3809   // Simplest case is that the operand needs to be promoted to XLenVT.
3810   if (OpVT.bitsLT(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
3814     // FIXME: Should we ignore the upper bits in isel instead?
3815     unsigned ExtOpc =
3816         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3817     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3818     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3819   }
3820 
3821   // Use the previous operand to get the vXi64 VT. The result might be a mask
3822   // VT for compares. Using the previous operand assumes that the previous
3823   // operand will never have a smaller element size than a scalar operand and
3824   // that a widening operation never uses SEW=64.
3825   // NOTE: If this fails the below assert, we can probably just find the
3826   // element count from any operand or result and use it to construct the VT.
3827   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3828   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3829 
3830   // The more complex case is when the scalar is larger than XLenVT.
3831   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3832          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3833 
3834   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3835   // on the instruction to sign-extend since SEW>XLEN.
3836   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3837     if (isInt<32>(CVal->getSExtValue())) {
3838       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3839       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3840     }
3841   }
3842 
3843   // We need to convert the scalar to a splat vector.
3844   // FIXME: Can we implicitly truncate the scalar if it is known to
3845   // be sign extended?
3846   // VL should be the last operand.
3847   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3848   assert(VL.getValueType() == XLenVT);
3849   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3850   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3851 }
3852 
3853 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3854                                                      SelectionDAG &DAG) const {
3855   unsigned IntNo = Op.getConstantOperandVal(0);
3856   SDLoc DL(Op);
3857   MVT XLenVT = Subtarget.getXLenVT();
3858 
3859   switch (IntNo) {
3860   default:
3861     break; // Don't custom lower most intrinsics.
3862   case Intrinsic::thread_pointer: {
3863     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3864     return DAG.getRegister(RISCV::X4, PtrVT);
3865   }
3866   case Intrinsic::riscv_orc_b:
3867     // Lower to the GORCI encoding for orc.b.
3868     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3869                        DAG.getConstant(7, DL, XLenVT));
3870   case Intrinsic::riscv_grev:
3871   case Intrinsic::riscv_gorc: {
3872     unsigned Opc =
3873         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3874     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3875   }
3876   case Intrinsic::riscv_shfl:
3877   case Intrinsic::riscv_unshfl: {
3878     unsigned Opc =
3879         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3880     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3881   }
3882   case Intrinsic::riscv_bcompress:
3883   case Intrinsic::riscv_bdecompress: {
3884     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3885                                                        : RISCVISD::BDECOMPRESS;
3886     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3887   }
3888   case Intrinsic::riscv_vmv_x_s:
3889     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3890     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3891                        Op.getOperand(1));
3892   case Intrinsic::riscv_vmv_v_x:
3893     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3894                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3895   case Intrinsic::riscv_vfmv_v_f:
3896     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3897                        Op.getOperand(1), Op.getOperand(2));
3898   case Intrinsic::riscv_vmv_s_x: {
3899     SDValue Scalar = Op.getOperand(2);
3900 
3901     if (Scalar.getValueType().bitsLE(XLenVT)) {
3902       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3903       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3904                          Op.getOperand(1), Scalar, Op.getOperand(3));
3905     }
3906 
3907     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3908 
    // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat
    // containing the two values that we assemble using some bit math. Next
    // we'll use vid.v and vmseq to build a mask with bit 0 set. Then we'll
    // use that mask to merge element 0 from our splat into the source vector.
3914     // FIXME: This is probably not the best way to do this, but it is
3915     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3916     // point.
3917     //   sw lo, (a0)
3918     //   sw hi, 4(a0)
3919     //   vlse vX, (a0)
3920     //
3921     //   vid.v      vVid
3922     //   vmseq.vx   mMask, vVid, 0
3923     //   vmerge.vvm vDest, vSrc, vVal, mMask
3924     MVT VT = Op.getSimpleValueType();
3925     SDValue Vec = Op.getOperand(1);
3926     SDValue VL = Op.getOperand(3);
3927 
3928     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3929     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3930                                       DAG.getConstant(0, DL, MVT::i32), VL);
3931 
3932     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3933     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3934     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3935     SDValue SelectCond =
3936         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3937                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3938     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3939                        Vec, VL);
3940   }
3941   case Intrinsic::riscv_vslide1up:
3942   case Intrinsic::riscv_vslide1down:
3943   case Intrinsic::riscv_vslide1up_mask:
3944   case Intrinsic::riscv_vslide1down_mask: {
3945     // We need to special case these when the scalar is larger than XLen.
3946     unsigned NumOps = Op.getNumOperands();
3947     bool IsMasked = NumOps == 7;
3948     unsigned OpOffset = IsMasked ? 1 : 0;
3949     SDValue Scalar = Op.getOperand(2 + OpOffset);
3950     if (Scalar.getValueType().bitsLE(XLenVT))
3951       break;
3952 
3953     // Splatting a sign extended constant is fine.
3954     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3955       if (isInt<32>(CVal->getSExtValue()))
3956         break;
3957 
3958     MVT VT = Op.getSimpleValueType();
3959     assert(VT.getVectorElementType() == MVT::i64 &&
3960            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3961 
3962     // Convert the vector source to the equivalent nxvXi32 vector.
3963     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3964     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3965 
3966     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3967                                    DAG.getConstant(0, DL, XLenVT));
3968     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3969                                    DAG.getConstant(1, DL, XLenVT));
3970 
3971     // Double the VL since we halved SEW.
3972     SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
3973     SDValue I32VL =
3974         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3975 
3976     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3977     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3978 
3979     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3980     // instructions.
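    // A rough sketch of the unmasked vslide1down case, with the scalar halves
    // in lo/hi and the doubled VL already applied (register names are
    // illustrative only):
    //   vslide1down.vx v8, v8, lo
    //   vslide1down.vx v8, v8, hi
    // The vslide1up case slides hi in first, then lo underneath it.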
3981     if (IntNo == Intrinsic::riscv_vslide1up ||
3982         IntNo == Intrinsic::riscv_vslide1up_mask) {
3983       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3984                         I32Mask, I32VL);
3985       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3986                         I32Mask, I32VL);
3987     } else {
3988       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3989                         I32Mask, I32VL);
3990       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3991                         I32Mask, I32VL);
3992     }
3993 
3994     // Convert back to nxvXi64.
3995     Vec = DAG.getBitcast(VT, Vec);
3996 
3997     if (!IsMasked)
3998       return Vec;
3999 
4000     // Apply mask after the operation.
4001     SDValue Mask = Op.getOperand(NumOps - 3);
4002     SDValue MaskedOff = Op.getOperand(1);
4003     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4004   }
4005   }
4006 
4007   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4008 }
4009 
4010 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4011                                                     SelectionDAG &DAG) const {
4012   unsigned IntNo = Op.getConstantOperandVal(1);
4013   switch (IntNo) {
4014   default:
4015     break;
4016   case Intrinsic::riscv_masked_strided_load: {
4017     SDLoc DL(Op);
4018     MVT XLenVT = Subtarget.getXLenVT();
4019 
4020     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4021     // the selection of the masked intrinsics doesn't do this for us.
4022     SDValue Mask = Op.getOperand(5);
4023     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4024 
4025     MVT VT = Op->getSimpleValueType(0);
4026     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4027 
4028     SDValue PassThru = Op.getOperand(2);
4029     if (!IsUnmasked) {
4030       MVT MaskVT =
4031           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4032       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4033       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4034     }
4035 
4036     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4037 
4038     SDValue IntID = DAG.getTargetConstant(
4039         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4040         XLenVT);
4041 
4042     auto *Load = cast<MemIntrinsicSDNode>(Op);
4043     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4044     if (!IsUnmasked)
4045       Ops.push_back(PassThru);
4046     Ops.push_back(Op.getOperand(3)); // Ptr
4047     Ops.push_back(Op.getOperand(4)); // Stride
4048     if (!IsUnmasked)
4049       Ops.push_back(Mask);
4050     Ops.push_back(VL);
4051     if (!IsUnmasked) {
4052       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4053       Ops.push_back(Policy);
4054     }
4055 
4056     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4057     SDValue Result =
4058         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4059                                 Load->getMemoryVT(), Load->getMemOperand());
4060     SDValue Chain = Result.getValue(1);
4061     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4062     return DAG.getMergeValues({Result, Chain}, DL);
4063   }
4064   }
4065 
4066   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4067 }
4068 
4069 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4070                                                  SelectionDAG &DAG) const {
4071   unsigned IntNo = Op.getConstantOperandVal(1);
4072   switch (IntNo) {
4073   default:
4074     break;
4075   case Intrinsic::riscv_masked_strided_store: {
4076     SDLoc DL(Op);
4077     MVT XLenVT = Subtarget.getXLenVT();
4078 
4079     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4080     // the selection of the masked intrinsics doesn't do this for us.
4081     SDValue Mask = Op.getOperand(5);
4082     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4083 
4084     SDValue Val = Op.getOperand(2);
4085     MVT VT = Val.getSimpleValueType();
4086     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4087 
4088     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4089     if (!IsUnmasked) {
4090       MVT MaskVT =
4091           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4092       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4093     }
4094 
4095     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4096 
4097     SDValue IntID = DAG.getTargetConstant(
4098         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4099         XLenVT);
4100 
4101     auto *Store = cast<MemIntrinsicSDNode>(Op);
4102     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4103     Ops.push_back(Val);
4104     Ops.push_back(Op.getOperand(3)); // Ptr
4105     Ops.push_back(Op.getOperand(4)); // Stride
4106     if (!IsUnmasked)
4107       Ops.push_back(Mask);
4108     Ops.push_back(VL);
4109 
4110     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4111                                    Ops, Store->getMemoryVT(),
4112                                    Store->getMemOperand());
4113   }
4114   }
4115 
4116   return SDValue();
4117 }
4118 
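// Return the LMUL=1 scalable vector type with the same element type as VT.
// For example, with RVVBitsPerBlock == 64 this maps nxv8i32 down to nxv2i32.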
4119 static MVT getLMUL1VT(MVT VT) {
4120   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4121          "Unexpected vector MVT");
4122   return MVT::getScalableVectorVT(
4123       VT.getVectorElementType(),
4124       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4125 }
4126 
4127 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4128   switch (ISDOpcode) {
4129   default:
4130     llvm_unreachable("Unhandled reduction");
4131   case ISD::VECREDUCE_ADD:
4132     return RISCVISD::VECREDUCE_ADD_VL;
4133   case ISD::VECREDUCE_UMAX:
4134     return RISCVISD::VECREDUCE_UMAX_VL;
4135   case ISD::VECREDUCE_SMAX:
4136     return RISCVISD::VECREDUCE_SMAX_VL;
4137   case ISD::VECREDUCE_UMIN:
4138     return RISCVISD::VECREDUCE_UMIN_VL;
4139   case ISD::VECREDUCE_SMIN:
4140     return RISCVISD::VECREDUCE_SMIN_VL;
4141   case ISD::VECREDUCE_AND:
4142     return RISCVISD::VECREDUCE_AND_VL;
4143   case ISD::VECREDUCE_OR:
4144     return RISCVISD::VECREDUCE_OR_VL;
4145   case ISD::VECREDUCE_XOR:
4146     return RISCVISD::VECREDUCE_XOR_VL;
4147   }
4148 }
4149 
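// Lower reductions of i1 vectors (and their VP forms) by counting set bits
// with vpopc and comparing against zero: AND becomes (vpopc(~x) == 0), OR
// becomes (vpopc(x) != 0), and XOR becomes ((vpopc(x) & 1) != 0).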
4150 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4151                                                          SelectionDAG &DAG,
4152                                                          bool IsVP) const {
4153   SDLoc DL(Op);
4154   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4155   MVT VecVT = Vec.getSimpleValueType();
4156   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4157           Op.getOpcode() == ISD::VECREDUCE_OR ||
4158           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4159           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4160           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4161           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4162          "Unexpected reduction lowering");
4163 
4164   MVT XLenVT = Subtarget.getXLenVT();
4165   assert(Op.getValueType() == XLenVT &&
4166          "Expected reduction output to be legalized to XLenVT");
4167 
4168   MVT ContainerVT = VecVT;
4169   if (VecVT.isFixedLengthVector()) {
4170     ContainerVT = getContainerForFixedLengthVector(VecVT);
4171     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4172   }
4173 
4174   SDValue Mask, VL;
4175   if (IsVP) {
4176     Mask = Op.getOperand(2);
4177     VL = Op.getOperand(3);
4178   } else {
4179     std::tie(Mask, VL) =
4180         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4181   }
4182 
4183   unsigned BaseOpc;
4184   ISD::CondCode CC;
4185   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4186 
4187   switch (Op.getOpcode()) {
4188   default:
4189     llvm_unreachable("Unhandled reduction");
4190   case ISD::VECREDUCE_AND:
4191   case ISD::VP_REDUCE_AND: {
4192     // vpopc ~x == 0
4193     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4194     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4195     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4196     CC = ISD::SETEQ;
4197     BaseOpc = ISD::AND;
4198     break;
4199   }
4200   case ISD::VECREDUCE_OR:
4201   case ISD::VP_REDUCE_OR:
4202     // vpopc x != 0
4203     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4204     CC = ISD::SETNE;
4205     BaseOpc = ISD::OR;
4206     break;
4207   case ISD::VECREDUCE_XOR:
4208   case ISD::VP_REDUCE_XOR: {
4209     // ((vpopc x) & 1) != 0
4210     SDValue One = DAG.getConstant(1, DL, XLenVT);
4211     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4212     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4213     CC = ISD::SETNE;
4214     BaseOpc = ISD::XOR;
4215     break;
4216   }
4217   }
4218 
4219   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4220 
4221   if (!IsVP)
4222     return SetCC;
4223 
4224   // Now include the start value in the operation.
4225   // Note that we must return the start value when no elements are operated
4226   // upon. The vpopc instructions we've emitted in each case above will return
4227   // 0 for an inactive vector, and so we've already received the neutral value:
4228   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4229   // can simply include the start value.
4230   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4231 }
4232 
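// Lower integer VECREDUCE_* nodes by splatting the operation's neutral
// element into an LMUL=1 register, performing the VL-bounded reduction, and
// reading element 0 back into a scalar register. A rough sketch for
// vecreduce_add of a v4i32 (register names are illustrative only):
//   vsetivli   zero, 4, e32, m1, ta, mu
//   vmv.v.i    v9, 0
//   vredsum.vs v9, v8, v9
//   vmv.x.s    a0, v9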
4233 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4234                                             SelectionDAG &DAG) const {
4235   SDLoc DL(Op);
4236   SDValue Vec = Op.getOperand(0);
4237   EVT VecEVT = Vec.getValueType();
4238 
4239   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4240 
4241   // Due to ordering in legalize types we may have a vector type that needs to
4242   // be split. Do that manually so we can get down to a legal type.
4243   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4244          TargetLowering::TypeSplitVector) {
4245     SDValue Lo, Hi;
4246     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4247     VecEVT = Lo.getValueType();
4248     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4249   }
4250 
4251   // TODO: The type may need to be widened rather than split. Or widened before
4252   // it can be split.
4253   if (!isTypeLegal(VecEVT))
4254     return SDValue();
4255 
4256   MVT VecVT = VecEVT.getSimpleVT();
4257   MVT VecEltVT = VecVT.getVectorElementType();
4258   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4259 
4260   MVT ContainerVT = VecVT;
4261   if (VecVT.isFixedLengthVector()) {
4262     ContainerVT = getContainerForFixedLengthVector(VecVT);
4263     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4264   }
4265 
4266   MVT M1VT = getLMUL1VT(ContainerVT);
4267 
4268   SDValue Mask, VL;
4269   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4270 
4271   // FIXME: This is a VLMAX splat which might be too large and can prevent
4272   // vsetvli removal.
4273   SDValue NeutralElem =
4274       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4275   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
4276   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4277                                   IdentitySplat, Mask, VL);
4278   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4279                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4280   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4281 }
4282 
4283 // Given a reduction op, this function returns the matching reduction opcode,
4284 // the vector SDValue and the scalar SDValue required to lower this to a
4285 // RISCVISD node.
4286 static std::tuple<unsigned, SDValue, SDValue>
4287 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4288   SDLoc DL(Op);
4289   auto Flags = Op->getFlags();
4290   unsigned Opcode = Op.getOpcode();
4291   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4292   switch (Opcode) {
4293   default:
4294     llvm_unreachable("Unhandled reduction");
4295   case ISD::VECREDUCE_FADD:
4296     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
4297                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4298   case ISD::VECREDUCE_SEQ_FADD:
4299     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4300                            Op.getOperand(0));
4301   case ISD::VECREDUCE_FMIN:
4302     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4303                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4304   case ISD::VECREDUCE_FMAX:
4305     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4306                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4307   }
4308 }
4309 
4310 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4311                                               SelectionDAG &DAG) const {
4312   SDLoc DL(Op);
4313   MVT VecEltVT = Op.getSimpleValueType();
4314 
4315   unsigned RVVOpcode;
4316   SDValue VectorVal, ScalarVal;
4317   std::tie(RVVOpcode, VectorVal, ScalarVal) =
4318       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4319   MVT VecVT = VectorVal.getSimpleValueType();
4320 
4321   MVT ContainerVT = VecVT;
4322   if (VecVT.isFixedLengthVector()) {
4323     ContainerVT = getContainerForFixedLengthVector(VecVT);
4324     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4325   }
4326 
4327   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4328 
4329   SDValue Mask, VL;
4330   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4331 
4332   // FIXME: This is a VLMAX splat which might be too large and can prevent
4333   // vsetvli removal.
4334   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
4335   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4336                                   VectorVal, ScalarSplat, Mask, VL);
4337   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4338                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4339 }
4340 
4341 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4342   switch (ISDOpcode) {
4343   default:
4344     llvm_unreachable("Unhandled reduction");
4345   case ISD::VP_REDUCE_ADD:
4346     return RISCVISD::VECREDUCE_ADD_VL;
4347   case ISD::VP_REDUCE_UMAX:
4348     return RISCVISD::VECREDUCE_UMAX_VL;
4349   case ISD::VP_REDUCE_SMAX:
4350     return RISCVISD::VECREDUCE_SMAX_VL;
4351   case ISD::VP_REDUCE_UMIN:
4352     return RISCVISD::VECREDUCE_UMIN_VL;
4353   case ISD::VP_REDUCE_SMIN:
4354     return RISCVISD::VECREDUCE_SMIN_VL;
4355   case ISD::VP_REDUCE_AND:
4356     return RISCVISD::VECREDUCE_AND_VL;
4357   case ISD::VP_REDUCE_OR:
4358     return RISCVISD::VECREDUCE_OR_VL;
4359   case ISD::VP_REDUCE_XOR:
4360     return RISCVISD::VECREDUCE_XOR_VL;
4361   case ISD::VP_REDUCE_FADD:
4362     return RISCVISD::VECREDUCE_FADD_VL;
4363   case ISD::VP_REDUCE_SEQ_FADD:
4364     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4365   case ISD::VP_REDUCE_FMAX:
4366     return RISCVISD::VECREDUCE_FMAX_VL;
4367   case ISD::VP_REDUCE_FMIN:
4368     return RISCVISD::VECREDUCE_FMIN_VL;
4369   }
4370 }
4371 
4372 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4373                                            SelectionDAG &DAG) const {
4374   SDLoc DL(Op);
4375   SDValue Vec = Op.getOperand(1);
4376   EVT VecEVT = Vec.getValueType();
4377 
4378   // TODO: The type may need to be widened rather than split. Or widened before
4379   // it can be split.
4380   if (!isTypeLegal(VecEVT))
4381     return SDValue();
4382 
4383   MVT VecVT = VecEVT.getSimpleVT();
4384   MVT VecEltVT = VecVT.getVectorElementType();
4385   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
4386 
4387   MVT ContainerVT = VecVT;
4388   if (VecVT.isFixedLengthVector()) {
4389     ContainerVT = getContainerForFixedLengthVector(VecVT);
4390     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4391   }
4392 
4393   SDValue VL = Op.getOperand(3);
4394   SDValue Mask = Op.getOperand(2);
4395 
4396   MVT M1VT = getLMUL1VT(ContainerVT);
4397   MVT XLenVT = Subtarget.getXLenVT();
4398   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
4399 
4400   // FIXME: This is a VLMAX splat which might be too large and can prevent
4401   // vsetvli removal.
4402   SDValue StartSplat = DAG.getSplatVector(M1VT, DL, Op.getOperand(0));
4403   SDValue Reduction =
4404       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
4405   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
4406                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4407   if (!VecVT.isInteger())
4408     return Elt0;
4409   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4410 }
4411 
4412 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4413                                                    SelectionDAG &DAG) const {
4414   SDValue Vec = Op.getOperand(0);
4415   SDValue SubVec = Op.getOperand(1);
4416   MVT VecVT = Vec.getSimpleValueType();
4417   MVT SubVecVT = SubVec.getSimpleValueType();
4418 
4419   SDLoc DL(Op);
4420   MVT XLenVT = Subtarget.getXLenVT();
4421   unsigned OrigIdx = Op.getConstantOperandVal(2);
4422   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4423 
4424   // We don't have the ability to slide mask vectors up indexed by their i1
4425   // elements; the smallest we can do is i8. Often we are able to bitcast to
4426   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4427   // into a scalable one, we might not necessarily have enough scalable
4428   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
4429   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4430       (OrigIdx != 0 || !Vec.isUndef())) {
4431     if (VecVT.getVectorMinNumElements() >= 8 &&
4432         SubVecVT.getVectorMinNumElements() >= 8) {
4433       assert(OrigIdx % 8 == 0 && "Invalid index");
4434       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4435              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4436              "Unexpected mask vector lowering");
4437       OrigIdx /= 8;
4438       SubVecVT =
4439           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4440                            SubVecVT.isScalableVector());
4441       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4442                                VecVT.isScalableVector());
4443       Vec = DAG.getBitcast(VecVT, Vec);
4444       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4445     } else {
4446       // We can't slide this mask vector up indexed by its i1 elements.
4447       // This poses a problem when we wish to insert a scalable vector which
4448       // can't be re-expressed as a larger type. Just choose the slow path and
4449       // extend to a larger type, then truncate back down.
4450       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4451       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4452       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4453       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4454       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4455                         Op.getOperand(2));
4456       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4457       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4458     }
4459   }
4460 
  // If the subvector is a fixed-length type, we cannot use subregister
4462   // manipulation to simplify the codegen; we don't know which register of a
4463   // LMUL group contains the specific subvector as we only know the minimum
4464   // register size. Therefore we must slide the vector group up the full
4465   // amount.
4466   if (SubVecVT.isFixedLengthVector()) {
4467     if (OrigIdx == 0 && Vec.isUndef())
4468       return Op;
4469     MVT ContainerVT = VecVT;
4470     if (VecVT.isFixedLengthVector()) {
4471       ContainerVT = getContainerForFixedLengthVector(VecVT);
4472       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4473     }
4474     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4475                          DAG.getUNDEF(ContainerVT), SubVec,
4476                          DAG.getConstant(0, DL, XLenVT));
4477     SDValue Mask =
4478         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4479     // Set the vector length to only the number of elements we care about. Note
4480     // that for slideup this includes the offset.
4481     SDValue VL =
4482         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4483     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4484     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4485                                   SubVec, SlideupAmt, Mask, VL);
4486     if (VecVT.isFixedLengthVector())
4487       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4488     return DAG.getBitcast(Op.getValueType(), Slideup);
4489   }
4490 
4491   unsigned SubRegIdx, RemIdx;
4492   std::tie(SubRegIdx, RemIdx) =
4493       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4494           VecVT, SubVecVT, OrigIdx, TRI);
4495 
4496   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4497   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4498                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4499                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4500 
4501   // 1. If the Idx has been completely eliminated and this subvector's size is
4502   // a vector register or a multiple thereof, or the surrounding elements are
4503   // undef, then this is a subvector insert which naturally aligns to a vector
4504   // register. These can easily be handled using subregister manipulation.
4505   // 2. If the subvector is smaller than a vector register, then the insertion
4506   // must preserve the undisturbed elements of the register. We do this by
4507   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4508   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4509   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4510   // LMUL=1 type back into the larger vector (resolving to another subregister
4511   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4512   // to avoid allocating a large register group to hold our subvector.
4513   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4514     return Op;
4515 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
4518   // (in our case undisturbed). This means we can set up a subvector insertion
4519   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4520   // size of the subvector.
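  // For example, inserting nxv1i32 into nxv2i32 at index 1 slides the
  // subvector up by vscale elements with VL = 2 * vscale, leaving the first
  // vscale elements of the destination undisturbed.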
4521   MVT InterSubVT = VecVT;
4522   SDValue AlignedExtract = Vec;
4523   unsigned AlignedIdx = OrigIdx - RemIdx;
4524   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4525     InterSubVT = getLMUL1VT(VecVT);
4526     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
4528     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4529                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4530   }
4531 
4532   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4533   // For scalable vectors this must be further multiplied by vscale.
4534   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4535 
4536   SDValue Mask, VL;
4537   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4538 
4539   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4540   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4541   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4542   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4543 
4544   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4545                        DAG.getUNDEF(InterSubVT), SubVec,
4546                        DAG.getConstant(0, DL, XLenVT));
4547 
4548   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4549                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4550 
4551   // If required, insert this subvector back into the correct vector register.
4552   // This should resolve to an INSERT_SUBREG instruction.
4553   if (VecVT.bitsGT(InterSubVT))
4554     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4555                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4556 
4557   // We might have bitcast from a mask type: cast back to the original type if
4558   // required.
4559   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4560 }
4561 
4562 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4563                                                     SelectionDAG &DAG) const {
4564   SDValue Vec = Op.getOperand(0);
4565   MVT SubVecVT = Op.getSimpleValueType();
4566   MVT VecVT = Vec.getSimpleValueType();
4567 
4568   SDLoc DL(Op);
4569   MVT XLenVT = Subtarget.getXLenVT();
4570   unsigned OrigIdx = Op.getConstantOperandVal(1);
4571   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4572 
4573   // We don't have the ability to slide mask vectors down indexed by their i1
4574   // elements; the smallest we can do is i8. Often we are able to bitcast to
4575   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4576   // from a scalable one, we might not necessarily have enough scalable
4577   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4578   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4579     if (VecVT.getVectorMinNumElements() >= 8 &&
4580         SubVecVT.getVectorMinNumElements() >= 8) {
4581       assert(OrigIdx % 8 == 0 && "Invalid index");
4582       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4583              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4584              "Unexpected mask vector lowering");
4585       OrigIdx /= 8;
4586       SubVecVT =
4587           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4588                            SubVecVT.isScalableVector());
4589       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4590                                VecVT.isScalableVector());
4591       Vec = DAG.getBitcast(VecVT, Vec);
4592     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
4594       // This poses a problem when we wish to extract a scalable vector which
4595       // can't be re-expressed as a larger type. Just choose the slow path and
4596       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
4600       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4601       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4602       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4603       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4604                         Op.getOperand(1));
4605       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4606       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4607     }
4608   }
4609 
  // If the subvector is a fixed-length type, we cannot use subregister
4611   // manipulation to simplify the codegen; we don't know which register of a
4612   // LMUL group contains the specific subvector as we only know the minimum
4613   // register size. Therefore we must slide the vector group down the full
4614   // amount.
4615   if (SubVecVT.isFixedLengthVector()) {
4616     // With an index of 0 this is a cast-like subvector, which can be performed
4617     // with subregister operations.
4618     if (OrigIdx == 0)
4619       return Op;
4620     MVT ContainerVT = VecVT;
4621     if (VecVT.isFixedLengthVector()) {
4622       ContainerVT = getContainerForFixedLengthVector(VecVT);
4623       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4624     }
4625     SDValue Mask =
4626         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4627     // Set the vector length to only the number of elements we care about. This
4628     // avoids sliding down elements we're going to discard straight away.
4629     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4630     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4631     SDValue Slidedown =
4632         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4633                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4634     // Now we can use a cast-like subvector extract to get the result.
4635     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4636                             DAG.getConstant(0, DL, XLenVT));
4637     return DAG.getBitcast(Op.getValueType(), Slidedown);
4638   }
4639 
4640   unsigned SubRegIdx, RemIdx;
4641   std::tie(SubRegIdx, RemIdx) =
4642       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4643           VecVT, SubVecVT, OrigIdx, TRI);
4644 
4645   // If the Idx has been completely eliminated then this is a subvector extract
4646   // which naturally aligns to a vector register. These can easily be handled
4647   // using subregister manipulation.
4648   if (RemIdx == 0)
4649     return Op;
4650 
4651   // Else we must shift our vector register directly to extract the subvector.
4652   // Do this using VSLIDEDOWN.
4653 
4654   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an
  // EXTRACT_SUBREG instruction.
4657   MVT InterSubVT = VecVT;
4658   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4659     InterSubVT = getLMUL1VT(VecVT);
4660     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4661                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4662   }
4663 
4664   // Slide this vector register down by the desired number of elements in order
4665   // to place the desired subvector starting at element 0.
4666   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4667   // For scalable vectors this must be further multiplied by vscale.
4668   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4669 
4670   SDValue Mask, VL;
4671   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4672   SDValue Slidedown =
4673       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4674                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4675 
4676   // Now the vector is in the right position, extract our final subvector. This
4677   // should resolve to a COPY.
4678   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4679                           DAG.getConstant(0, DL, XLenVT));
4680 
4681   // We might have bitcast from a mask type: cast back to the original type if
4682   // required.
4683   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4684 }
4685 
4686 // Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
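// For example, a step of 4 is lowered to a vid.v followed by a left shift by
// 2, while a non-power-of-two step is lowered to vid.v followed by a multiply
// by the splatted step value.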
4688 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4689                                               SelectionDAG &DAG) const {
4690   SDLoc DL(Op);
4691   MVT VT = Op.getSimpleValueType();
4692   MVT XLenVT = Subtarget.getXLenVT();
4693   SDValue Mask, VL;
4694   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4695   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4696   uint64_t StepValImm = Op.getConstantOperandVal(0);
4697   if (StepValImm != 1) {
4698     if (isPowerOf2_64(StepValImm)) {
4699       SDValue StepVal =
4700           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4701                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4702       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4703     } else {
4704       SDValue StepVal = lowerScalarSplat(
4705           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4706           DL, DAG, Subtarget);
4707       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4708     }
4709   }
4710   return StepVec;
4711 }
4712 
4713 // Implement vector_reverse using vrgather.vv with indices determined by
4714 // subtracting the id of each element from (VLMAX-1). This will convert
4715 // the indices like so:
4716 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4717 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
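// A rough sketch of one possible emitted sequence, with (VLMAX-1) already in
// a0 (register names are illustrative only):
//   vid.v       v9
//   vrsub.vx    v9, v9, a0
//   vrgather.vv vDst, vSrc, v9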
4718 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4719                                                  SelectionDAG &DAG) const {
4720   SDLoc DL(Op);
4721   MVT VecVT = Op.getSimpleValueType();
4722   unsigned EltSize = VecVT.getScalarSizeInBits();
4723   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4724 
4725   unsigned MaxVLMAX = 0;
4726   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4727   if (VectorBitsMax != 0)
4728     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4729 
4730   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4731   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4732 
4733   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4734   // to use vrgatherei16.vv.
4735   // TODO: It's also possible to use vrgatherei16.vv for other types to
4736   // decrease register width for the index calculation.
4737   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4739     // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer requires
    // vrgatherei16.vv.
4742     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4743       SDValue Lo, Hi;
4744       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4745       EVT LoVT, HiVT;
4746       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4747       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4748       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4749       // Reassemble the low and high pieces reversed.
4750       // FIXME: This is a CONCAT_VECTORS.
4751       SDValue Res =
4752           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4753                       DAG.getIntPtrConstant(0, DL));
4754       return DAG.getNode(
4755           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4756           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4757     }
4758 
4759     // Just promote the int type to i16 which will double the LMUL.
4760     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4761     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4762   }
4763 
4764   MVT XLenVT = Subtarget.getXLenVT();
4765   SDValue Mask, VL;
4766   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4767 
4768   // Calculate VLMAX-1 for the desired SEW.
4769   unsigned MinElts = VecVT.getVectorMinNumElements();
4770   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4771                               DAG.getConstant(MinElts, DL, XLenVT));
4772   SDValue VLMinus1 =
4773       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4774 
4775   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4776   bool IsRV32E64 =
4777       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4778   SDValue SplatVL;
4779   if (!IsRV32E64)
4780     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4781   else
4782     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4783 
4784   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4785   SDValue Indices =
4786       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4787 
4788   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4789 }
4790 
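// Lower a fixed-length vector load to a VL-bounded RVV load on the container
// type. A rough sketch for a v4i32 load from (a0) (register names are
// illustrative only):
//   vsetivli zero, 4, e32, m1, ta, mu
//   vle32.v  v8, (a0)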
4791 SDValue
4792 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4793                                                      SelectionDAG &DAG) const {
4794   SDLoc DL(Op);
4795   auto *Load = cast<LoadSDNode>(Op);
4796 
4797   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4798                                         Load->getMemoryVT(),
4799                                         *Load->getMemOperand()) &&
4800          "Expecting a correctly-aligned load");
4801 
4802   MVT VT = Op.getSimpleValueType();
4803   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4804 
4805   SDValue VL =
4806       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4807 
4808   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4809   SDValue NewLoad = DAG.getMemIntrinsicNode(
4810       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4811       Load->getMemoryVT(), Load->getMemOperand());
4812 
4813   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4814   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4815 }
4816 
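// Lower a fixed-length vector store to a VL-bounded RVV store on the
// container type, padding sub-byte i1 vectors out to v8i1 first. A rough
// sketch for a v4i32 store to (a0) (register names are illustrative only):
//   vsetivli zero, 4, e32, m1, ta, mu
//   vse32.v  v8, (a0)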
4817 SDValue
4818 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4819                                                       SelectionDAG &DAG) const {
4820   SDLoc DL(Op);
4821   auto *Store = cast<StoreSDNode>(Op);
4822 
4823   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4824                                         Store->getMemoryVT(),
4825                                         *Store->getMemOperand()) &&
4826          "Expecting a correctly-aligned store");
4827 
4828   SDValue StoreVal = Store->getValue();
4829   MVT VT = StoreVal.getSimpleValueType();
4830 
  // If the size is less than a byte, pad with zeros to make a byte.
4832   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4833     VT = MVT::v8i1;
4834     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4835                            DAG.getConstant(0, DL, VT), StoreVal,
4836                            DAG.getIntPtrConstant(0, DL));
4837   }
4838 
4839   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4840 
4841   SDValue VL =
4842       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4843 
4844   SDValue NewValue =
4845       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4846   return DAG.getMemIntrinsicNode(
4847       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4848       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4849       Store->getMemoryVT(), Store->getMemOperand());
4850 }
4851 
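// Lower masked.load and vp.load to the riscv_vle / riscv_vle_mask intrinsics.
// A rough sketch of the masked fixed-length case, with the mask in v0 and the
// pass-through value already in v8 (register names are illustrative only):
//   vsetivli zero, 4, e32, m1, ta, mu
//   vle32.v  v8, (a0), v0.t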
4852 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
4853                                              SelectionDAG &DAG) const {
4854   SDLoc DL(Op);
4855   MVT VT = Op.getSimpleValueType();
4856 
4857   const auto *MemSD = cast<MemSDNode>(Op);
4858   EVT MemVT = MemSD->getMemoryVT();
4859   MachineMemOperand *MMO = MemSD->getMemOperand();
4860   SDValue Chain = MemSD->getChain();
4861   SDValue BasePtr = MemSD->getBasePtr();
4862 
4863   SDValue Mask, PassThru, VL;
4864   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
4865     Mask = VPLoad->getMask();
4866     PassThru = DAG.getUNDEF(VT);
4867     VL = VPLoad->getVectorLength();
4868   } else {
4869     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
4870     Mask = MLoad->getMask();
4871     PassThru = MLoad->getPassThru();
4872   }
4873 
4874   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4875 
4876   MVT XLenVT = Subtarget.getXLenVT();
4877 
4878   MVT ContainerVT = VT;
4879   if (VT.isFixedLengthVector()) {
4880     ContainerVT = getContainerForFixedLengthVector(VT);
4881     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4882     if (!IsUnmasked) {
4883       MVT MaskVT =
4884           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4885       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4886     }
4887   }
4888 
4889   if (!VL)
4890     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4891 
4892   unsigned IntID =
4893       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
4894   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
4895   if (!IsUnmasked)
4896     Ops.push_back(PassThru);
4897   Ops.push_back(BasePtr);
4898   if (!IsUnmasked)
4899     Ops.push_back(Mask);
4900   Ops.push_back(VL);
4901   if (!IsUnmasked)
4902     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
4903 
4904   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4905 
4906   SDValue Result =
4907       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
4908   Chain = Result.getValue(1);
4909 
4910   if (VT.isFixedLengthVector())
4911     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4912 
4913   return DAG.getMergeValues({Result, Chain}, DL);
4914 }
4915 
4916 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
4917                                               SelectionDAG &DAG) const {
4918   SDLoc DL(Op);
4919 
4920   const auto *MemSD = cast<MemSDNode>(Op);
4921   EVT MemVT = MemSD->getMemoryVT();
4922   MachineMemOperand *MMO = MemSD->getMemOperand();
4923   SDValue Chain = MemSD->getChain();
4924   SDValue BasePtr = MemSD->getBasePtr();
4925   SDValue Val, Mask, VL;
4926 
4927   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
4928     Val = VPStore->getValue();
4929     Mask = VPStore->getMask();
4930     VL = VPStore->getVectorLength();
4931   } else {
4932     const auto *MStore = cast<MaskedStoreSDNode>(Op);
4933     Val = MStore->getValue();
4934     Mask = MStore->getMask();
4935   }
4936 
4937   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4938 
4939   MVT VT = Val.getSimpleValueType();
4940   MVT XLenVT = Subtarget.getXLenVT();
4941 
4942   MVT ContainerVT = VT;
4943   if (VT.isFixedLengthVector()) {
4944     ContainerVT = getContainerForFixedLengthVector(VT);
4945 
4946     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4947     if (!IsUnmasked) {
4948       MVT MaskVT =
4949           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4950       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4951     }
4952   }
4953 
4954   if (!VL)
4955     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4956 
4957   unsigned IntID =
4958       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
4959   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
4960   Ops.push_back(Val);
4961   Ops.push_back(BasePtr);
4962   if (!IsUnmasked)
4963     Ops.push_back(Mask);
4964   Ops.push_back(VL);
4965 
4966   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
4967                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
4968 }
4969 
4970 SDValue
4971 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4972                                                       SelectionDAG &DAG) const {
4973   MVT InVT = Op.getOperand(0).getSimpleValueType();
4974   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4975 
4976   MVT VT = Op.getSimpleValueType();
4977 
4978   SDValue Op1 =
4979       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4980   SDValue Op2 =
4981       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4982 
4983   SDLoc DL(Op);
4984   SDValue VL =
4985       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4986 
  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);

  SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
                            Op.getOperand(2), Mask, VL);

  return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
    SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
  MVT VT = Op.getSimpleValueType();

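  // Mask (i1) vectors are lowered with the dedicated mask opcode, which is
  // applied unmasked; all other element types use the masked vector opcode.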
  if (VT.getVectorElementType() == MVT::i1)
    return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);

  return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
}

SDValue
RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unexpected opcode!");
  case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
  case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
  case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
  }

  return lowerToScalableOp(Op, DAG, Opc);
}

// Lower vector ABS to smax(X, sub(0, X)).
SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue X = Op.getOperand(0);

  assert(VT.isFixedLengthVector() && "Unexpected type");

  MVT ContainerVT = getContainerForFixedLengthVector(VT);
  X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

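  // Note: for an INT_MIN element, 0 - X wraps back to INT_MIN and the smax
  // still yields INT_MIN, matching the wrapping semantics of ISD::ABS.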
  SDValue SplatZero =
      DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                  DAG.getConstant(0, DL, Subtarget.getXLenVT()));
  SDValue NegX =
      DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
  SDValue Max =
      DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);

  return convertFromScalableVector(VT, Max, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue Mag = Op.getOperand(0);
  SDValue Sign = Op.getOperand(1);
  assert(Mag.getValueType() == Sign.getValueType() &&
         "Can only handle COPYSIGN with matching types.");

  MVT ContainerVT = getContainerForFixedLengthVector(VT);
  Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
  Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue CopySign =
      DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);

  return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
    SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = getContainerForFixedLengthVector(VT);

  MVT I1ContainerVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  SDValue CC =
      convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
  SDValue Op1 =
      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
  SDValue Op2 =
      convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Select =
      DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);

  return convertFromScalableVector(VT, Select, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
                                               unsigned NewOpc,
                                               bool HasMask) const {
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = getContainerForFixedLengthVector(VT);

  // Create list of operands by converting existing ones to scalable types.
  SmallVector<SDValue, 6> Ops;
  for (const SDValue &V : Op->op_values()) {
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");

    // Pass through non-vector operands.
    if (!V.getValueType().isVector()) {
      Ops.push_back(V);
      continue;
    }

    // "cast" fixed length vector to a scalable vector.
    assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
           "Only fixed length vectors are supported!");
    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
  }

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
  if (HasMask)
    Ops.push_back(Mask);
  Ops.push_back(VL);

  SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
  return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
}

// Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
// * Operands of each node are assumed to be in the same order.
// * The EVL operand is promoted from i32 to i64 on RV64.
// * Fixed-length vectors are converted to their scalable-vector container
//   types.
SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
                                       unsigned RISCVISDOpc) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SmallVector<SDValue, 4> Ops;

  for (const auto &OpIdx : enumerate(Op->ops())) {
    SDValue V = OpIdx.value();
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
    // Pass through operands which aren't fixed-length vectors.
    if (!V.getValueType().isFixedLengthVector()) {
      Ops.push_back(V);
      continue;
    }
    // "cast" fixed length vector to a scalable vector.
    MVT OpVT = V.getSimpleValueType();
    MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
    assert(useRVVForFixedLengthVectorVT(OpVT) &&
           "Only fixed length vectors are supported!");
    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
  }

  if (!VT.isFixedLengthVector())
    return DAG.getNode(RISCVISDOpc, DL, VT, Ops);

  MVT ContainerVT = getContainerForFixedLengthVector(VT);

  SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);

  return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
}

// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();

  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  MachineMemOperand *MMO = MemSD->getMemOperand();
  SDValue Chain = MemSD->getChain();
  SDValue BasePtr = MemSD->getBasePtr();

  ISD::LoadExtType LoadExtType;
  SDValue Index, Mask, PassThru, VL;

  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
    Index = VPGN->getIndex();
    Mask = VPGN->getMask();
    PassThru = DAG.getUNDEF(VT);
    VL = VPGN->getVectorLength();
    // VP doesn't support extending loads.
    LoadExtType = ISD::NON_EXTLOAD;
  } else {
    // Otherwise it must be an MGATHER.
    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
    Index = MGN->getIndex();
    Mask = MGN->getMask();
    PassThru = MGN->getPassThru();
    LoadExtType = MGN->getExtensionType();
  }

  MVT IndexVT = Index.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
  assert(LoadExtType == ISD::NON_EXTLOAD &&
         "Unexpected extending MGATHER/VP_GATHER");
  (void)LoadExtType;

  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
  // the selection of the masked intrinsics doesn't do this for us.
  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    // We need to use the larger of the result and index type to determine the
    // scalable type to use so we don't increase LMUL for any operand/result.
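    // For example, with a v8i16 result and a v8i32 index, the index type is
    // wider, so its container is chosen first and the result container is
    // rebuilt with the same element count; neither side is widened past need.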
    if (VT.bitsGE(IndexVT)) {
      ContainerVT = getContainerForFixedLengthVector(VT);
      IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
                                 ContainerVT.getVectorElementCount());
    } else {
      IndexVT = getContainerForFixedLengthVector(IndexVT);
      ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
                                     IndexVT.getVectorElementCount());
    }

    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);

    if (!IsUnmasked) {
      MVT MaskVT =
          MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
      PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
    }
  }

  if (!VL)
    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;

  unsigned IntID =
      IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
  if (!IsUnmasked)
    Ops.push_back(PassThru);
  Ops.push_back(BasePtr);
  Ops.push_back(Index);
  if (!IsUnmasked)
    Ops.push_back(Mask);
  Ops.push_back(VL);
  if (!IsUnmasked)
    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));

  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
  SDValue Result =
      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
  Chain = Result.getValue(1);

  if (VT.isFixedLengthVector())
    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);

  return DAG.getMergeValues({Result, Chain}, DL);
}

// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then
// be matched to an RVV indexed store. The RVV indexed store instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  MachineMemOperand *MMO = MemSD->getMemOperand();
  SDValue Chain = MemSD->getChain();
  SDValue BasePtr = MemSD->getBasePtr();

  bool IsTruncatingStore = false;
  SDValue Index, Mask, Val, VL;

  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
    Index = VPSN->getIndex();
    Mask = VPSN->getMask();
    Val = VPSN->getValue();
    VL = VPSN->getVectorLength();
    // VP doesn't support truncating stores.
    IsTruncatingStore = false;
  } else {
    // Otherwise it must be an MSCATTER.
    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
    Index = MSN->getIndex();
    Mask = MSN->getMask();
    Val = MSN->getValue();
    IsTruncatingStore = MSN->isTruncatingStore();
  }

  MVT VT = Val.getSimpleValueType();
  MVT IndexVT = Index.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
  (void)IsTruncatingStore;

  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
  // the selection of the masked intrinsics doesn't do this for us.
  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    // We need to use the larger of the value and index type to determine the
    // scalable type to use so we don't increase LMUL for any operand/result.
    if (VT.bitsGE(IndexVT)) {
      ContainerVT = getContainerForFixedLengthVector(VT);
      IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
                                 ContainerVT.getVectorElementCount());
    } else {
      IndexVT = getContainerForFixedLengthVector(IndexVT);
      ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
                                     IndexVT.getVectorElementCount());
    }

    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
    Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);

    if (!IsUnmasked) {
      MVT MaskVT =
          MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
    }
  }

  if (!VL)
    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;

  unsigned IntID =
      IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
  Ops.push_back(Val);
  Ops.push_back(BasePtr);
  Ops.push_back(Index);
  if (!IsUnmasked)
    Ops.push_back(Mask);
  Ops.push_back(VL);

  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
}

SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue SysRegNo = DAG.getConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
  SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);

  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table, which consists of a sequence of 4-bit fields, each
  // holding the corresponding FLT_ROUNDS mode.
  static const int Table =
      (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
      (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
      (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
      (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
      (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
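  // For example, reading FRM == RISCVFPRndMode::RTZ (1) shifts the table right
  // by 4 and masks with 7, yielding int(RoundingMode::TowardZero). A 3-bit
  // mask suffices because every FLT_ROUNDS value fits in 3 bits.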

  SDValue Shift =
      DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                               DAG.getConstant(7, DL, XLenVT));

  return DAG.getMergeValues({Masked, Chain}, DL);
}

SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue RMValue = Op->getOperand(1);
  SDValue SysRegNo = DAG.getConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);

  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table, which consists of a sequence of 4-bit fields, each
  // holding the corresponding RISCV mode.
  static const unsigned Table =
      (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
      (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
      (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
      (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
      (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
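  // This is the inverse of the lowerGET_ROUNDING table: e.g. writing
  // RoundingMode::TowardNegative (3) selects nibble 3, which holds
  // RISCVFPRndMode::RDN.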

  SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
                              DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                        DAG.getConstant(0x7, DL, XLenVT));
  return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
                     RMValue);
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  case RISCVISD::GREV:
    return RISCVISD::GREVW;
  case RISCVISD::GORC:
    return RISCVISD::GORCW;
  }
}

// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W instructions later on, because the fact that the operation
// was originally of type i8/i16/i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
                                   unsigned ExtOpc = ISD::ANY_EXTEND) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics so that the number of sign-extension instructions can be reduced.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    bool IsStrict = N->isStrictFPOpcode();
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
                    N->getOpcode() == ISD::STRICT_FP_TO_SINT;
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
        TargetLowering::TypeSoftenFloat) {
      // FIXME: Support strict FP.
      if (IsStrict)
        return;
      if (!isTypeLegal(Op0.getValueType()))
        return;
      unsigned Opc =
          IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization we'd end up with 'di'. If
    // the FP type doesn't need to be softened just let generic type
    // legalization promote the result type.
    RTLIB::Libcall LC;
    if (IsSigned)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::MUL: {
    unsigned Size = N->getSimpleValueType(0).getSizeInBits();
    unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
    if (Size > XLen) {
      assert(Size == (XLen * 2) && "Unexpected custom legalisation");
      SDValue LHS = N->getOperand(0);
      SDValue RHS = N->getOperand(1);
      APInt HighMask = APInt::getHighBitsSet(Size, XLen);

      bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
      bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
      // We need exactly one side to be unsigned.
      if (LHSIsU == RHSIsU)
        return;

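      // Split the double-width multiply into MUL (the low XLen bits) and
      // MULHSU (the high XLen bits). MULHSU multiplies a signed value by an
      // unsigned one, which is why exactly one operand must be unsigned.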
      auto MakeMULPair = [&](SDValue S, SDValue U) {
        MVT XLenVT = Subtarget.getXLenVT();
        S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
        U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
        SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
        SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
        return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
      };

      bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
      bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;

      // The other operand should be signed, but still prefer MULH when
      // possible.
      if (RHSIsU && LHSIsS && !RHSIsS)
        Results.push_back(MakeMULPair(LHS, RHS));
      else if (LHSIsU && RHSIsS && !LHSIsS)
        Results.push_back(MakeMULPair(RHS, LHS));

      return;
    }
    LLVM_FALLTHROUGH;
  }
  case ISD::ADD:
  case ISD::SUB:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() != ISD::Constant) {
      Results.push_back(customLegalizeToWOp(N, DAG));
      break;
    }

    // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
    // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
    // shift amount.
    if (N->getOpcode() == ISD::SHL) {
      SDLoc DL(N);
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
      SDValue NewOp1 =
          DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
      SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                                   DAG.getValueType(MVT::i32));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    }

    break;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    bool IsCTZ =
        N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
    unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
    SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
    return;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM: {
    MVT VT = N->getSimpleValueType(0);
    assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
           Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
           "Unexpected custom legalisation");
    // Don't promote division/remainder by constant since we should expand
    // those to a multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;

    // If the input is i32, use ANY_EXTEND since the W instructions don't read
    // the upper 32 bits. For other types we need to sign or zero extend
    // based on the opcode.
    unsigned ExtOpc = ISD::ANY_EXTEND;
    if (VT != MVT::i32)
      ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
                                           : ISD::ZERO_EXTEND;

    Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    bool IsAdd = N->getOpcode() == ISD::UADDO;
    // Create an ADDW or SUBW.
    SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue Res =
        DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
    Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
                      DAG.getValueType(MVT::i32));

    // Sign extend the LHS and perform an unsigned compare with the ADDW
    // result. Since the inputs are sign extended from i32, this is equivalent
    // to comparing the lower 32 bits.
    LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
                                    IsAdd ? ISD::SETULT : ISD::SETUGT);

    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
    Results.push_back(Overflow);
    return;
  }
  case ISD::UADDSAT:
  case ISD::USUBSAT: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
      // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
      // sign extend allows overflow of the lower 32 bits to be detected on
      // the promoted size.
      SDValue LHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
      SDValue RHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }

    // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
    // promotion for UADDO/USUBO.
    Results.push_back(expandAddSubSat(N, DAG));
    return;
  }
  case ISD::BITCAST: {
    EVT VT = N->getValueType(0);
    assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
    SDValue Op0 = N->getOperand(0);
    EVT Op0VT = Op0.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
    } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
               isTypeLegal(Op0VT)) {
      // Custom-legalize bitcasts from fixed-length vector types to illegal
      // scalar types in order to improve codegen. Bitcast the vector to a
      // one-element vector type whose element type is the same as the result
      // type, and extract the first element.
      LLVMContext &Context = *DAG.getContext();
      SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
      Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
                                    DAG.getConstant(0, DL, XLenVT)));
    }
    break;
  }
  case RISCVISD::GREV:
  case RISCVISD::GORC: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is similar to customLegalizeToWOp, except that we pass the second
    // operand (a TargetConstant) straight through: it is already of type
    // XLenVT.
    RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    break;
  }
  case RISCVISD::SHFL: {
    // There is no SHFLIW instruction, but we can just promote the operation.
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    break;
  }
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    MVT VT = N->getSimpleValueType(0);
    MVT XLenVT = Subtarget.getXLenVT();
    assert((VT == MVT::i8 || VT == MVT::i16 ||
            (VT == MVT::i32 && Subtarget.is64Bit())) &&
           Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (N->getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
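    // For an i32 input this gives imm 31 (a full bit reversal) for BITREVERSE
    // and imm 24 (a byte swap) for BSWAP.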
    unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
    SDValue GREVI =
        DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue NewOp2 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
    // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
    // Mask the shift amount to 5 bits.
    NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                         DAG.getConstant(0x1f, DL, MVT::i64));
    unsigned Opc =
        N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
    SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper- and lower- halves of the SEW-bit vector element, slid down to the
    // first element.
    SDValue Vec = N->getOperand(0);
    SDValue Idx = N->getOperand(1);

    // The vector type hasn't been legalized yet so we can't issue target
    // specific nodes if it needs legalization.
    // FIXME: We would manually legalize if it's important.
    if (!isTypeLegal(Vec.getValueType()))
      return;

    MVT VecVT = Vec.getSimpleValueType();

    assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
           VecVT.getVectorElementType() == MVT::i64 &&
           "Unexpected EXTRACT_VECTOR_ELT legalization");

    // If this is a fixed vector, we need to convert it to a scalable vector.
    MVT ContainerVT = VecVT;
    if (VecVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(VecVT);
      Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
    }

    MVT XLenVT = Subtarget.getXLenVT();

    // Use a VL of 1 to avoid processing more elements than we need.
    MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
    SDValue VL = DAG.getConstant(1, DL, XLenVT);
    SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);

    // Unless the index is known to be 0, we must slide the vector down to get
    // the desired element into index 0.
    if (!isNullConstant(Idx)) {
      Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
                        DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
    }

    // Extract the lower XLEN bits of the correct vector element.
    SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);

    // To extract the upper XLEN bits of the vector element, shift the first
    // element right by 32 bits and re-extract the lower XLEN bits.
    SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                                     DAG.getConstant(32, DL, XLenVT), VL);
    SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
                                 ThirtyTwoV, Mask, VL);

    SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      llvm_unreachable(
          "Don't know how to custom type legalize this intrinsic!");
    case Intrinsic::riscv_orc_b: {
      // Lower to the GORCI encoding for orc.b with the operand extended.
      SDValue NewOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      // If Zbp is enabled, use GORCIW which will sign extend the result.
      unsigned Opc =
          Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
                                DAG.getConstant(7, DL, MVT::i64));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }
    case Intrinsic::riscv_grev:
    case Intrinsic::riscv_gorc: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc =
          IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_shfl:
    case Intrinsic::riscv_unshfl: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc =
          IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
      if (isa<ConstantSDNode>(N->getOperand(2))) {
        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                             DAG.getConstant(0xf, DL, MVT::i64));
        Opc =
            IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
      }
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_bcompress:
    case Intrinsic::riscv_bdecompress: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc = IntNo == Intrinsic::riscv_bcompress
                         ? RISCVISD::BCOMPRESSW
                         : RISCVISD::BDECOMPRESSW;
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_vmv_x_s: {
      EVT VT = N->getValueType(0);
      MVT XLenVT = Subtarget.getXLenVT();
      if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
        SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
                                      Subtarget.getXLenVT(), N->getOperand(1));
        Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
        return;
      }

      assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
             "Unexpected custom legalization");

      // We need to do the move in two steps.
      SDValue Vec = N->getOperand(1);
      MVT VecVT = Vec.getSimpleValueType();

      // First extract the lower XLEN bits of the element.
      SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);

      // To extract the upper XLEN bits of the vector element, shift the first
      // element right by 32 bits and re-extract the lower XLEN bits.
      SDValue VL = DAG.getConstant(1, DL, XLenVT);
      MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
      SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
      SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
                                       DAG.getConstant(32, DL, XLenVT), VL);
      SDValue LShr32 =
          DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
      SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);

      Results.push_back(
          DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
      break;
    }
    }
    break;
  }
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMIN:
    if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
      Results.push_back(V);
    break;
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMIN:
    if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
      Results.push_back(V);
    break;
  case ISD::FLT_ROUNDS_: {
    SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
    SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    break;
  }
  }
}

// A structure to hold one of the bit-manipulation patterns below. Together, a
// SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
//   (or (and (shl x, 1), 0xAAAAAAAA),
//       (and (srl x, 1), 0x55555555))
struct RISCVBitmanipPat {
  SDValue Op;
  unsigned ShAmt;
  bool IsSHL;

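  // Two patterns pair up when they pull complementary bit groups out of the
  // same source with the same shift amount, one via SHL and the other via SRL.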
  bool formsPairWith(const RISCVBitmanipPat &Other) const {
    return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
  }
};

// Matches patterns of the form
//   (and (shl x, C2), (C1 << C2))
//   (and (srl x, C2), C1)
//   (shl (and x, C1), C2)
//   (srl (and x, (C1 << C2)), C2)
// where C2 is a power of 2 and C1 has at least that many leading zeroes.
// The expected masks for each shift amount are specified in BitmanipMasks,
// where BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
// possible XLen is 64.
static Optional<RISCVBitmanipPat>
matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
  assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
         "Unexpected number of masks");
  Optional<uint64_t> Mask;
  // Optionally consume a mask around the shift operation.
  if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
    Mask = Op.getConstantOperandVal(1);
    Op = Op.getOperand(0);
  }
  if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
    return None;
  bool IsSHL = Op.getOpcode() == ISD::SHL;

  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return None;
  uint64_t ShAmt = Op.getConstantOperandVal(1);

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
    return None;
  // If we don't have enough masks for 64 bit, then we must be trying to
  // match SHFL so we're only allowed to shift 1/4 of the width.
  if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
    return None;

  SDValue Src = Op.getOperand(0);

  // The expected mask is shifted left when the AND is found around SHL
  // patterns.
  //   ((x >> 1) & 0x55555555)
  //   ((x << 1) & 0xAAAAAAAA)
  bool SHLExpMask = IsSHL;

  if (!Mask) {
    // Sometimes LLVM keeps the mask as an operand of the shift, typically when
    // the mask is all ones: consume that now.
    if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
      Mask = Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
      // The expected mask is now in fact shifted left for SRL, so reverse the
      // decision.
      //   ((x & 0xAAAAAAAA) >> 1)
      //   ((x & 0x55555555) << 1)
      SHLExpMask = !SHLExpMask;
    } else {
      // Use a default shifted mask of all-ones if there's no AND, truncated
      // down to the expected width. This simplifies the logic later on.
      Mask = maskTrailingOnes<uint64_t>(Width);
      *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
    }
  }

  unsigned MaskIdx = Log2_32(ShAmt);
  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  if (SHLExpMask)
    ExpMask <<= ShAmt;

  if (Mask != ExpMask)
    return None;

  return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
}

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x55555555 << 1))
//   (and (srl x, 1), 0x55555555)
//   (shl (and x, 0x55555555), 1)
//   (srl (and x, (0x55555555 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x55555555 / 0xAAAAAAAA
//   [2]  = 0x33333333 / 0xCCCCCCCC
//   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
//   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
//   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
  static const uint64_t BitmanipMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}

// Match the following pattern as a GREVI(W) operation
//   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
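// For example, (or (and (shl x, 1), 0xAAAAAAAA), (and (srl x, 1), 0x55555555))
// swaps adjacent bits and becomes (GREV x, 1).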
static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    auto LHS = matchGREVIPat(Op.getOperand(0));
    auto RHS = matchGREVIPat(Op.getOperand(1));
    if (LHS && RHS && LHS->formsPairWith(*RHS)) {
      SDLoc DL(Op);
      return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
                         DAG.getConstant(LHS->ShAmt, DL, VT));
    }
  }
  return SDValue();
}

// Matches any of the following patterns as a GORCI(W) operation
// 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
// 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
// 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
// Note that with the variant of 3.,
//     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
// the inner pattern will first be matched as GREVI and then the outer
// pattern will be matched to GORC via the first rule above.
// 4.  (or (rotl/rotr x, bitwidth/2), x)
static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
      if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
          isa<ConstantSDNode>(Reverse.getOperand(1)) &&
          isPowerOf2_32(Reverse.getConstantOperandVal(1)))
        return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
      // We can also form GORCI from ROTL/ROTR by half the bitwidth.
      if ((Reverse.getOpcode() == ISD::ROTL ||
           Reverse.getOpcode() == ISD::ROTR) &&
          Reverse.getOperand(0) == X &&
          isa<ConstantSDNode>(Reverse.getOperand(1))) {
        uint64_t RotAmt = Reverse.getConstantOperandVal(1);
        if (RotAmt == (VT.getSizeInBits() / 2))
          return DAG.getNode(RISCVISD::GORC, DL, VT, X,
                             DAG.getConstant(RotAmt, DL, VT));
      }
      return SDValue();
    };

    // Check for either commutable permutation of (or (GREVI x, shamt), x)
    if (SDValue V = MatchOROfReverse(Op0, Op1))
      return V;
    if (SDValue V = MatchOROfReverse(Op1, Op0))
      return V;

    // OR is commutable so canonicalize its OR operand to the left
    if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
      std::swap(Op0, Op1);
    if (Op0.getOpcode() != ISD::OR)
      return SDValue();
    SDValue OrOp0 = Op0.getOperand(0);
    SDValue OrOp1 = Op0.getOperand(1);
    auto LHS = matchGREVIPat(OrOp0);
    // OR is commutable so swap the operands and try again: x might have been
    // on the left
    if (!LHS) {
      std::swap(OrOp0, OrOp1);
      LHS = matchGREVIPat(OrOp0);
    }
    auto RHS = matchGREVIPat(Op1);
    if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
      return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
                         DAG.getConstant(LHS->ShAmt, DL, VT));
    }
  }
  return SDValue();
}

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x22222222 << 1))
//   (and (srl x, 1), 0x22222222)
//   (shl (and x, 0x22222222), 1)
//   (srl (and x, (0x22222222 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
//   [4]  = 0x00F000F0 / 0x0F000F00
//   [8]  = 0x0000FF00 / 0x00FF0000
//   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
  static const uint64_t BitmanipMasks[] = {
      0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
      0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}
6215 
6216 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
6217 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6218                                const RISCVSubtarget &Subtarget) {
6219   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6220   EVT VT = Op.getValueType();
6221 
6222   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6223     return SDValue();
6224 
6225   SDValue Op0 = Op.getOperand(0);
6226   SDValue Op1 = Op.getOperand(1);
6227 
6228   // Or is commutable so canonicalize the second OR to the LHS.
6229   if (Op0.getOpcode() != ISD::OR)
6230     std::swap(Op0, Op1);
6231   if (Op0.getOpcode() != ISD::OR)
6232     return SDValue();
6233 
6234   // We found an inner OR, so our operands are the operands of the inner OR
6235   // and the other operand of the outer OR.
6236   SDValue A = Op0.getOperand(0);
6237   SDValue B = Op0.getOperand(1);
6238   SDValue C = Op1;
6239 
6240   auto Match1 = matchSHFLPat(A);
6241   auto Match2 = matchSHFLPat(B);
6242 
6243   // If neither matched, we failed.
6244   if (!Match1 && !Match2)
6245     return SDValue();
6246 
  // We had at least one match. If one failed, try the remaining C operand.
6248   if (!Match1) {
6249     std::swap(A, C);
6250     Match1 = matchSHFLPat(A);
6251     if (!Match1)
6252       return SDValue();
6253   } else if (!Match2) {
6254     std::swap(B, C);
6255     Match2 = matchSHFLPat(B);
6256     if (!Match2)
6257       return SDValue();
6258   }
6259   assert(Match1 && Match2);
6260 
6261   // Make sure our matches pair up.
6262   if (!Match1->formsPairWith(*Match2))
6263     return SDValue();
6264 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
6267   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6268       C.getOperand(0) != Match1->Op)
6269     return SDValue();
6270 
6271   uint64_t Mask = C.getConstantOperandVal(1);
6272 
6273   static const uint64_t BitmanipMasks[] = {
6274       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6275       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6276   };
6277 
6278   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6279   unsigned MaskIdx = Log2_32(Match1->ShAmt);
6280   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6281 
6282   if (Mask != ExpMask)
6283     return SDValue();
6284 
6285   SDLoc DL(Op);
6286   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
6287                      DAG.getConstant(Match1->ShAmt, DL, VT));
6288 }
6289 
// Optimize (add (shl x, c0), (shl y, c1)) ->
//          (SLLI (SH*ADD x, y), c0), if c1-c0 is 1, 2 or 3.
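// For example, (add (shl x, 5), (shl y, 8)) becomes (SLLI (SH3ADD y, x), 5)
// since ((y << 3) + x) << 5 == (x << 5) + (y << 8).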
6292 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
6293                                   const RISCVSubtarget &Subtarget) {
  // Perform this optimization only when the Zba extension is enabled.
6295   if (!Subtarget.hasStdExtZba())
6296     return SDValue();
6297 
6298   // Skip for vector types and larger types.
6299   EVT VT = N->getValueType(0);
6300   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6301     return SDValue();
6302 
6303   // The two operand nodes must be SHL and have no other use.
6304   SDValue N0 = N->getOperand(0);
6305   SDValue N1 = N->getOperand(1);
6306   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
6307       !N0->hasOneUse() || !N1->hasOneUse())
6308     return SDValue();
6309 
6310   // Check c0 and c1.
6311   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6312   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
6313   if (!N0C || !N1C)
6314     return SDValue();
6315   int64_t C0 = N0C->getSExtValue();
6316   int64_t C1 = N1C->getSExtValue();
6317   if (C0 <= 0 || C1 <= 0)
6318     return SDValue();
6319 
6320   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
6321   int64_t Bits = std::min(C0, C1);
6322   int64_t Diff = std::abs(C0 - C1);
6323   if (Diff != 1 && Diff != 2 && Diff != 3)
6324     return SDValue();
6325 
6326   // Build nodes.
6327   SDLoc DL(N);
6328   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
6329   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
6330   SDValue NA0 =
6331       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
6332   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
6333   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
6334 }
6335 
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
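// For example:
//   (GREVI (GREVI x, 24), 8) -> (GREVI x, 16)
//   (GREVI (GREVI x, 7), 7)  -> x
//   (GORCI (GORCI x, 1), 3)  -> (GORCI x, 3)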
6340 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
6341   SDValue Src = N->getOperand(0);
6342 
6343   if (Src.getOpcode() != N->getOpcode())
6344     return SDValue();
6345 
6346   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
6347       !isa<ConstantSDNode>(Src.getOperand(1)))
6348     return SDValue();
6349 
6350   unsigned ShAmt1 = N->getConstantOperandVal(1);
6351   unsigned ShAmt2 = Src.getConstantOperandVal(1);
6352   Src = Src.getOperand(0);
6353 
6354   unsigned CombinedShAmt;
6355   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
6356     CombinedShAmt = ShAmt1 | ShAmt2;
6357   else
6358     CombinedShAmt = ShAmt1 ^ ShAmt2;
6359 
6360   if (CombinedShAmt == 0)
6361     return Src;
6362 
6363   SDLoc DL(N);
6364   return DAG.getNode(
6365       N->getOpcode(), DL, N->getValueType(0), Src,
6366       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
6367 }
6368 
6369 // Combine a constant select operand into its use:
6370 //
6371 // (and (select cond, -1, c), x)
6372 //   -> (select cond, x, (and x, c))  [AllOnes=1]
6373 // (or  (select cond, 0, c), x)
6374 //   -> (select cond, x, (or x, c))  [AllOnes=0]
6375 // (xor (select cond, 0, c), x)
6376 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
6377 // (add (select cond, 0, c), x)
6378 //   -> (select cond, x, (add x, c))  [AllOnes=0]
6379 // (sub x, (select cond, 0, c))
6380 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
6381 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6382                                    SelectionDAG &DAG, bool AllOnes) {
6383   EVT VT = N->getValueType(0);
6384 
6385   // Skip vectors.
6386   if (VT.isVector())
6387     return SDValue();
6388 
6389   if ((Slct.getOpcode() != ISD::SELECT &&
6390        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
6391       !Slct.hasOneUse())
6392     return SDValue();
6393 
6394   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
6395     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
6396   };
6397 
6398   bool SwapSelectOps;
6399   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
6400   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
6401   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
6402   SDValue NonConstantVal;
6403   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
6404     SwapSelectOps = false;
6405     NonConstantVal = FalseVal;
6406   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
6407     SwapSelectOps = true;
6408     NonConstantVal = TrueVal;
6409   } else
6410     return SDValue();
6411 
  // Slct is now known to be the desired identity constant when CC is true.
6413   TrueVal = OtherOp;
6414   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
6415   // Unless SwapSelectOps says the condition should be false.
6416   if (SwapSelectOps)
6417     std::swap(TrueVal, FalseVal);
6418 
6419   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
6420     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
6421                        {Slct.getOperand(0), Slct.getOperand(1),
6422                         Slct.getOperand(2), TrueVal, FalseVal});
6423 
6424   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
6425                      {Slct.getOperand(0), TrueVal, FalseVal});
6426 }
6427 
6428 // Attempt combineSelectAndUse on each operand of a commutative operator N.
6429 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
6430                                               bool AllOnes) {
6431   SDValue N0 = N->getOperand(0);
6432   SDValue N1 = N->getOperand(1);
6433   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
6434     return Result;
6435   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
6436     return Result;
6437   return SDValue();
6438 }
6439 
6440 // Transform (add (mul x, c0), c1) ->
6441 //           (add (mul (add x, c1/c0), c0), c1%c0).
6442 // if c1/c0 and c1%c0 are simm12, while c1 is not.
6443 // Or transform (add (mul x, c0), c1) ->
6444 //              (mul (add x, c1/c0), c0).
6445 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
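// For example, with c0 = 100 neither constant below is simm12, but:
//   (add (mul x, 100), 4123) -> (add (mul (add x, 41), 100), 23)
//   (add (mul x, 100), 4100) -> (mul (add x, 41), 100)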
6446 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
6447                                      const RISCVSubtarget &Subtarget) {
6448   // Skip for vector types and larger types.
6449   EVT VT = N->getValueType(0);
6450   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6451     return SDValue();
  // The first operand node must be a MUL and have no other use.
6453   SDValue N0 = N->getOperand(0);
6454   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
6455     return SDValue();
  // Check if c0 and c1 satisfy the conditions above.
6457   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6458   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6459   if (!N0C || !N1C)
6460     return SDValue();
6461   int64_t C0 = N0C->getSExtValue();
6462   int64_t C1 = N1C->getSExtValue();
6463   if (C0 == -1 || C0 == 0 || C0 == 1 || (C1 / C0) == 0 || isInt<12>(C1) ||
6464       !isInt<12>(C1 % C0) || !isInt<12>(C1 / C0))
6465     return SDValue();
6466   // If C0 * (C1 / C0) is a 12-bit integer, this transform will be reversed.
6467   if (isInt<12>(C0 * (C1 / C0)))
6468     return SDValue();
6469   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
6470   SDLoc DL(N);
6471   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
6472                              DAG.getConstant(C1 / C0, DL, VT));
6473   SDValue New1 =
6474       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
6475   if ((C1 % C0) == 0)
6476     return New1;
6477   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(C1 % C0, DL, VT));
6478 }
6479 
6480 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
6481                                  const RISCVSubtarget &Subtarget) {
6482   // Transform (add (mul x, c0), c1) ->
6483   //           (add (mul (add x, c1/c0), c0), c1%c0).
6484   // if c1/c0 and c1%c0 are simm12, while c1 is not.
6485   // Or transform (add (mul x, c0), c1) ->
6486   //              (mul (add x, c1/c0), c0).
6487   // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
6488   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
6489     return V;
6490   // Fold (add (shl x, c0), (shl y, c1)) ->
  //      (SLLI (SH*ADD x, y), c0), if c1-c0 is 1, 2 or 3.
6492   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
6493     return V;
6494   // fold (add (select lhs, rhs, cc, 0, y), x) ->
6495   //      (select lhs, rhs, cc, x, (add x, y))
6496   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6497 }
6498 
6499 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
6500   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
6501   //      (select lhs, rhs, cc, x, (sub x, y))
6502   SDValue N0 = N->getOperand(0);
6503   SDValue N1 = N->getOperand(1);
6504   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
6505 }
6506 
6507 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
6508   // fold (and (select lhs, rhs, cc, -1, y), x) ->
6509   //      (select lhs, rhs, cc, x, (and x, y))
6510   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
6511 }
6512 
6513 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
6514                                 const RISCVSubtarget &Subtarget) {
6515   if (Subtarget.hasStdExtZbp()) {
6516     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
6517       return GREV;
6518     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
6519       return GORC;
6520     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
6521       return SHFL;
6522   }
6523 
6524   // fold (or (select cond, 0, y), x) ->
6525   //      (select cond, x, (or x, y))
6526   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6527 }
6528 
6529 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
6530   // fold (xor (select cond, 0, y), x) ->
6531   //      (select cond, x, (xor x, y))
6532   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6533 }
6534 
6535 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
6536 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
6537 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
6538 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
6539 // ADDW/SUBW/MULW.
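// For example, (any_extend i64 (add i32 X, Y)) that is live-out through a
// CopyToReg becomes (sign_extend i64 (add i32 X, Y)), and any i32 setcc
// users of the add are rebuilt with sign-extended operands so the add can
// become ADDW.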
6540 static SDValue performANY_EXTENDCombine(SDNode *N,
6541                                         TargetLowering::DAGCombinerInfo &DCI,
6542                                         const RISCVSubtarget &Subtarget) {
6543   if (!Subtarget.is64Bit())
6544     return SDValue();
6545 
6546   SelectionDAG &DAG = DCI.DAG;
6547 
6548   SDValue Src = N->getOperand(0);
6549   EVT VT = N->getValueType(0);
6550   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
6551     return SDValue();
6552 
6553   // The opcode must be one that can implicitly sign_extend.
6554   // FIXME: Additional opcodes.
6555   switch (Src.getOpcode()) {
6556   default:
6557     return SDValue();
6558   case ISD::MUL:
6559     if (!Subtarget.hasStdExtM())
6560       return SDValue();
6561     LLVM_FALLTHROUGH;
6562   case ISD::ADD:
6563   case ISD::SUB:
6564     break;
6565   }
6566 
6567   // Only handle cases where the result is used by a CopyToReg. That likely
6568   // means the value is a liveout of the basic block. This helps prevent
6569   // infinite combine loops like PR51206.
6570   if (none_of(N->uses(),
6571               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
6572     return SDValue();
6573 
6574   SmallVector<SDNode *, 4> SetCCs;
6575   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
6576                             UE = Src.getNode()->use_end();
6577        UI != UE; ++UI) {
6578     SDNode *User = *UI;
6579     if (User == N)
6580       continue;
6581     if (UI.getUse().getResNo() != Src.getResNo())
6582       continue;
6583     // All i32 setccs are legalized by sign extending operands.
6584     if (User->getOpcode() == ISD::SETCC) {
6585       SetCCs.push_back(User);
6586       continue;
6587     }
6588     // We don't know if we can extend this user.
6589     break;
6590   }
6591 
6592   // If we don't have any SetCCs, this isn't worthwhile.
6593   if (SetCCs.empty())
6594     return SDValue();
6595 
6596   SDLoc DL(N);
6597   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
6598   DCI.CombineTo(N, SExt);
6599 
6600   // Promote all the setccs.
6601   for (SDNode *SetCC : SetCCs) {
6602     SmallVector<SDValue, 4> Ops;
6603 
6604     for (unsigned j = 0; j != 2; ++j) {
6605       SDValue SOp = SetCC->getOperand(j);
6606       if (SOp == Src)
6607         Ops.push_back(SExt);
6608       else
6609         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
6610     }
6611 
6612     Ops.push_back(SetCC->getOperand(2));
6613     DCI.CombineTo(SetCC,
6614                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6615   }
6616   return SDValue(N, 0);
6617 }
6618 
6619 // Try to form VWMUL or VWMULU.
6620 // FIXME: Support VWMULSU.
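// For example, when both operands are sign- or zero-extended from half-width
// elements:
//   (mul_vl (vsext_vl a, m, vl), (vsext_vl b, m, vl), m, vl)
//     -> (vwmul_vl a, b, m, vl)
//   (mul_vl (vzext_vl a, m, vl), (vzext_vl b, m, vl), m, vl)
//     -> (vwmulu_vl a, b, m, vl)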
6621 static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
6622                                     SelectionDAG &DAG) {
6623   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
6624   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6625   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6626   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
6627     return SDValue();
6628 
6629   SDValue Mask = N->getOperand(2);
6630   SDValue VL = N->getOperand(3);
6631 
6632   // Make sure the mask and VL match.
6633   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
6634     return SDValue();
6635 
6636   MVT VT = N->getSimpleValueType(0);
6637 
6638   // Determine the narrow size for a widening multiply.
6639   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
6640   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
6641                                   VT.getVectorElementCount());
6642 
6643   SDLoc DL(N);
6644 
6645   // See if the other operand is the same opcode.
6646   if (Op0.getOpcode() == Op1.getOpcode()) {
6647     if (!Op1.hasOneUse())
6648       return SDValue();
6649 
6650     // Make sure the mask and VL match.
6651     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
6652       return SDValue();
6653 
6654     Op1 = Op1.getOperand(0);
6655   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
6656     // The operand is a splat of a scalar.
6657 
6658     // The VL must be the same.
6659     if (Op1.getOperand(1) != VL)
6660       return SDValue();
6661 
6662     // Get the scalar value.
6663     Op1 = Op1.getOperand(0);
6664 
    // See if we have enough sign bits or zero bits in the scalar to use a
    // widening multiply by splatting to a smaller element size.
6667     unsigned EltBits = VT.getScalarSizeInBits();
6668     unsigned ScalarBits = Op1.getValueSizeInBits();
6669     // Make sure we're getting all element bits from the scalar register.
6670     // FIXME: Support implicit sign extension of vmv.v.x?
6671     if (ScalarBits < EltBits)
6672       return SDValue();
6673 
6674     if (IsSignExt) {
6675       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
6676         return SDValue();
6677     } else {
6678       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
6679       if (!DAG.MaskedValueIsZero(Op1, Mask))
6680         return SDValue();
6681     }
6682 
6683     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
6684   } else
6685     return SDValue();
6686 
6687   Op0 = Op0.getOperand(0);
6688 
6689   // Re-introduce narrower extends if needed.
6690   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6691   if (Op0.getValueType() != NarrowVT)
6692     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6693   if (Op1.getValueType() != NarrowVT)
6694     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6695 
6696   unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6697   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6698 }
6699 
6700 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
6701                                                DAGCombinerInfo &DCI) const {
6702   SelectionDAG &DAG = DCI.DAG;
6703 
6704   // Helper to call SimplifyDemandedBits on an operand of N where only some low
6705   // bits are demanded. N will be added to the Worklist if it was not deleted.
6706   // Caller should return SDValue(N, 0) if this returns true.
6707   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
6708     SDValue Op = N->getOperand(OpNo);
6709     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
6710     if (!SimplifyDemandedBits(Op, Mask, DCI))
6711       return false;
6712 
6713     if (N->getOpcode() != ISD::DELETED_NODE)
6714       DCI.AddToWorklist(N);
6715     return true;
6716   };
6717 
6718   switch (N->getOpcode()) {
6719   default:
6720     break;
6721   case RISCVISD::SplitF64: {
6722     SDValue Op0 = N->getOperand(0);
6723     // If the input to SplitF64 is just BuildPairF64 then the operation is
6724     // redundant. Instead, use BuildPairF64's operands directly.
6725     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
6726       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
6727 
6728     SDLoc DL(N);
6729 
6730     // It's cheaper to materialise two 32-bit integers than to load a double
6731     // from the constant pool and transfer it to integer registers through the
6732     // stack.
6733     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
6734       APInt V = C->getValueAPF().bitcastToAPInt();
6735       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
6736       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
6737       return DCI.CombineTo(N, Lo, Hi);
6738     }
6739 
6740     // This is a target-specific version of a DAGCombine performed in
6741     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6742     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6743     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6744     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6745         !Op0.getNode()->hasOneUse())
6746       break;
6747     SDValue NewSplitF64 =
6748         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
6749                     Op0.getOperand(0));
6750     SDValue Lo = NewSplitF64.getValue(0);
6751     SDValue Hi = NewSplitF64.getValue(1);
6752     APInt SignBit = APInt::getSignMask(32);
6753     if (Op0.getOpcode() == ISD::FNEG) {
6754       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
6755                                   DAG.getConstant(SignBit, DL, MVT::i32));
6756       return DCI.CombineTo(N, Lo, NewHi);
6757     }
6758     assert(Op0.getOpcode() == ISD::FABS);
6759     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
6760                                 DAG.getConstant(~SignBit, DL, MVT::i32));
6761     return DCI.CombineTo(N, Lo, NewHi);
6762   }
6763   case RISCVISD::SLLW:
6764   case RISCVISD::SRAW:
6765   case RISCVISD::SRLW:
6766   case RISCVISD::ROLW:
6767   case RISCVISD::RORW: {
6768     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6769     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6770         SimplifyDemandedLowBitsHelper(1, 5))
6771       return SDValue(N, 0);
6772     break;
6773   }
6774   case RISCVISD::CLZW:
6775   case RISCVISD::CTZW: {
6776     // Only the lower 32 bits of the first operand are read
6777     if (SimplifyDemandedLowBitsHelper(0, 32))
6778       return SDValue(N, 0);
6779     break;
6780   }
6781   case RISCVISD::FSL:
6782   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
6784     unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
6785     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6786     if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
6787       return SDValue(N, 0);
6788     break;
6789   }
6790   case RISCVISD::FSLW:
6791   case RISCVISD::FSRW: {
6792     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
6793     // read.
6794     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6795         SimplifyDemandedLowBitsHelper(1, 32) ||
6796         SimplifyDemandedLowBitsHelper(2, 6))
6797       return SDValue(N, 0);
6798     break;
6799   }
6800   case RISCVISD::GREV:
6801   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
6803     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6804     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6805     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
6806       return SDValue(N, 0);
6807 
6808     return combineGREVI_GORCI(N, DCI.DAG);
6809   }
6810   case RISCVISD::GREVW:
6811   case RISCVISD::GORCW: {
6812     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6813     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6814         SimplifyDemandedLowBitsHelper(1, 5))
6815       return SDValue(N, 0);
6816 
6817     return combineGREVI_GORCI(N, DCI.DAG);
6818   }
6819   case RISCVISD::SHFL:
6820   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
6822     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6823     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6824     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
6825       return SDValue(N, 0);
6826 
6827     break;
6828   }
6829   case RISCVISD::SHFLW:
6830   case RISCVISD::UNSHFLW: {
6831     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6836     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6837         SimplifyDemandedLowBitsHelper(1, 4))
6838       return SDValue(N, 0);
6839 
6840     break;
6841   }
6842   case RISCVISD::BCOMPRESSW:
6843   case RISCVISD::BDECOMPRESSW: {
6844     // Only the lower 32 bits of LHS and RHS are read.
6845     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6846         SimplifyDemandedLowBitsHelper(1, 32))
6847       return SDValue(N, 0);
6848 
6849     break;
6850   }
6851   case RISCVISD::FMV_X_ANYEXTH:
6852   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6853     SDLoc DL(N);
6854     SDValue Op0 = N->getOperand(0);
6855     MVT VT = N->getSimpleValueType(0);
6856     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6857     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
6858     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
6859     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
6860          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
6861         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
6862          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
6863       assert(Op0.getOperand(0).getValueType() == VT &&
6864              "Unexpected value type!");
6865       return Op0.getOperand(0);
6866     }
6867 
6868     // This is a target-specific version of a DAGCombine performed in
6869     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6870     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6871     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6872     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6873         !Op0.getNode()->hasOneUse())
6874       break;
6875     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
6876     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
6877     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
6878     if (Op0.getOpcode() == ISD::FNEG)
6879       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
6880                          DAG.getConstant(SignBit, DL, VT));
6881 
6882     assert(Op0.getOpcode() == ISD::FABS);
6883     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
6884                        DAG.getConstant(~SignBit, DL, VT));
6885   }
6886   case ISD::ADD:
6887     return performADDCombine(N, DAG, Subtarget);
6888   case ISD::SUB:
6889     return performSUBCombine(N, DAG);
6890   case ISD::AND:
6891     return performANDCombine(N, DAG);
6892   case ISD::OR:
6893     return performORCombine(N, DAG, Subtarget);
6894   case ISD::XOR:
6895     return performXORCombine(N, DAG);
6896   case ISD::ANY_EXTEND:
6897     return performANY_EXTENDCombine(N, DCI, Subtarget);
6898   case ISD::ZERO_EXTEND:
6899     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
6900     // type legalization. This is safe because fp_to_uint produces poison if
6901     // it overflows.
6902     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
6903         N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
6904         isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
6905       return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
6906                          N->getOperand(0).getOperand(0));
6907     return SDValue();
6908   case RISCVISD::SELECT_CC: {
6910     SDValue LHS = N->getOperand(0);
6911     SDValue RHS = N->getOperand(1);
6912     SDValue TrueV = N->getOperand(3);
6913     SDValue FalseV = N->getOperand(4);
6914 
6915     // If the True and False values are the same, we don't need a select_cc.
6916     if (TrueV == FalseV)
6917       return TrueV;
6918 
6919     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
6920     if (!ISD::isIntEqualitySetCC(CCVal))
6921       break;
6922 
6923     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6924     //      (select_cc X, Y, lt, trueV, falseV)
6925     // Sometimes the setcc is introduced after select_cc has been formed.
6926     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6927         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6928       // If we're looking for eq 0 instead of ne 0, we need to invert the
6929       // condition.
6930       bool Invert = CCVal == ISD::SETEQ;
6931       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6932       if (Invert)
6933         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6934 
6935       SDLoc DL(N);
6936       RHS = LHS.getOperand(1);
6937       LHS = LHS.getOperand(0);
6938       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6939 
6940       SDValue TargetCC = DAG.getCondCode(CCVal);
6941       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6942                          {LHS, RHS, TargetCC, TrueV, FalseV});
6943     }
6944 
6945     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6946     //      (select_cc X, Y, eq/ne, trueV, falseV)
6947     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6948       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6949                          {LHS.getOperand(0), LHS.getOperand(1),
6950                           N->getOperand(2), TrueV, FalseV});
6951     // (select_cc X, 1, setne, trueV, falseV) ->
6952     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6953     // This can occur when legalizing some floating point comparisons.
6954     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6955     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6956       SDLoc DL(N);
6957       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6958       SDValue TargetCC = DAG.getCondCode(CCVal);
6959       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6960       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6961                          {LHS, RHS, TargetCC, TrueV, FalseV});
6962     }
6963 
6964     break;
6965   }
6966   case RISCVISD::BR_CC: {
6967     SDValue LHS = N->getOperand(1);
6968     SDValue RHS = N->getOperand(2);
6969     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6970     if (!ISD::isIntEqualitySetCC(CCVal))
6971       break;
6972 
6973     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6974     //      (br_cc X, Y, lt, dest)
6975     // Sometimes the setcc is introduced after br_cc has been formed.
6976     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6977         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6978       // If we're looking for eq 0 instead of ne 0, we need to invert the
6979       // condition.
6980       bool Invert = CCVal == ISD::SETEQ;
6981       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6982       if (Invert)
6983         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6984 
6985       SDLoc DL(N);
6986       RHS = LHS.getOperand(1);
6987       LHS = LHS.getOperand(0);
6988       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6989 
6990       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6991                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6992                          N->getOperand(4));
6993     }
6994 
6995     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
6997     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6998       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6999                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
7000                          N->getOperand(3), N->getOperand(4));
7001 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
7004     // This can occur when legalizing some floating point comparisons.
7005     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7006     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7007       SDLoc DL(N);
7008       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7009       SDValue TargetCC = DAG.getCondCode(CCVal);
7010       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7011       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7012                          N->getOperand(0), LHS, RHS, TargetCC,
7013                          N->getOperand(4));
7014     }
7015     break;
7016   }
7017   case ISD::FCOPYSIGN: {
7018     EVT VT = N->getValueType(0);
7019     if (!VT.isVector())
7020       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try and bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND is a
    // truncating round (TRUNC=1).
7025     SDValue In2 = N->getOperand(1);
7026     // Avoid cases where the extend/round has multiple uses, as duplicating
7027     // those is typically more expensive than removing a fneg.
7028     if (!In2.hasOneUse())
7029       break;
7030     if (In2.getOpcode() != ISD::FP_EXTEND &&
7031         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
7032       break;
7033     In2 = In2.getOperand(0);
7034     if (In2.getOpcode() != ISD::FNEG)
7035       break;
7036     SDLoc DL(N);
7037     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7038     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7039                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7040   }
7041   case ISD::MGATHER:
7042   case ISD::MSCATTER:
7043   case ISD::VP_GATHER:
7044   case ISD::VP_SCATTER: {
7045     if (!DCI.isBeforeLegalize())
7046       break;
7047     SDValue Index, ScaleOp;
7048     bool IsIndexScaled = false;
7049     bool IsIndexSigned = false;
7050     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7051       Index = VPGSN->getIndex();
7052       ScaleOp = VPGSN->getScale();
7053       IsIndexScaled = VPGSN->isIndexScaled();
7054       IsIndexSigned = VPGSN->isIndexSigned();
7055     } else {
7056       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7057       Index = MGSN->getIndex();
7058       ScaleOp = MGSN->getScale();
7059       IsIndexScaled = MGSN->isIndexScaled();
7060       IsIndexSigned = MGSN->isIndexSigned();
7061     }
7062     EVT IndexVT = Index.getValueType();
7063     MVT XLenVT = Subtarget.getXLenVT();
7064     // RISCV indexed loads only support the "unsigned unscaled" addressing
7065     // mode, so anything else must be manually legalized.
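    // For example, a gather using signed i16 indices with a scale of 4 first
    // sign-extends the indices to XLenVT and then shifts them left by 2.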
7066     bool NeedsIdxLegalization =
7067         IsIndexScaled ||
7068         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7069     if (!NeedsIdxLegalization)
7070       break;
7071 
7072     SDLoc DL(N);
7073 
7074     // Any index legalization should first promote to XLenVT, so we don't lose
7075     // bits when scaling. This may create an illegal index type so we let
7076     // LLVM's legalization take care of the splitting.
7077     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7078     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7079       IndexVT = IndexVT.changeVectorElementType(XLenVT);
7080       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7081                           DL, IndexVT, Index);
7082     }
7083 
7084     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7085     if (IsIndexScaled && Scale != 1) {
7086       // Manually scale the indices by the element size.
7087       // TODO: Sanitize the scale operand here?
7088       // TODO: For VP nodes, should we use VP_SHL here?
7089       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
7090       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7091       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7092     }
7093 
7094     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7095     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7096       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7097                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
7098                               VPGN->getScale(), VPGN->getMask(),
7099                               VPGN->getVectorLength()},
7100                              VPGN->getMemOperand(), NewIndexTy);
7101     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7102       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7103                               {VPSN->getChain(), VPSN->getValue(),
7104                                VPSN->getBasePtr(), Index, VPSN->getScale(),
7105                                VPSN->getMask(), VPSN->getVectorLength()},
7106                               VPSN->getMemOperand(), NewIndexTy);
7107     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7108       return DAG.getMaskedGather(
7109           N->getVTList(), MGN->getMemoryVT(), DL,
7110           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7111            MGN->getBasePtr(), Index, MGN->getScale()},
7112           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7113     const auto *MSN = cast<MaskedScatterSDNode>(N);
7114     return DAG.getMaskedScatter(
7115         N->getVTList(), MSN->getMemoryVT(), DL,
7116         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7117          Index, MSN->getScale()},
7118         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7119   }
7120   case RISCVISD::SRA_VL:
7121   case RISCVISD::SRL_VL:
7122   case RISCVISD::SHL_VL: {
7123     SDValue ShAmt = N->getOperand(1);
7124     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7125       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7126       SDLoc DL(N);
7127       SDValue VL = N->getOperand(3);
7128       EVT VT = N->getValueType(0);
7129       ShAmt =
7130           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7131       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7132                          N->getOperand(2), N->getOperand(3));
7133     }
7134     break;
7135   }
7136   case ISD::SRA:
7137   case ISD::SRL:
7138   case ISD::SHL: {
7139     SDValue ShAmt = N->getOperand(1);
7140     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7141       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7142       SDLoc DL(N);
7143       EVT VT = N->getValueType(0);
7144       ShAmt =
7145           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7146       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7147     }
7148     break;
7149   }
7150   case RISCVISD::MUL_VL: {
7151     SDValue Op0 = N->getOperand(0);
7152     SDValue Op1 = N->getOperand(1);
7153     if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
7154       return V;
7155     if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
7156       return V;
7157     return SDValue();
7158   }
7159   case ISD::STORE: {
7160     auto *Store = cast<StoreSDNode>(N);
7161     SDValue Val = Store->getValue();
7162     // Combine store of vmv.x.s to vse with VL of 1.
7163     // FIXME: Support FP.
7164     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
7165       SDValue Src = Val.getOperand(0);
7166       EVT VecVT = Src.getValueType();
7167       EVT MemVT = Store->getMemoryVT();
7168       // The memory VT and the element type must match.
7169       if (VecVT.getVectorElementType() == MemVT) {
7170         SDLoc DL(N);
7171         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7172         return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
7173                               DAG.getConstant(1, DL, MaskVT),
7174                               DAG.getConstant(1, DL, Subtarget.getXLenVT()),
7175                               Store->getPointerInfo(),
7176                               Store->getOriginalAlign(),
7177                               Store->getMemOperand()->getFlags());
7178       }
7179     }
7180 
7181     break;
7182   }
7183   }
7184 
7185   return SDValue();
7186 }
7187 
7188 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
7189     const SDNode *N, CombineLevel Level) const {
7190   // The following folds are only desirable if `(OP _, c1 << c2)` can be
7191   // materialised in fewer instructions than `(OP _, c1)`:
7192   //
7193   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7194   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
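  // For example, (shl (add x, 1), 3) -> (add (shl x, 3), 8) is desirable
  // because 8 still fits in an add immediate, potentially enabling further
  // combines.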
7195   SDValue N0 = N->getOperand(0);
7196   EVT Ty = N0.getValueType();
7197   if (Ty.isScalarInteger() &&
7198       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
7199     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7200     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
7201     if (C1 && C2) {
7202       const APInt &C1Int = C1->getAPIntValue();
7203       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
7204 
7205       // We can materialise `c1 << c2` into an add immediate, so it's "free",
7206       // and the combine should happen, to potentially allow further combines
7207       // later.
7208       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
7209           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
7210         return true;
7211 
7212       // We can materialise `c1` in an add immediate, so it's "free", and the
7213       // combine should be prevented.
7214       if (C1Int.getMinSignedBits() <= 64 &&
7215           isLegalAddImmediate(C1Int.getSExtValue()))
7216         return false;
7217 
7218       // Neither constant will fit into an immediate, so find materialisation
7219       // costs.
7220       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
7221                                               Subtarget.getFeatureBits(),
7222                                               /*CompressionCost*/true);
7223       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
7224           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
7225           /*CompressionCost*/true);
7226 
7227       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
7228       // combine should be prevented.
7229       if (C1Cost < ShiftedC1Cost)
7230         return false;
7231     }
7232   }
7233   return true;
7234 }
7235 
7236 bool RISCVTargetLowering::targetShrinkDemandedConstant(
7237     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7238     TargetLoweringOpt &TLO) const {
7239   // Delay this optimization as late as possible.
7240   if (!TLO.LegalOps)
7241     return false;
7242 
7243   EVT VT = Op.getValueType();
7244   if (VT.isVector())
7245     return false;
7246 
7247   // Only handle AND for now.
7248   if (Op.getOpcode() != ISD::AND)
7249     return false;
7250 
7251   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
7252   if (!C)
7253     return false;
7254 
7255   const APInt &Mask = C->getAPIntValue();
7256 
7257   // Clear all non-demanded bits initially.
7258   APInt ShrunkMask = Mask & DemandedBits;
7259 
7260   // Try to make a smaller immediate by setting undemanded bits.
7261 
7262   APInt ExpandedMask = Mask | ~DemandedBits;
7263 
7264   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
7265     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
7266   };
7267   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
7268     if (NewMask == Mask)
7269       return true;
7270     SDLoc DL(Op);
7271     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
7272     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
7273     return TLO.CombineTo(Op, NewOp);
7274   };
7275 
7276   // If the shrunk mask fits in sign extended 12 bits, let the target
7277   // independent code apply it.
7278   if (ShrunkMask.isSignedIntN(12))
7279     return false;
7280 
7281   // Preserve (and X, 0xffff) when zext.h is supported.
7282   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
7283     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
7284     if (IsLegalMask(NewMask))
7285       return UseMask(NewMask);
7286   }
7287 
7288   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
7289   if (VT == MVT::i64) {
7290     APInt NewMask = APInt(64, 0xffffffff);
7291     if (IsLegalMask(NewMask))
7292       return UseMask(NewMask);
7293   }
7294 
7295   // For the remaining optimizations, we need to be able to make a negative
7296   // number through a combination of mask and undemanded bits.
7297   if (!ExpandedMask.isNegative())
7298     return false;
7299 
  // Compute the fewest number of bits needed to represent the negative number.
7301   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
7302 
7303   // Try to make a 12 bit negative immediate. If that fails try to make a 32
7304   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
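  // For example, on RV64 with Mask == 0x00000000fffff000 and only the low 32
  // bits demanded, setting the undemanded upper bits gives
  // 0xfffffffffffff000 (-4096), which a single LUI can materialize.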
7305   APInt NewMask = ShrunkMask;
7306   if (MinSignedBits <= 12)
7307     NewMask.setBitsFrom(11);
7308   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
7309     NewMask.setBitsFrom(31);
7310   else
7311     return false;
7312 
7313   // Sanity check that our new mask is a subset of the demanded mask.
7314   assert(IsLegalMask(NewMask));
7315   return UseMask(NewMask);
7316 }
7317 
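// Apply the generalized bit-reverse permutation with the given shift amount
// to Src in place, used to update the known bits for GREV/GREVW below. For
// example, ShAmt == 1 swaps adjacent bits and ShAmt == 7 reverses the bits
// within every byte.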
7318 static void computeGREV(APInt &Src, unsigned ShAmt) {
7319   ShAmt &= Src.getBitWidth() - 1;
7320   uint64_t x = Src.getZExtValue();
7321   if (ShAmt & 1)
7322     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
7323   if (ShAmt & 2)
7324     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
7325   if (ShAmt & 4)
7326     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
7327   if (ShAmt & 8)
7328     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
7329   if (ShAmt & 16)
7330     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
7331   if (ShAmt & 32)
7332     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
7333   Src = x;
7334 }
7335 
7336 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
7337                                                         KnownBits &Known,
7338                                                         const APInt &DemandedElts,
7339                                                         const SelectionDAG &DAG,
7340                                                         unsigned Depth) const {
7341   unsigned BitWidth = Known.getBitWidth();
7342   unsigned Opc = Op.getOpcode();
7343   assert((Opc >= ISD::BUILTIN_OP_END ||
7344           Opc == ISD::INTRINSIC_WO_CHAIN ||
7345           Opc == ISD::INTRINSIC_W_CHAIN ||
7346           Opc == ISD::INTRINSIC_VOID) &&
7347          "Should use MaskedValueIsZero if you don't know whether Op"
7348          " is a target node!");
7349 
7350   Known.resetAll();
7351   switch (Opc) {
7352   default: break;
7353   case RISCVISD::SELECT_CC: {
7354     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
7355     // If we don't know any bits, early out.
7356     if (Known.isUnknown())
7357       break;
7358     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
7359 
7360     // Only known if known in both the LHS and RHS.
7361     Known = KnownBits::commonBits(Known, Known2);
7362     break;
7363   }
7364   case RISCVISD::REMUW: {
7365     KnownBits Known2;
7366     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7367     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7368     // We only care about the lower 32 bits.
7369     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
7370     // Restore the original width by sign extending.
7371     Known = Known.sext(BitWidth);
7372     break;
7373   }
7374   case RISCVISD::DIVUW: {
7375     KnownBits Known2;
7376     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7377     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7378     // We only care about the lower 32 bits.
7379     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
7380     // Restore the original width by sign extending.
7381     Known = Known.sext(BitWidth);
7382     break;
7383   }
7384   case RISCVISD::CTZW: {
7385     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7386     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
7387     unsigned LowBits = Log2_32(PossibleTZ) + 1;
7388     Known.Zero.setBitsFrom(LowBits);
7389     break;
7390   }
7391   case RISCVISD::CLZW: {
7392     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7393     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
7394     unsigned LowBits = Log2_32(PossibleLZ) + 1;
7395     Known.Zero.setBitsFrom(LowBits);
7396     break;
7397   }
7398   case RISCVISD::GREV:
7399   case RISCVISD::GREVW: {
7400     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7401       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7402       if (Opc == RISCVISD::GREVW)
7403         Known = Known.trunc(32);
7404       unsigned ShAmt = C->getZExtValue();
7405       computeGREV(Known.Zero, ShAmt);
7406       computeGREV(Known.One, ShAmt);
7407       if (Opc == RISCVISD::GREVW)
7408         Known = Known.sext(BitWidth);
7409     }
7410     break;
7411   }
7412   case RISCVISD::READ_VLENB:
7413     // We assume VLENB is at least 16 bytes.
7414     Known.Zero.setLowBits(4);
7415     // We assume VLENB is no more than 65536 / 8 bytes.
7416     Known.Zero.setBitsFrom(14);
7417     break;
7418   case ISD::INTRINSIC_W_CHAIN: {
7419     unsigned IntNo = Op.getConstantOperandVal(1);
7420     switch (IntNo) {
7421     default:
7422       // We can't do anything for most intrinsics.
7423       break;
7424     case Intrinsic::riscv_vsetvli:
7425     case Intrinsic::riscv_vsetvlimax:
7426       // Assume that VL output is positive and would fit in an int32_t.
7427       // TODO: VLEN might be capped at 16 bits in a future V spec update.
7428       if (BitWidth >= 32)
7429         Known.Zero.setBitsFrom(31);
7430       break;
7431     }
7432     break;
7433   }
7434   }
7435 }
7436 
7437 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
7438     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
7439     unsigned Depth) const {
7440   switch (Op.getOpcode()) {
7441   default:
7442     break;
7443   case RISCVISD::SELECT_CC: {
    unsigned Tmp =
        DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    unsigned Tmp2 =
        DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
7447     return std::min(Tmp, Tmp2);
7448   }
7449   case RISCVISD::SLLW:
7450   case RISCVISD::SRAW:
7451   case RISCVISD::SRLW:
7452   case RISCVISD::DIVW:
7453   case RISCVISD::DIVUW:
7454   case RISCVISD::REMUW:
7455   case RISCVISD::ROLW:
7456   case RISCVISD::RORW:
7457   case RISCVISD::GREVW:
7458   case RISCVISD::GORCW:
7459   case RISCVISD::FSLW:
7460   case RISCVISD::FSRW:
7461   case RISCVISD::SHFLW:
7462   case RISCVISD::UNSHFLW:
7463   case RISCVISD::BCOMPRESSW:
7464   case RISCVISD::BDECOMPRESSW:
7465   case RISCVISD::FCVT_W_RTZ_RV64:
7466   case RISCVISD::FCVT_WU_RTZ_RV64:
7467     // TODO: As the result is sign-extended, this is conservatively correct. A
7468     // more precise answer could be calculated for SRAW depending on known
7469     // bits in the shift amount.
7470     return 33;
7471   case RISCVISD::SHFL:
7472   case RISCVISD::UNSHFL: {
7473     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
7474     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
7475     // will stay within the upper 32 bits. If there were more than 32 sign bits
7476     // before there will be at least 33 sign bits after.
7477     if (Op.getValueType() == MVT::i64 &&
7478         isa<ConstantSDNode>(Op.getOperand(1)) &&
7479         (Op.getConstantOperandVal(1) & 0x10) == 0) {
7480       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
7481       if (Tmp > 32)
7482         return 33;
7483     }
7484     break;
7485   }
7486   case RISCVISD::VMV_X_S:
7487     // The number of sign bits of the scalar result is computed by obtaining the
7488     // element type of the input vector operand, subtracting its width from the
7489     // XLEN, and then adding one (sign bit within the element type). If the
7490     // element type is wider than XLen, the least-significant XLEN bits are
7491     // taken.
7492     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
7493       return 1;
7494     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
7495   }
7496 
7497   return 1;
7498 }
7499 
7500 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
7501                                                   MachineBasicBlock *BB) {
7502   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
7503 
7504   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
7505   // Should the count have wrapped while it was being read, we need to try
7506   // again.
7507   // ...
7508   // read:
7509   // rdcycleh x3 # load high word of cycle
7510   // rdcycle  x2 # load low word of cycle
7511   // rdcycleh x4 # load high word of cycle
7512   // bne x3, x4, read # check if high word reads match, otherwise try again
7513   // ...
7514 
7515   MachineFunction &MF = *BB->getParent();
7516   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7517   MachineFunction::iterator It = ++BB->getIterator();
7518 
7519   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7520   MF.insert(It, LoopMBB);
7521 
7522   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7523   MF.insert(It, DoneMBB);
7524 
7525   // Transfer the remainder of BB and its successor edges to DoneMBB.
7526   DoneMBB->splice(DoneMBB->begin(), BB,
7527                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
7528   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
7529 
7530   BB->addSuccessor(LoopMBB);
7531 
7532   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7533   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7534   Register LoReg = MI.getOperand(0).getReg();
7535   Register HiReg = MI.getOperand(1).getReg();
7536   DebugLoc DL = MI.getDebugLoc();
7537 
7538   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
7539   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
7540       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7541       .addReg(RISCV::X0);
7542   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
7543       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
7544       .addReg(RISCV::X0);
7545   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
7546       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7547       .addReg(RISCV::X0);
7548 
7549   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
7550       .addReg(HiReg)
7551       .addReg(ReadAgainReg)
7552       .addMBB(LoopMBB);
7553 
7554   LoopMBB->addSuccessor(LoopMBB);
7555   LoopMBB->addSuccessor(DoneMBB);
7556 
7557   MI.eraseFromParent();
7558 
7559   return DoneMBB;
7560 }
7561 
7562 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
7563                                              MachineBasicBlock *BB) {
7564   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
7565 
7566   MachineFunction &MF = *BB->getParent();
7567   DebugLoc DL = MI.getDebugLoc();
7568   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7569   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7570   Register LoReg = MI.getOperand(0).getReg();
7571   Register HiReg = MI.getOperand(1).getReg();
7572   Register SrcReg = MI.getOperand(2).getReg();
7573   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
7574   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7575 
7576   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
7577                           RI);
7578   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7579   MachineMemOperand *MMOLo =
7580       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
7581   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7582       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
7583   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
7584       .addFrameIndex(FI)
7585       .addImm(0)
7586       .addMemOperand(MMOLo);
7587   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
7588       .addFrameIndex(FI)
7589       .addImm(4)
7590       .addMemOperand(MMOHi);
7591   MI.eraseFromParent(); // The pseudo instruction is gone now.
7592   return BB;
7593 }
7594 
7595 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
7596                                                  MachineBasicBlock *BB) {
7597   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
7598          "Unexpected instruction");
7599 
7600   MachineFunction &MF = *BB->getParent();
7601   DebugLoc DL = MI.getDebugLoc();
7602   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7603   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7604   Register DstReg = MI.getOperand(0).getReg();
7605   Register LoReg = MI.getOperand(1).getReg();
7606   Register HiReg = MI.getOperand(2).getReg();
7607   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
7608   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7609 
7610   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7611   MachineMemOperand *MMOLo =
7612       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
7613   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7614       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
7615   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7616       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
7617       .addFrameIndex(FI)
7618       .addImm(0)
7619       .addMemOperand(MMOLo);
7620   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7621       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
7622       .addFrameIndex(FI)
7623       .addImm(4)
7624       .addMemOperand(MMOHi);
7625   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
7626   MI.eraseFromParent(); // The pseudo instruction is gone now.
7627   return BB;
7628 }
7629 
7630 static bool isSelectPseudo(MachineInstr &MI) {
7631   switch (MI.getOpcode()) {
7632   default:
7633     return false;
7634   case RISCV::Select_GPR_Using_CC_GPR:
7635   case RISCV::Select_FPR16_Using_CC_GPR:
7636   case RISCV::Select_FPR32_Using_CC_GPR:
7637   case RISCV::Select_FPR64_Using_CC_GPR:
7638     return true;
7639   }
7640 }
7641 
7642 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
7643                                            MachineBasicBlock *BB,
7644                                            const RISCVSubtarget &Subtarget) {
7645   // To "insert" Select_* instructions, we actually have to insert the triangle
7646   // control-flow pattern.  The incoming instructions know the destination vreg
7647   // to set, the condition code register to branch on, the true/false values to
7648   // select between, and the condcode to use to select the appropriate branch.
7649   //
7650   // We produce the following control flow:
7651   //     HeadMBB
7652   //     |  \
7653   //     |  IfFalseMBB
7654   //     | /
7655   //    TailMBB
7656   //
7657   // When we find a sequence of selects we attempt to optimize their emission
7658   // by sharing the control flow. Currently we only handle cases where we have
7659   // multiple selects with the exact same condition (same LHS, RHS and CC).
7660   // The selects may be interleaved with other instructions if the other
7661   // instructions meet some requirements we deem safe:
7662   // - They are debug instructions. Otherwise,
7663   // - They do not have side-effects, do not access memory and their inputs do
7664   //   not depend on the results of the select pseudo-instructions.
7665   // The TrueV/FalseV operands of the selects cannot depend on the result of
7666   // previous selects in the sequence.
7667   // These conditions could be further relaxed. See the X86 target for a
7668   // related approach and more information.
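  // For example (illustrative): two selects "x = c ? a : b; y = c ? d : e"
  // sharing the same (LHS, RHS, CC) condition are emitted with a single
  // conditional branch and become two PHIs in TailMBB.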
7669   Register LHS = MI.getOperand(1).getReg();
7670   Register RHS = MI.getOperand(2).getReg();
7671   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
7672 
7673   SmallVector<MachineInstr *, 4> SelectDebugValues;
7674   SmallSet<Register, 4> SelectDests;
7675   SelectDests.insert(MI.getOperand(0).getReg());
7676 
7677   MachineInstr *LastSelectPseudo = &MI;
7678 
7679   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
7680        SequenceMBBI != E; ++SequenceMBBI) {
7681     if (SequenceMBBI->isDebugInstr())
7682       continue;
7683     else if (isSelectPseudo(*SequenceMBBI)) {
7684       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
7685           SequenceMBBI->getOperand(2).getReg() != RHS ||
7686           SequenceMBBI->getOperand(3).getImm() != CC ||
7687           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
7688           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
7689         break;
7690       LastSelectPseudo = &*SequenceMBBI;
7691       SequenceMBBI->collectDebugValues(SelectDebugValues);
7692       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
7693     } else {
7694       if (SequenceMBBI->hasUnmodeledSideEffects() ||
7695           SequenceMBBI->mayLoadOrStore())
7696         break;
7697       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
7698             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
7699           }))
7700         break;
7701     }
7702   }
7703 
7704   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
7705   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7706   DebugLoc DL = MI.getDebugLoc();
7707   MachineFunction::iterator I = ++BB->getIterator();
7708 
7709   MachineBasicBlock *HeadMBB = BB;
7710   MachineFunction *F = BB->getParent();
7711   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
7712   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
7713 
7714   F->insert(I, IfFalseMBB);
7715   F->insert(I, TailMBB);
7716 
7717   // Transfer debug instructions associated with the selects to TailMBB.
7718   for (MachineInstr *DebugInstr : SelectDebugValues) {
7719     TailMBB->push_back(DebugInstr->removeFromParent());
7720   }
7721 
7722   // Move all instructions after the sequence to TailMBB.
7723   TailMBB->splice(TailMBB->end(), HeadMBB,
7724                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
7725   // Update machine-CFG edges by transferring all successors of the current
7726   // block to the new block which will contain the Phi nodes for the selects.
7727   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
7728   // Set the successors for HeadMBB.
7729   HeadMBB->addSuccessor(IfFalseMBB);
7730   HeadMBB->addSuccessor(TailMBB);
7731 
7732   // Insert appropriate branch.
7733   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
7734     .addReg(LHS)
7735     .addReg(RHS)
7736     .addMBB(TailMBB);
7737 
7738   // IfFalseMBB just falls through to TailMBB.
7739   IfFalseMBB->addSuccessor(TailMBB);
7740 
7741   // Create PHIs for all of the select pseudo-instructions.
7742   auto SelectMBBI = MI.getIterator();
7743   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7744   auto InsertionPoint = TailMBB->begin();
7745   while (SelectMBBI != SelectEnd) {
7746     auto Next = std::next(SelectMBBI);
7747     if (isSelectPseudo(*SelectMBBI)) {
7748       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
7749       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
7750               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
7751           .addReg(SelectMBBI->getOperand(4).getReg())
7752           .addMBB(HeadMBB)
7753           .addReg(SelectMBBI->getOperand(5).getReg())
7754           .addMBB(IfFalseMBB);
7755       SelectMBBI->eraseFromParent();
7756     }
7757     SelectMBBI = Next;
7758   }
7759 
7760   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7761   return TailMBB;
7762 }
7763 
7764 MachineBasicBlock *
7765 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
7766                                                  MachineBasicBlock *BB) const {
7767   switch (MI.getOpcode()) {
7768   default:
7769     llvm_unreachable("Unexpected instr type to insert");
7770   case RISCV::ReadCycleWide:
7771     assert(!Subtarget.is64Bit() &&
7772            "ReadCycleWrite is only to be used on riscv32");
7773     return emitReadCycleWidePseudo(MI, BB);
7774   case RISCV::Select_GPR_Using_CC_GPR:
7775   case RISCV::Select_FPR16_Using_CC_GPR:
7776   case RISCV::Select_FPR32_Using_CC_GPR:
7777   case RISCV::Select_FPR64_Using_CC_GPR:
7778     return emitSelectPseudo(MI, BB, Subtarget);
7779   case RISCV::BuildPairF64Pseudo:
7780     return emitBuildPairF64Pseudo(MI, BB);
7781   case RISCV::SplitF64Pseudo:
7782     return emitSplitF64Pseudo(MI, BB);
7783   }
7784 }
7785 
7786 // Calling Convention Implementation.
7787 // The expectations for frontend ABI lowering vary from target to target.
7788 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
7789 // details, but this is a longer term goal. For now, we simply try to keep the
7790 // role of the frontend as simple and well-defined as possible. The rules can
7791 // be summarised as:
7792 // * Never split up large scalar arguments. We handle them here.
7793 // * If a hardfloat calling convention is being used, and the struct may be
7794 // passed in a pair of registers (fp+fp, int+fp), and both registers are
7795 // available, then pass as two separate arguments. If either the GPRs or FPRs
7796 // are exhausted, then pass according to the rule below.
7797 // * If a struct could never be passed in registers or directly in a stack
7798 // slot (as it is larger than 2*XLEN and the floating point rules don't
7799 // apply), then pass it using a pointer with the byval attribute.
7800 // * If a struct is less than 2*XLEN, then coerce to either a two-element
7801 // word-sized array or a 2*XLEN scalar (depending on alignment).
7802 // * The frontend can determine whether a struct is returned by reference or
7803 // not based on its size and fields. If it will be returned by reference, the
7804 // frontend must modify the prototype so a pointer with the sret annotation is
7805 // passed as the first argument. This is not necessary for large scalar
7806 // returns.
7807 // * Struct return values and varargs should be coerced to structs containing
7808 // register-size fields in the same situations they would be for fixed
7809 // arguments.
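//
// As an illustrative (non-normative) example of the rules above: under a
// hard-double ABI such as ILP32D, a struct containing one double and one int
// member would typically be passed as two separate arguments (one FPR, one
// GPR) while suitable registers remain available, whereas a struct larger
// than 2*XLEN that does not meet the floating-point rules would be passed
// byval via a pointer.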
7810 
7811 static const MCPhysReg ArgGPRs[] = {
7812   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
7813   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
7814 };
7815 static const MCPhysReg ArgFPR16s[] = {
7816   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
7817   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
7818 };
7819 static const MCPhysReg ArgFPR32s[] = {
7820   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
7821   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
7822 };
7823 static const MCPhysReg ArgFPR64s[] = {
7824   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
7825   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
7826 };
7827 // This is an interim calling convention and it may be changed in the future.
7828 static const MCPhysReg ArgVRs[] = {
7829     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
7830     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
7831     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
7832 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
7833                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
7834                                      RISCV::V20M2, RISCV::V22M2};
7835 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
7836                                      RISCV::V20M4};
7837 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
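// The lists above are the register groups used for vector arguments requiring
// LMUL=2, LMUL=4 and LMUL=8; like ArgVRs, this interim convention only uses
// v8-v23.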
7838 
7839 // Pass a 2*XLEN argument that has been split into two XLEN values through
7840 // registers or the stack as necessary.
7841 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
7842                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
7843                                 MVT ValVT2, MVT LocVT2,
7844                                 ISD::ArgFlagsTy ArgFlags2) {
7845   unsigned XLenInBytes = XLen / 8;
7846   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7847     // At least one half can be passed via register.
7848     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
7849                                      VA1.getLocVT(), CCValAssign::Full));
7850   } else {
7851     // Both halves must be passed on the stack, with proper alignment.
7852     Align StackAlign =
7853         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7854     State.addLoc(
7855         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
7856                             State.AllocateStack(XLenInBytes, StackAlign),
7857                             VA1.getLocVT(), CCValAssign::Full));
7858     State.addLoc(CCValAssign::getMem(
7859         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7860         LocVT2, CCValAssign::Full));
7861     return false;
7862   }
7863 
7864   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7865     // The second half can also be passed via register.
7866     State.addLoc(
7867         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
7868   } else {
7869     // The second half is passed via the stack, without additional alignment.
7870     State.addLoc(CCValAssign::getMem(
7871         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7872         LocVT2, CCValAssign::Full));
7873   }
7874 
7875   return false;
7876 }
7877 
7878 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7879                                Optional<unsigned> FirstMaskArgument,
7880                                CCState &State, const RISCVTargetLowering &TLI) {
7881   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7882   if (RC == &RISCV::VRRegClass) {
7883     // Assign the first mask argument to V0.
7884     // This is an interim calling convention and it may be changed in the
7885     // future.
7886     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7887       return State.AllocateReg(RISCV::V0);
7888     return State.AllocateReg(ArgVRs);
7889   }
7890   if (RC == &RISCV::VRM2RegClass)
7891     return State.AllocateReg(ArgVRM2s);
7892   if (RC == &RISCV::VRM4RegClass)
7893     return State.AllocateReg(ArgVRM4s);
7894   if (RC == &RISCV::VRM8RegClass)
7895     return State.AllocateReg(ArgVRM8s);
7896   llvm_unreachable("Unhandled register class for ValueType");
7897 }
7898 
7899 // Implements the RISC-V calling convention. Returns true upon failure.
7900 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7901                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7902                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7903                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7904                      Optional<unsigned> FirstMaskArgument) {
7905   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7906   assert(XLen == 32 || XLen == 64);
7907   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7908 
  // Any return value split into more than two values can't be returned
7910   // directly. Vectors are returned via the available vector registers.
7911   if (!LocVT.isVector() && IsRet && ValNo > 1)
7912     return true;
7913 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
7916   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
7919   bool UseGPRForF64 = true;
7920 
7921   switch (ABI) {
7922   default:
7923     llvm_unreachable("Unexpected ABI");
7924   case RISCVABI::ABI_ILP32:
7925   case RISCVABI::ABI_LP64:
7926     break;
7927   case RISCVABI::ABI_ILP32F:
7928   case RISCVABI::ABI_LP64F:
7929     UseGPRForF16_F32 = !IsFixed;
7930     break;
7931   case RISCVABI::ABI_ILP32D:
7932   case RISCVABI::ABI_LP64D:
7933     UseGPRForF16_F32 = !IsFixed;
7934     UseGPRForF64 = !IsFixed;
7935     break;
7936   }
7937 
7938   // FPR16, FPR32, and FPR64 alias each other.
7939   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7940     UseGPRForF16_F32 = true;
7941     UseGPRForF64 = true;
7942   }
7943 
7944   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7945   // similar local variables rather than directly checking against the target
7946   // ABI.
7947 
7948   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7949     LocVT = XLenVT;
7950     LocInfo = CCValAssign::BCvt;
7951   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7952     LocVT = MVT::i64;
7953     LocInfo = CCValAssign::BCvt;
7954   }
7955 
7956   // If this is a variadic argument, the RISC-V calling convention requires
7957   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7958   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7959   // be used regardless of whether the original argument was split during
7960   // legalisation or not. The argument will not be passed by registers if the
7961   // original type is larger than 2*XLEN, so the register alignment rule does
7962   // not apply.
7963   unsigned TwoXLenInBytes = (2 * XLen) / 8;
7964   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7965       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7966     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7967     // Skip 'odd' register if necessary.
7968     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7969       State.AllocateReg(ArgGPRs);
7970   }
7971 
7972   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7973   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7974       State.getPendingArgFlags();
7975 
7976   assert(PendingLocs.size() == PendingArgFlags.size() &&
7977          "PendingLocs and PendingArgFlags out of sync");
7978 
7979   // Handle passing f64 on RV32D with a soft float ABI or when floating point
7980   // registers are exhausted.
7981   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7982     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7983            "Can't lower f64 if it is split");
7984     // Depending on available argument GPRS, f64 may be passed in a pair of
7985     // GPRs, split between a GPR and the stack, or passed completely on the
7986     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7987     // cases.
7988     Register Reg = State.AllocateReg(ArgGPRs);
7989     LocVT = MVT::i32;
7990     if (!Reg) {
7991       unsigned StackOffset = State.AllocateStack(8, Align(8));
7992       State.addLoc(
7993           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7994       return false;
7995     }
7996     if (!State.AllocateReg(ArgGPRs))
7997       State.AllocateStack(4, Align(4));
7998     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7999     return false;
8000   }
8001 
8002   // Fixed-length vectors are located in the corresponding scalable-vector
8003   // container types.
8004   if (ValVT.isFixedLengthVector())
8005     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8006 
8007   // Split arguments might be passed indirectly, so keep track of the pending
8008   // values. Split vectors are passed via a mix of registers and indirectly, so
8009   // treat them as we would any other argument.
8010   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
8011     LocVT = XLenVT;
8012     LocInfo = CCValAssign::Indirect;
8013     PendingLocs.push_back(
8014         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
8015     PendingArgFlags.push_back(ArgFlags);
8016     if (!ArgFlags.isSplitEnd()) {
8017       return false;
8018     }
8019   }
8020 
8021   // If the split argument only had two elements, it should be passed directly
8022   // in registers or on the stack.
8023   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
8024       PendingLocs.size() <= 2) {
8025     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
8026     // Apply the normal calling convention rules to the first half of the
8027     // split argument.
8028     CCValAssign VA = PendingLocs[0];
8029     ISD::ArgFlagsTy AF = PendingArgFlags[0];
8030     PendingLocs.clear();
8031     PendingArgFlags.clear();
8032     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
8033                                ArgFlags);
8034   }
8035 
8036   // Allocate to a register if possible, or else a stack slot.
8037   Register Reg;
8038   unsigned StoreSizeBytes = XLen / 8;
8039   Align StackAlign = Align(XLen / 8);
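  // By default a scalar occupies a single XLEN-sized, XLEN-aligned stack slot
  // if no register is available; the fixed-length vector case below may
  // override both values.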
8040 
8041   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8042     Reg = State.AllocateReg(ArgFPR16s);
8043   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8044     Reg = State.AllocateReg(ArgFPR32s);
8045   else if (ValVT == MVT::f64 && !UseGPRForF64)
8046     Reg = State.AllocateReg(ArgFPR64s);
8047   else if (ValVT.isVector()) {
8048     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8049     if (!Reg) {
8050       // For return values, the vector must be passed fully via registers or
8051       // via the stack.
8052       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8053       // but we're using all of them.
8054       if (IsRet)
8055         return true;
8056       // Try using a GPR to pass the address
8057       if ((Reg = State.AllocateReg(ArgGPRs))) {
8058         LocVT = XLenVT;
8059         LocInfo = CCValAssign::Indirect;
8060       } else if (ValVT.isScalableVector()) {
8061         report_fatal_error("Unable to pass scalable vector types on the stack");
8062       } else {
8063         // Pass fixed-length vectors on the stack.
8064         LocVT = ValVT;
8065         StoreSizeBytes = ValVT.getStoreSize();
8066         // Align vectors to their element sizes, being careful for vXi1
8067         // vectors.
8068         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8069       }
8070     }
8071   } else {
8072     Reg = State.AllocateReg(ArgGPRs);
8073   }
8074 
8075   unsigned StackOffset =
8076       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8077 
8078   // If we reach this point and PendingLocs is non-empty, we must be at the
8079   // end of a split argument that must be passed indirectly.
8080   if (!PendingLocs.empty()) {
8081     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
8082     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
8083 
8084     for (auto &It : PendingLocs) {
8085       if (Reg)
8086         It.convertToReg(Reg);
8087       else
8088         It.convertToMem(StackOffset);
8089       State.addLoc(It);
8090     }
8091     PendingLocs.clear();
8092     PendingArgFlags.clear();
8093     return false;
8094   }
8095 
8096   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
8097           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
8098          "Expected an XLenVT or vector types at this stage");
8099 
8100   if (Reg) {
8101     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8102     return false;
8103   }
8104 
8105   // When a floating-point value is passed on the stack, no bit-conversion is
8106   // needed.
8107   if (ValVT.isFloatingPoint()) {
8108     LocVT = ValVT;
8109     LocInfo = CCValAssign::Full;
8110   }
8111   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8112   return false;
8113 }
8114 
8115 template <typename ArgTy>
8116 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
8117   for (const auto &ArgIdx : enumerate(Args)) {
8118     MVT ArgVT = ArgIdx.value().VT;
8119     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
8120       return ArgIdx.index();
8121   }
8122   return None;
8123 }
8124 
8125 void RISCVTargetLowering::analyzeInputArgs(
8126     MachineFunction &MF, CCState &CCInfo,
8127     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
8128     RISCVCCAssignFn Fn) const {
8129   unsigned NumArgs = Ins.size();
8130   FunctionType *FType = MF.getFunction().getFunctionType();
8131 
8132   Optional<unsigned> FirstMaskArgument;
8133   if (Subtarget.hasVInstructions())
8134     FirstMaskArgument = preAssignMask(Ins);
8135 
8136   for (unsigned i = 0; i != NumArgs; ++i) {
8137     MVT ArgVT = Ins[i].VT;
8138     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
8139 
8140     Type *ArgTy = nullptr;
8141     if (IsRet)
8142       ArgTy = FType->getReturnType();
8143     else if (Ins[i].isOrigArg())
8144       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8145 
8146     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8147     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8148            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
8149            FirstMaskArgument)) {
8150       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
8151                         << EVT(ArgVT).getEVTString() << '\n');
8152       llvm_unreachable(nullptr);
8153     }
8154   }
8155 }
8156 
8157 void RISCVTargetLowering::analyzeOutputArgs(
8158     MachineFunction &MF, CCState &CCInfo,
8159     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
8160     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
8161   unsigned NumArgs = Outs.size();
8162 
8163   Optional<unsigned> FirstMaskArgument;
8164   if (Subtarget.hasVInstructions())
8165     FirstMaskArgument = preAssignMask(Outs);
8166 
8167   for (unsigned i = 0; i != NumArgs; i++) {
8168     MVT ArgVT = Outs[i].VT;
8169     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8170     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8171 
8172     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8173     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8174            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
8175            FirstMaskArgument)) {
8176       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
8177                         << EVT(ArgVT).getEVTString() << "\n");
8178       llvm_unreachable(nullptr);
8179     }
8180   }
8181 }
8182 
8183 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
8184 // values.
8185 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
8186                                    const CCValAssign &VA, const SDLoc &DL,
8187                                    const RISCVSubtarget &Subtarget) {
8188   switch (VA.getLocInfo()) {
8189   default:
8190     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8191   case CCValAssign::Full:
8192     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
8193       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
8194     break;
8195   case CCValAssign::BCvt:
8196     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8197       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
8198     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8199       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
8200     else
8201       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
8202     break;
8203   }
8204   return Val;
8205 }
8206 
8207 // The caller is responsible for loading the full value if the argument is
8208 // passed with CCValAssign::Indirect.
8209 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
8210                                 const CCValAssign &VA, const SDLoc &DL,
8211                                 const RISCVTargetLowering &TLI) {
8212   MachineFunction &MF = DAG.getMachineFunction();
8213   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8214   EVT LocVT = VA.getLocVT();
8215   SDValue Val;
8216   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
8217   Register VReg = RegInfo.createVirtualRegister(RC);
8218   RegInfo.addLiveIn(VA.getLocReg(), VReg);
8219   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
8220 
8221   if (VA.getLocInfo() == CCValAssign::Indirect)
8222     return Val;
8223 
8224   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
8225 }
8226 
8227 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
8228                                    const CCValAssign &VA, const SDLoc &DL,
8229                                    const RISCVSubtarget &Subtarget) {
8230   EVT LocVT = VA.getLocVT();
8231 
8232   switch (VA.getLocInfo()) {
8233   default:
8234     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8235   case CCValAssign::Full:
8236     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
8237       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
8238     break;
8239   case CCValAssign::BCvt:
8240     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8241       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
8242     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8243       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
8244     else
8245       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
8246     break;
8247   }
8248   return Val;
8249 }
8250 
8251 // The caller is responsible for loading the full value if the argument is
8252 // passed with CCValAssign::Indirect.
8253 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
8254                                 const CCValAssign &VA, const SDLoc &DL) {
8255   MachineFunction &MF = DAG.getMachineFunction();
8256   MachineFrameInfo &MFI = MF.getFrameInfo();
8257   EVT LocVT = VA.getLocVT();
8258   EVT ValVT = VA.getValVT();
8259   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
8260   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
8261                                  /*Immutable=*/true);
8262   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
8263   SDValue Val;
8264 
8265   ISD::LoadExtType ExtType;
8266   switch (VA.getLocInfo()) {
8267   default:
8268     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8269   case CCValAssign::Full:
8270   case CCValAssign::Indirect:
8271   case CCValAssign::BCvt:
8272     ExtType = ISD::NON_EXTLOAD;
8273     break;
8274   }
8275   Val = DAG.getExtLoad(
8276       ExtType, DL, LocVT, Chain, FIN,
8277       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
8278   return Val;
8279 }
8280 
8281 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
8282                                        const CCValAssign &VA, const SDLoc &DL) {
8283   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
8284          "Unexpected VA");
8285   MachineFunction &MF = DAG.getMachineFunction();
8286   MachineFrameInfo &MFI = MF.getFrameInfo();
8287   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8288 
8289   if (VA.isMemLoc()) {
8290     // f64 is passed on the stack.
8291     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
8292     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8293     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
8294                        MachinePointerInfo::getFixedStack(MF, FI));
8295   }
8296 
8297   assert(VA.isRegLoc() && "Expected register VA assignment");
8298 
8299   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8300   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
8301   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
8302   SDValue Hi;
8303   if (VA.getLocReg() == RISCV::X17) {
8304     // Second half of f64 is passed on the stack.
8305     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
8306     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8307     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
8308                      MachinePointerInfo::getFixedStack(MF, FI));
8309   } else {
8310     // Second half of f64 is passed in another GPR.
8311     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8312     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
8313     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
8314   }
8315   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
8316 }
8317 
// FastCC has less than a 1% performance improvement for some particular
// benchmarks. But theoretically, it may have benefits for some cases.
8320 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
8321                             unsigned ValNo, MVT ValVT, MVT LocVT,
8322                             CCValAssign::LocInfo LocInfo,
8323                             ISD::ArgFlagsTy ArgFlags, CCState &State,
8324                             bool IsFixed, bool IsRet, Type *OrigTy,
8325                             const RISCVTargetLowering &TLI,
8326                             Optional<unsigned> FirstMaskArgument) {
8327 
8328   // X5 and X6 might be used for save-restore libcall.
8329   static const MCPhysReg GPRList[] = {
8330       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
8331       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
8332       RISCV::X29, RISCV::X30, RISCV::X31};
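  // In addition to the normal argument registers a0-a7 (x10-x17), FastCC also
  // uses the caller-saved temporaries t2-t6 (x7, x28-x31).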
8333 
8334   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8335     if (unsigned Reg = State.AllocateReg(GPRList)) {
8336       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8337       return false;
8338     }
8339   }
8340 
8341   if (LocVT == MVT::f16) {
8342     static const MCPhysReg FPR16List[] = {
8343         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
8344         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
8345         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
8346         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
8347     if (unsigned Reg = State.AllocateReg(FPR16List)) {
8348       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8349       return false;
8350     }
8351   }
8352 
8353   if (LocVT == MVT::f32) {
8354     static const MCPhysReg FPR32List[] = {
8355         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
8356         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
8357         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
8358         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
8359     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8360       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8361       return false;
8362     }
8363   }
8364 
8365   if (LocVT == MVT::f64) {
8366     static const MCPhysReg FPR64List[] = {
8367         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
8368         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
8369         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
8370         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
8371     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8372       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8373       return false;
8374     }
8375   }
8376 
8377   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
8378     unsigned Offset4 = State.AllocateStack(4, Align(4));
8379     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
8380     return false;
8381   }
8382 
8383   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
8384     unsigned Offset5 = State.AllocateStack(8, Align(8));
8385     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
8386     return false;
8387   }
8388 
8389   if (LocVT.isVector()) {
8390     if (unsigned Reg =
8391             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
8392       // Fixed-length vectors are located in the corresponding scalable-vector
8393       // container types.
8394       if (ValVT.isFixedLengthVector())
8395         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8396       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8397     } else {
8398       // Try and pass the address via a "fast" GPR.
8399       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
8400         LocInfo = CCValAssign::Indirect;
8401         LocVT = TLI.getSubtarget().getXLenVT();
8402         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
8403       } else if (ValVT.isFixedLengthVector()) {
8404         auto StackAlign =
8405             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8406         unsigned StackOffset =
8407             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
8408         State.addLoc(
8409             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8410       } else {
8411         // Can't pass scalable vectors on the stack.
8412         return true;
8413       }
8414     }
8415 
8416     return false;
8417   }
8418 
8419   return true; // CC didn't match.
8420 }
8421 
8422 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
8423                          CCValAssign::LocInfo LocInfo,
8424                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
8425 
8426   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8427     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
8428     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
8429     static const MCPhysReg GPRList[] = {
8430         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
8431         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
8432     if (unsigned Reg = State.AllocateReg(GPRList)) {
8433       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8434       return false;
8435     }
8436   }
8437 
8438   if (LocVT == MVT::f32) {
8439     // Pass in STG registers: F1, ..., F6
8440     //                        fs0 ... fs5
8441     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
8442                                           RISCV::F18_F, RISCV::F19_F,
8443                                           RISCV::F20_F, RISCV::F21_F};
8444     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8445       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8446       return false;
8447     }
8448   }
8449 
8450   if (LocVT == MVT::f64) {
8451     // Pass in STG registers: D1, ..., D6
8452     //                        fs6 ... fs11
8453     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
8454                                           RISCV::F24_D, RISCV::F25_D,
8455                                           RISCV::F26_D, RISCV::F27_D};
8456     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8457       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8458       return false;
8459     }
8460   }
8461 
8462   report_fatal_error("No registers left in GHC calling convention");
8463   return true;
8464 }
8465 
8466 // Transform physical registers into virtual registers.
8467 SDValue RISCVTargetLowering::LowerFormalArguments(
8468     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
8469     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
8470     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
8471 
8472   MachineFunction &MF = DAG.getMachineFunction();
8473 
8474   switch (CallConv) {
8475   default:
8476     report_fatal_error("Unsupported calling convention");
8477   case CallingConv::C:
8478   case CallingConv::Fast:
8479     break;
8480   case CallingConv::GHC:
8481     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
8482         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
8483       report_fatal_error(
8484         "GHC calling convention requires the F and D instruction set extensions");
8485   }
8486 
8487   const Function &Func = MF.getFunction();
8488   if (Func.hasFnAttribute("interrupt")) {
8489     if (!Func.arg_empty())
8490       report_fatal_error(
8491         "Functions with the interrupt attribute cannot have arguments!");
8492 
8493     StringRef Kind =
8494       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8495 
8496     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
8497       report_fatal_error(
8498         "Function interrupt attribute argument not supported!");
8499   }
8500 
8501   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8502   MVT XLenVT = Subtarget.getXLenVT();
8503   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
8505   std::vector<SDValue> OutChains;
8506 
8507   // Assign locations to all of the incoming arguments.
8508   SmallVector<CCValAssign, 16> ArgLocs;
8509   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8510 
8511   if (CallConv == CallingConv::GHC)
8512     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
8513   else
8514     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
8515                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8516                                                    : CC_RISCV);
8517 
8518   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
8519     CCValAssign &VA = ArgLocs[i];
8520     SDValue ArgValue;
8521     // Passing f64 on RV32D with a soft float ABI must be handled as a special
8522     // case.
8523     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
8524       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
8525     else if (VA.isRegLoc())
8526       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
8527     else
8528       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
8529 
8530     if (VA.getLocInfo() == CCValAssign::Indirect) {
8531       // If the original argument was split and passed by reference (e.g. i128
8532       // on RV32), we need to load all parts of it here (using the same
8533       // address). Vectors may be partly split to registers and partly to the
8534       // stack, in which case the base address is partly offset and subsequent
8535       // stores are relative to that.
8536       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
8537                                    MachinePointerInfo()));
8538       unsigned ArgIndex = Ins[i].OrigArgIndex;
8539       unsigned ArgPartOffset = Ins[i].PartOffset;
8540       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8541       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
8542         CCValAssign &PartVA = ArgLocs[i + 1];
8543         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
8544         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8545         if (PartVA.getValVT().isScalableVector())
8546           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8547         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
8548         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
8549                                      MachinePointerInfo()));
8550         ++i;
8551       }
8552       continue;
8553     }
8554     InVals.push_back(ArgValue);
8555   }
8556 
8557   if (IsVarArg) {
8558     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
8559     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
8560     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
8561     MachineFrameInfo &MFI = MF.getFrameInfo();
8562     MachineRegisterInfo &RegInfo = MF.getRegInfo();
8563     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
8564 
8565     // Offset of the first variable argument from stack pointer, and size of
8566     // the vararg save area. For now, the varargs save area is either zero or
8567     // large enough to hold a0-a7.
8568     int VaArgOffset, VarArgsSaveSize;
8569 
8570     // If all registers are allocated, then all varargs must be passed on the
8571     // stack and we don't need to save any argregs.
8572     if (ArgRegs.size() == Idx) {
8573       VaArgOffset = CCInfo.getNextStackOffset();
8574       VarArgsSaveSize = 0;
8575     } else {
8576       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
8577       VaArgOffset = -VarArgsSaveSize;
8578     }
8579 
    // Record the frame index of the first variable argument,
    // which is a value needed for VASTART.
8582     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8583     RVFI->setVarArgsFrameIndex(FI);
8584 
8585     // If saving an odd number of registers then create an extra stack slot to
8586     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
8588     if (Idx % 2) {
8589       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
8590       VarArgsSaveSize += XLenInBytes;
8591     }
8592 
8593     // Copy the integer registers that may have been used for passing varargs
8594     // to the vararg save area.
8595     for (unsigned I = Idx; I < ArgRegs.size();
8596          ++I, VaArgOffset += XLenInBytes) {
8597       const Register Reg = RegInfo.createVirtualRegister(RC);
8598       RegInfo.addLiveIn(ArgRegs[I], Reg);
8599       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
8600       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8601       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8602       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
8603                                    MachinePointerInfo::getFixedStack(MF, FI));
8604       cast<StoreSDNode>(Store.getNode())
8605           ->getMemOperand()
8606           ->setValue((Value *)nullptr);
8607       OutChains.push_back(Store);
8608     }
8609     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
8610   }
8611 
8612   // All stores are grouped in one node to allow the matching between
8613   // the size of Ins and InVals. This only happens for vararg functions.
8614   if (!OutChains.empty()) {
8615     OutChains.push_back(Chain);
8616     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
8617   }
8618 
8619   return Chain;
8620 }
8621 
8622 /// isEligibleForTailCallOptimization - Check whether the call is eligible
8623 /// for tail call optimization.
8624 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
8625 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
8626     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
8627     const SmallVector<CCValAssign, 16> &ArgLocs) const {
8628 
8629   auto &Callee = CLI.Callee;
8630   auto CalleeCC = CLI.CallConv;
8631   auto &Outs = CLI.Outs;
8632   auto &Caller = MF.getFunction();
8633   auto CallerCC = Caller.getCallingConv();
8634 
8635   // Exception-handling functions need a special set of instructions to
8636   // indicate a return to the hardware. Tail-calling another function would
8637   // probably break this.
8638   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
8639   // should be expanded as new function attributes are introduced.
8640   if (Caller.hasFnAttribute("interrupt"))
8641     return false;
8642 
8643   // Do not tail call opt if the stack is used to pass parameters.
8644   if (CCInfo.getNextStackOffset() != 0)
8645     return false;
8646 
8647   // Do not tail call opt if any parameters need to be passed indirectly.
8648   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
8649   // passed indirectly. So the address of the value will be passed in a
  // register, or if not available, then the address is put on the stack.
  // Passing indirectly often requires allocating stack space to store the
  // value, so the CCInfo.getNextStackOffset() != 0 check is not enough and we
  // also need to check whether any CCValAssign in ArgLocs is passed
  // CCValAssign::Indirect.
8655   for (auto &VA : ArgLocs)
8656     if (VA.getLocInfo() == CCValAssign::Indirect)
8657       return false;
8658 
8659   // Do not tail call opt if either caller or callee uses struct return
8660   // semantics.
8661   auto IsCallerStructRet = Caller.hasStructRetAttr();
8662   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
8663   if (IsCallerStructRet || IsCalleeStructRet)
8664     return false;
8665 
8666   // Externally-defined functions with weak linkage should not be
8667   // tail-called. The behaviour of branch instructions in this situation (as
8668   // used for tail calls) is implementation-defined, so we cannot rely on the
8669   // linker replacing the tail call with a return.
8670   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
8671     const GlobalValue *GV = G->getGlobal();
8672     if (GV->hasExternalWeakLinkage())
8673       return false;
8674   }
8675 
8676   // The callee has to preserve all registers the caller needs to preserve.
8677   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
8678   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
8679   if (CalleeCC != CallerCC) {
8680     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
8681     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8682       return false;
8683   }
8684 
8685   // Byval parameters hand the function a pointer directly into the stack area
8686   // we want to reuse during a tail call. Working around this *is* possible
8687   // but less efficient and uglier in LowerCall.
8688   for (auto &Arg : Outs)
8689     if (Arg.Flags.isByVal())
8690       return false;
8691 
8692   return true;
8693 }
8694 
8695 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
8696   return DAG.getDataLayout().getPrefTypeAlign(
8697       VT.getTypeForEVT(*DAG.getContext()));
8698 }
8699 
8700 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
8701 // and output parameter nodes.
8702 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
8703                                        SmallVectorImpl<SDValue> &InVals) const {
8704   SelectionDAG &DAG = CLI.DAG;
8705   SDLoc &DL = CLI.DL;
8706   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
8707   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
8708   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
8709   SDValue Chain = CLI.Chain;
8710   SDValue Callee = CLI.Callee;
8711   bool &IsTailCall = CLI.IsTailCall;
8712   CallingConv::ID CallConv = CLI.CallConv;
8713   bool IsVarArg = CLI.IsVarArg;
8714   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8715   MVT XLenVT = Subtarget.getXLenVT();
8716 
8717   MachineFunction &MF = DAG.getMachineFunction();
8718 
8719   // Analyze the operands of the call, assigning locations to each operand.
8720   SmallVector<CCValAssign, 16> ArgLocs;
8721   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8722 
8723   if (CallConv == CallingConv::GHC)
8724     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
8725   else
8726     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
8727                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8728                                                     : CC_RISCV);
8729 
8730   // Check if it's really possible to do a tail call.
8731   if (IsTailCall)
8732     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8733 
8734   if (IsTailCall)
8735     ++NumTailCalls;
8736   else if (CLI.CB && CLI.CB->isMustTailCall())
8737     report_fatal_error("failed to perform tail call elimination on a call "
8738                        "site marked musttail");
8739 
8740   // Get a count of how many bytes are to be pushed on the stack.
8741   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
8742 
8743   // Create local copies for byval args
8744   SmallVector<SDValue, 8> ByValArgs;
8745   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8746     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8747     if (!Flags.isByVal())
8748       continue;
8749 
8750     SDValue Arg = OutVals[i];
8751     unsigned Size = Flags.getByValSize();
8752     Align Alignment = Flags.getNonZeroByValAlign();
8753 
8754     int FI =
8755         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
8756     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8757     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
8758 
8759     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
8760                           /*IsVolatile=*/false,
8761                           /*AlwaysInline=*/false, IsTailCall,
8762                           MachinePointerInfo(), MachinePointerInfo());
8763     ByValArgs.push_back(FIPtr);
8764   }
8765 
8766   if (!IsTailCall)
8767     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
8768 
8769   // Copy argument values to their designated locations.
8770   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
8771   SmallVector<SDValue, 8> MemOpChains;
8772   SDValue StackPtr;
8773   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
8774     CCValAssign &VA = ArgLocs[i];
8775     SDValue ArgValue = OutVals[i];
8776     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8777 
8778     // Handle passing f64 on RV32D with a soft float ABI as a special case.
8779     bool IsF64OnRV32DSoftABI =
8780         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
8781     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
8782       SDValue SplitF64 = DAG.getNode(
8783           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8784       SDValue Lo = SplitF64.getValue(0);
8785       SDValue Hi = SplitF64.getValue(1);
8786 
8787       Register RegLo = VA.getLocReg();
8788       RegsToPass.push_back(std::make_pair(RegLo, Lo));
8789 
8790       if (RegLo == RISCV::X17) {
8791         // Second half of f64 is passed on the stack.
8792         // Work out the address of the stack slot.
8793         if (!StackPtr.getNode())
8794           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8795         // Emit the store.
8796         MemOpChains.push_back(
8797             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
8798       } else {
8799         // Second half of f64 is passed in another GPR.
8800         assert(RegLo < RISCV::X31 && "Invalid register pair");
8801         Register RegHigh = RegLo + 1;
8802         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8803       }
8804       continue;
8805     }
8806 
8807     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
8808     // as any other MemLoc.
8809 
8810     // Promote the value if needed.
8811     // For now, only handle fully promoted and indirect arguments.
8812     if (VA.getLocInfo() == CCValAssign::Indirect) {
8813       // Store the argument in a stack slot and pass its address.
8814       Align StackAlign =
8815           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
8816                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
8817       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
8818       // If the original argument was split (e.g. i128), we need
8819       // to store the required parts of it here (and pass just one address).
8820       // Vectors may be partly split to registers and partly to the stack, in
8821       // which case the base address is partly offset and subsequent stores are
8822       // relative to that.
8823       unsigned ArgIndex = Outs[i].OrigArgIndex;
8824       unsigned ArgPartOffset = Outs[i].PartOffset;
8825       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We don't know what we're actually
      // storing until we walk the remaining parts in the loop below and
      // collect the info.
8829       SmallVector<std::pair<SDValue, SDValue>> Parts;
8830       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
8831         SDValue PartValue = OutVals[i + 1];
8832         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
8833         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8834         EVT PartVT = PartValue.getValueType();
8835         if (PartVT.isScalableVector())
8836           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8837         StoredSize += PartVT.getStoreSize();
8838         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
8839         Parts.push_back(std::make_pair(PartValue, Offset));
8840         ++i;
8841       }
8842       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
8843       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
8844       MemOpChains.push_back(
8845           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
8846                        MachinePointerInfo::getFixedStack(MF, FI)));
8847       for (const auto &Part : Parts) {
8848         SDValue PartValue = Part.first;
8849         SDValue PartOffset = Part.second;
8850         SDValue Address =
8851             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
8852         MemOpChains.push_back(
8853             DAG.getStore(Chain, DL, PartValue, Address,
8854                          MachinePointerInfo::getFixedStack(MF, FI)));
8855       }
8856       ArgValue = SpillSlot;
8857     } else {
8858       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
8859     }
8860 
8861     // Use local copy if it is a byval arg.
8862     if (Flags.isByVal())
8863       ArgValue = ByValArgs[j++];
8864 
8865     if (VA.isRegLoc()) {
8866       // Queue up the argument copies and emit them at the end.
8867       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
8868     } else {
8869       assert(VA.isMemLoc() && "Argument not register or memory");
8870       assert(!IsTailCall && "Tail call not allowed if stack is used "
8871                             "for passing parameters");
8872 
8873       // Work out the address of the stack slot.
8874       if (!StackPtr.getNode())
8875         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8876       SDValue Address =
8877           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8878                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8879 
8880       // Emit the store.
8881       MemOpChains.push_back(
8882           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8883     }
8884   }
8885 
8886   // Join the stores, which are independent of one another.
8887   if (!MemOpChains.empty())
8888     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8889 
8890   SDValue Glue;
8891 
8892   // Build a sequence of copy-to-reg nodes, chained and glued together.
8893   for (auto &Reg : RegsToPass) {
8894     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8895     Glue = Chain.getValue(1);
8896   }
8897 
  // Validate that none of the argument registers have been marked as
  // reserved; if so, report an error. Do the same for the return address
  // register if this is not a tail call.
8901   validateCCReservedRegs(RegsToPass, MF);
8902   if (!IsTailCall &&
8903       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8904     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8905         MF.getFunction(),
8906         "Return address register required, but has been reserved."});
8907 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so the direct call can be matched by PseudoCALL.
8911   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8912     const GlobalValue *GV = S->getGlobal();
8913 
8914     unsigned OpFlags = RISCVII::MO_CALL;
8915     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8916       OpFlags = RISCVII::MO_PLT;
8917 
8918     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8919   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8920     unsigned OpFlags = RISCVII::MO_CALL;
8921 
8922     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8923                                                  nullptr))
8924       OpFlags = RISCVII::MO_PLT;
8925 
8926     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8927   }
8928 
8929   // The first call operand is the chain and the second is the target address.
8930   SmallVector<SDValue, 8> Ops;
8931   Ops.push_back(Chain);
8932   Ops.push_back(Callee);
8933 
8934   // Add argument registers to the end of the list so that they are
8935   // known live into the call.
8936   for (auto &Reg : RegsToPass)
8937     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8938 
8939   if (!IsTailCall) {
8940     // Add a register mask operand representing the call-preserved registers.
8941     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8942     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8943     assert(Mask && "Missing call preserved mask for calling convention");
8944     Ops.push_back(DAG.getRegisterMask(Mask));
8945   }
8946 
8947   // Glue the call to the argument copies, if any.
8948   if (Glue.getNode())
8949     Ops.push_back(Glue);
8950 
8951   // Emit the call.
8952   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8953 
8954   if (IsTailCall) {
8955     MF.getFrameInfo().setHasTailCall();
8956     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8957   }
8958 
8959   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8960   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8961   Glue = Chain.getValue(1);
8962 
8963   // Mark the end of the call, which is glued to the call itself.
8964   Chain = DAG.getCALLSEQ_END(Chain,
8965                              DAG.getConstant(NumBytes, DL, PtrVT, true),
8966                              DAG.getConstant(0, DL, PtrVT, true),
8967                              Glue, DL);
8968   Glue = Chain.getValue(1);
8969 
8970   // Assign locations to each value returned by this call.
8971   SmallVector<CCValAssign, 16> RVLocs;
8972   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8973   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8974 
8975   // Copy all of the result registers out of their specified physreg.
8976   for (auto &VA : RVLocs) {
    // Copy the value out.
8978     SDValue RetValue =
8979         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence.
8981     Chain = RetValue.getValue(1);
8982     Glue = RetValue.getValue(2);
8983 
8984     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8985       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8986       SDValue RetValue2 =
8987           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8988       Chain = RetValue2.getValue(1);
8989       Glue = RetValue2.getValue(2);
8990       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8991                              RetValue2);
8992     }
8993 
8994     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8995 
8996     InVals.push_back(RetValue);
8997   }
8998 
8999   return Chain;
9000 }
9001 
9002 bool RISCVTargetLowering::CanLowerReturn(
9003     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
9004     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
9005   SmallVector<CCValAssign, 16> RVLocs;
9006   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9007 
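  // If any return value is a vector mask, CC_RISCV assigns the first mask
  // value to V0; find its index up front.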
9008   Optional<unsigned> FirstMaskArgument;
9009   if (Subtarget.hasVInstructions())
9010     FirstMaskArgument = preAssignMask(Outs);
9011 
9012   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9013     MVT VT = Outs[i].VT;
9014     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9015     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9016     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
9017                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
9018                  *this, FirstMaskArgument))
9019       return false;
9020   }
9021   return true;
9022 }
9023 
9024 SDValue
9025 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9026                                  bool IsVarArg,
9027                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
9028                                  const SmallVectorImpl<SDValue> &OutVals,
9029                                  const SDLoc &DL, SelectionDAG &DAG) const {
9030   const MachineFunction &MF = DAG.getMachineFunction();
9031   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9032 
  // Stores the assignment of each return value to a location.
9034   SmallVector<CCValAssign, 16> RVLocs;
9035 
9036   // Info about the registers and stack slot.
9037   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9038                  *DAG.getContext());
9039 
9040   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9041                     nullptr, CC_RISCV);
9042 
9043   if (CallConv == CallingConv::GHC && !RVLocs.empty())
9044     report_fatal_error("GHC functions return void only");
9045 
9046   SDValue Glue;
9047   SmallVector<SDValue, 4> RetOps(1, Chain);
9048 
9049   // Copy the result values into the output registers.
9050   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9051     SDValue Val = OutVals[i];
9052     CCValAssign &VA = RVLocs[i];
9053     assert(VA.isRegLoc() && "Can only return in registers!");
9054 
9055     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9056       // Handle returning f64 on RV32D with a soft float ABI.
9057       assert(VA.isRegLoc() && "Expected return via registers");
9058       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9059                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
9060       SDValue Lo = SplitF64.getValue(0);
9061       SDValue Hi = SplitF64.getValue(1);
9062       Register RegLo = VA.getLocReg();
9063       assert(RegLo < RISCV::X31 && "Invalid register pair");
9064       Register RegHi = RegLo + 1;
9065 
9066       if (STI.isRegisterReservedByUser(RegLo) ||
9067           STI.isRegisterReservedByUser(RegHi))
9068         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9069             MF.getFunction(),
9070             "Return value register required, but has been reserved."});
9071 
9072       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
9073       Glue = Chain.getValue(1);
9074       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
9075       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
9076       Glue = Chain.getValue(1);
9077       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
9078     } else {
9079       // Handle a 'normal' return.
9080       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
9081       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
9082 
9083       if (STI.isRegisterReservedByUser(VA.getLocReg()))
9084         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9085             MF.getFunction(),
9086             "Return value register required, but has been reserved."});
9087 
9088       // Guarantee that all emitted copies are stuck together.
9089       Glue = Chain.getValue(1);
9090       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
9091     }
9092   }
9093 
9094   RetOps[0] = Chain; // Update chain.
9095 
9096   // Add the glue node if we have it.
9097   if (Glue.getNode()) {
9098     RetOps.push_back(Glue);
9099   }
9100 
9101   unsigned RetOpc = RISCVISD::RET_FLAG;
9102   // Interrupt service routines use different return instructions.
9103   const Function &Func = DAG.getMachineFunction().getFunction();
9104   if (Func.hasFnAttribute("interrupt")) {
9105     if (!Func.getReturnType()->isVoidTy())
9106       report_fatal_error(
9107           "Functions with the interrupt attribute must have void return type!");
9108 
9109     MachineFunction &MF = DAG.getMachineFunction();
9110     StringRef Kind =
9111       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9112 
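    // Map the interrupt kind to the matching return instruction: uret for
    // "user", sret for "supervisor", and mret otherwise ("machine").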
9113     if (Kind == "user")
9114       RetOpc = RISCVISD::URET_FLAG;
9115     else if (Kind == "supervisor")
9116       RetOpc = RISCVISD::SRET_FLAG;
9117     else
9118       RetOpc = RISCVISD::MRET_FLAG;
9119   }
9120 
9121   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
9122 }
9123 
9124 void RISCVTargetLowering::validateCCReservedRegs(
9125     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
9126     MachineFunction &MF) const {
9127   const Function &F = MF.getFunction();
9128   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9129 
9130   if (llvm::any_of(Regs, [&STI](auto Reg) {
9131         return STI.isRegisterReservedByUser(Reg.first);
9132       }))
9133     F.getContext().diagnose(DiagnosticInfoUnsupported{
9134         F, "Argument register required, but has been reserved."});
9135 }
9136 
9137 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
9138   return CI->isTailCall();
9139 }
9140 
9141 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
9142 #define NODE_NAME_CASE(NODE)                                                   \
9143   case RISCVISD::NODE:                                                         \
9144     return "RISCVISD::" #NODE;
9145   // clang-format off
9146   switch ((RISCVISD::NodeType)Opcode) {
9147   case RISCVISD::FIRST_NUMBER:
9148     break;
9149   NODE_NAME_CASE(RET_FLAG)
9150   NODE_NAME_CASE(URET_FLAG)
9151   NODE_NAME_CASE(SRET_FLAG)
9152   NODE_NAME_CASE(MRET_FLAG)
9153   NODE_NAME_CASE(CALL)
9154   NODE_NAME_CASE(SELECT_CC)
9155   NODE_NAME_CASE(BR_CC)
9156   NODE_NAME_CASE(BuildPairF64)
9157   NODE_NAME_CASE(SplitF64)
9158   NODE_NAME_CASE(TAIL)
9159   NODE_NAME_CASE(MULHSU)
9160   NODE_NAME_CASE(SLLW)
9161   NODE_NAME_CASE(SRAW)
9162   NODE_NAME_CASE(SRLW)
9163   NODE_NAME_CASE(DIVW)
9164   NODE_NAME_CASE(DIVUW)
9165   NODE_NAME_CASE(REMUW)
9166   NODE_NAME_CASE(ROLW)
9167   NODE_NAME_CASE(RORW)
9168   NODE_NAME_CASE(CLZW)
9169   NODE_NAME_CASE(CTZW)
9170   NODE_NAME_CASE(FSLW)
9171   NODE_NAME_CASE(FSRW)
9172   NODE_NAME_CASE(FSL)
9173   NODE_NAME_CASE(FSR)
9174   NODE_NAME_CASE(FMV_H_X)
9175   NODE_NAME_CASE(FMV_X_ANYEXTH)
9176   NODE_NAME_CASE(FMV_W_X_RV64)
9177   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
9178   NODE_NAME_CASE(FCVT_X_RTZ)
9179   NODE_NAME_CASE(FCVT_XU_RTZ)
9180   NODE_NAME_CASE(FCVT_W_RTZ_RV64)
9181   NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
9182   NODE_NAME_CASE(READ_CYCLE_WIDE)
9183   NODE_NAME_CASE(GREV)
9184   NODE_NAME_CASE(GREVW)
9185   NODE_NAME_CASE(GORC)
9186   NODE_NAME_CASE(GORCW)
9187   NODE_NAME_CASE(SHFL)
9188   NODE_NAME_CASE(SHFLW)
9189   NODE_NAME_CASE(UNSHFL)
9190   NODE_NAME_CASE(UNSHFLW)
9191   NODE_NAME_CASE(BCOMPRESS)
9192   NODE_NAME_CASE(BCOMPRESSW)
9193   NODE_NAME_CASE(BDECOMPRESS)
9194   NODE_NAME_CASE(BDECOMPRESSW)
9195   NODE_NAME_CASE(VMV_V_X_VL)
9196   NODE_NAME_CASE(VFMV_V_F_VL)
9197   NODE_NAME_CASE(VMV_X_S)
9198   NODE_NAME_CASE(VMV_S_X_VL)
9199   NODE_NAME_CASE(VFMV_S_F_VL)
9200   NODE_NAME_CASE(SPLAT_VECTOR_I64)
9201   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
9202   NODE_NAME_CASE(READ_VLENB)
9203   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
9204   NODE_NAME_CASE(VSLIDEUP_VL)
9205   NODE_NAME_CASE(VSLIDE1UP_VL)
9206   NODE_NAME_CASE(VSLIDEDOWN_VL)
9207   NODE_NAME_CASE(VSLIDE1DOWN_VL)
9208   NODE_NAME_CASE(VID_VL)
9209   NODE_NAME_CASE(VFNCVT_ROD_VL)
9210   NODE_NAME_CASE(VECREDUCE_ADD_VL)
9211   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
9212   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
9213   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
9214   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
9215   NODE_NAME_CASE(VECREDUCE_AND_VL)
9216   NODE_NAME_CASE(VECREDUCE_OR_VL)
9217   NODE_NAME_CASE(VECREDUCE_XOR_VL)
9218   NODE_NAME_CASE(VECREDUCE_FADD_VL)
9219   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
9220   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
9221   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
9222   NODE_NAME_CASE(ADD_VL)
9223   NODE_NAME_CASE(AND_VL)
9224   NODE_NAME_CASE(MUL_VL)
9225   NODE_NAME_CASE(OR_VL)
9226   NODE_NAME_CASE(SDIV_VL)
9227   NODE_NAME_CASE(SHL_VL)
9228   NODE_NAME_CASE(SREM_VL)
9229   NODE_NAME_CASE(SRA_VL)
9230   NODE_NAME_CASE(SRL_VL)
9231   NODE_NAME_CASE(SUB_VL)
9232   NODE_NAME_CASE(UDIV_VL)
9233   NODE_NAME_CASE(UREM_VL)
9234   NODE_NAME_CASE(XOR_VL)
9235   NODE_NAME_CASE(SADDSAT_VL)
9236   NODE_NAME_CASE(UADDSAT_VL)
9237   NODE_NAME_CASE(SSUBSAT_VL)
9238   NODE_NAME_CASE(USUBSAT_VL)
9239   NODE_NAME_CASE(FADD_VL)
9240   NODE_NAME_CASE(FSUB_VL)
9241   NODE_NAME_CASE(FMUL_VL)
9242   NODE_NAME_CASE(FDIV_VL)
9243   NODE_NAME_CASE(FNEG_VL)
9244   NODE_NAME_CASE(FABS_VL)
9245   NODE_NAME_CASE(FSQRT_VL)
9246   NODE_NAME_CASE(FMA_VL)
9247   NODE_NAME_CASE(FCOPYSIGN_VL)
9248   NODE_NAME_CASE(SMIN_VL)
9249   NODE_NAME_CASE(SMAX_VL)
9250   NODE_NAME_CASE(UMIN_VL)
9251   NODE_NAME_CASE(UMAX_VL)
9252   NODE_NAME_CASE(FMINNUM_VL)
9253   NODE_NAME_CASE(FMAXNUM_VL)
9254   NODE_NAME_CASE(MULHS_VL)
9255   NODE_NAME_CASE(MULHU_VL)
9256   NODE_NAME_CASE(FP_TO_SINT_VL)
9257   NODE_NAME_CASE(FP_TO_UINT_VL)
9258   NODE_NAME_CASE(SINT_TO_FP_VL)
9259   NODE_NAME_CASE(UINT_TO_FP_VL)
9260   NODE_NAME_CASE(FP_EXTEND_VL)
9261   NODE_NAME_CASE(FP_ROUND_VL)
9262   NODE_NAME_CASE(VWMUL_VL)
9263   NODE_NAME_CASE(VWMULU_VL)
9264   NODE_NAME_CASE(SETCC_VL)
9265   NODE_NAME_CASE(VSELECT_VL)
9266   NODE_NAME_CASE(VMAND_VL)
9267   NODE_NAME_CASE(VMOR_VL)
9268   NODE_NAME_CASE(VMXOR_VL)
9269   NODE_NAME_CASE(VMCLR_VL)
9270   NODE_NAME_CASE(VMSET_VL)
9271   NODE_NAME_CASE(VRGATHER_VX_VL)
9272   NODE_NAME_CASE(VRGATHER_VV_VL)
9273   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
9274   NODE_NAME_CASE(VSEXT_VL)
9275   NODE_NAME_CASE(VZEXT_VL)
9276   NODE_NAME_CASE(VPOPC_VL)
9277   NODE_NAME_CASE(VLE_VL)
9278   NODE_NAME_CASE(VSE_VL)
9279   NODE_NAME_CASE(READ_CSR)
9280   NODE_NAME_CASE(WRITE_CSR)
9281   NODE_NAME_CASE(SWAP_CSR)
9282   }
9283   // clang-format on
9284   return nullptr;
9285 #undef NODE_NAME_CASE
9286 }
9287 
9288 /// getConstraintType - Given a constraint letter, return the type of
9289 /// constraint it is for this target.
9290 RISCVTargetLowering::ConstraintType
9291 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
9292   if (Constraint.size() == 1) {
9293     switch (Constraint[0]) {
9294     default:
9295       break;
9296     case 'f':
9297       return C_RegisterClass;
9298     case 'I':
9299     case 'J':
9300     case 'K':
9301       return C_Immediate;
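    // 'A': an address that is held in a general-purpose register.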
9302     case 'A':
9303       return C_Memory;
9304     case 'S': // A symbolic address
9305       return C_Other;
9306     }
9307   } else {
9308     if (Constraint == "vr" || Constraint == "vm")
9309       return C_RegisterClass;
9310   }
9311   return TargetLowering::getConstraintType(Constraint);
9312 }
9313 
9314 std::pair<unsigned, const TargetRegisterClass *>
9315 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9316                                                   StringRef Constraint,
9317                                                   MVT VT) const {
9318   // First, see if this is a constraint that directly corresponds to a
9319   // RISCV register class.
9320   if (Constraint.size() == 1) {
9321     switch (Constraint[0]) {
9322     case 'r':
9323       return std::make_pair(0U, &RISCV::GPRRegClass);
9324     case 'f':
9325       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
9326         return std::make_pair(0U, &RISCV::FPR16RegClass);
9327       if (Subtarget.hasStdExtF() && VT == MVT::f32)
9328         return std::make_pair(0U, &RISCV::FPR32RegClass);
9329       if (Subtarget.hasStdExtD() && VT == MVT::f64)
9330         return std::make_pair(0U, &RISCV::FPR64RegClass);
9331       break;
9332     default:
9333       break;
9334     }
9335   } else {
9336     if (Constraint == "vr") {
9337       for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
9338                              &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9339         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
9340           return std::make_pair(0U, RC);
9341       }
9342     } else if (Constraint == "vm") {
9343       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9344         return std::make_pair(0U, &RISCV::VMRegClass);
9345     }
9346   }
9347 
9348   // Clang will correctly decode the usage of register name aliases into their
9349   // official names. However, other frontends like `rustc` do not. This allows
9350   // users of these frontends to use the ABI names for registers in LLVM-style
9351   // register constraints.
9352   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
9353                                .Case("{zero}", RISCV::X0)
9354                                .Case("{ra}", RISCV::X1)
9355                                .Case("{sp}", RISCV::X2)
9356                                .Case("{gp}", RISCV::X3)
9357                                .Case("{tp}", RISCV::X4)
9358                                .Case("{t0}", RISCV::X5)
9359                                .Case("{t1}", RISCV::X6)
9360                                .Case("{t2}", RISCV::X7)
9361                                .Cases("{s0}", "{fp}", RISCV::X8)
9362                                .Case("{s1}", RISCV::X9)
9363                                .Case("{a0}", RISCV::X10)
9364                                .Case("{a1}", RISCV::X11)
9365                                .Case("{a2}", RISCV::X12)
9366                                .Case("{a3}", RISCV::X13)
9367                                .Case("{a4}", RISCV::X14)
9368                                .Case("{a5}", RISCV::X15)
9369                                .Case("{a6}", RISCV::X16)
9370                                .Case("{a7}", RISCV::X17)
9371                                .Case("{s2}", RISCV::X18)
9372                                .Case("{s3}", RISCV::X19)
9373                                .Case("{s4}", RISCV::X20)
9374                                .Case("{s5}", RISCV::X21)
9375                                .Case("{s6}", RISCV::X22)
9376                                .Case("{s7}", RISCV::X23)
9377                                .Case("{s8}", RISCV::X24)
9378                                .Case("{s9}", RISCV::X25)
9379                                .Case("{s10}", RISCV::X26)
9380                                .Case("{s11}", RISCV::X27)
9381                                .Case("{t3}", RISCV::X28)
9382                                .Case("{t4}", RISCV::X29)
9383                                .Case("{t5}", RISCV::X30)
9384                                .Case("{t6}", RISCV::X31)
9385                                .Default(RISCV::NoRegister);
9386   if (XRegFromAlias != RISCV::NoRegister)
9387     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
9388 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
  //
  // The second name in each case below is the ABI name of the register, so
  // that frontends can also use the ABI names in register constraint lists.
9396   if (Subtarget.hasStdExtF()) {
9397     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
9398                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
9399                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
9400                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
9401                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
9402                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
9403                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
9404                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
9405                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
9406                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
9407                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
9408                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
9409                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
9410                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
9411                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
9412                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
9413                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
9414                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
9415                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
9416                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
9417                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
9418                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
9419                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
9420                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
9421                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
9422                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
9423                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
9424                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
9425                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
9426                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
9427                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
9428                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
9429                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
9430                         .Default(RISCV::NoRegister);
9431     if (FReg != RISCV::NoRegister) {
9432       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
9433       if (Subtarget.hasStdExtD()) {
9434         unsigned RegNo = FReg - RISCV::F0_F;
9435         unsigned DReg = RISCV::F0_D + RegNo;
9436         return std::make_pair(DReg, &RISCV::FPR64RegClass);
9437       }
9438       return std::make_pair(FReg, &RISCV::FPR32RegClass);
9439     }
9440   }
9441 
9442   if (Subtarget.hasVInstructions()) {
9443     Register VReg = StringSwitch<Register>(Constraint.lower())
9444                         .Case("{v0}", RISCV::V0)
9445                         .Case("{v1}", RISCV::V1)
9446                         .Case("{v2}", RISCV::V2)
9447                         .Case("{v3}", RISCV::V3)
9448                         .Case("{v4}", RISCV::V4)
9449                         .Case("{v5}", RISCV::V5)
9450                         .Case("{v6}", RISCV::V6)
9451                         .Case("{v7}", RISCV::V7)
9452                         .Case("{v8}", RISCV::V8)
9453                         .Case("{v9}", RISCV::V9)
9454                         .Case("{v10}", RISCV::V10)
9455                         .Case("{v11}", RISCV::V11)
9456                         .Case("{v12}", RISCV::V12)
9457                         .Case("{v13}", RISCV::V13)
9458                         .Case("{v14}", RISCV::V14)
9459                         .Case("{v15}", RISCV::V15)
9460                         .Case("{v16}", RISCV::V16)
9461                         .Case("{v17}", RISCV::V17)
9462                         .Case("{v18}", RISCV::V18)
9463                         .Case("{v19}", RISCV::V19)
9464                         .Case("{v20}", RISCV::V20)
9465                         .Case("{v21}", RISCV::V21)
9466                         .Case("{v22}", RISCV::V22)
9467                         .Case("{v23}", RISCV::V23)
9468                         .Case("{v24}", RISCV::V24)
9469                         .Case("{v25}", RISCV::V25)
9470                         .Case("{v26}", RISCV::V26)
9471                         .Case("{v27}", RISCV::V27)
9472                         .Case("{v28}", RISCV::V28)
9473                         .Case("{v29}", RISCV::V29)
9474                         .Case("{v30}", RISCV::V30)
9475                         .Case("{v31}", RISCV::V31)
9476                         .Default(RISCV::NoRegister);
9477     if (VReg != RISCV::NoRegister) {
9478       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9479         return std::make_pair(VReg, &RISCV::VMRegClass);
9480       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
9481         return std::make_pair(VReg, &RISCV::VRRegClass);
9482       for (const auto *RC :
9483            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9484         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
9485           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
9486           return std::make_pair(VReg, RC);
9487         }
9488       }
9489     }
9490   }
9491 
9492   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9493 }
9494 
9495 unsigned
9496 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // We currently only support length-1 constraints.
9498   if (ConstraintCode.size() == 1) {
9499     switch (ConstraintCode[0]) {
9500     case 'A':
9501       return InlineAsm::Constraint_A;
9502     default:
9503       break;
9504     }
9505   }
9506 
9507   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
9508 }
9509 
9510 void RISCVTargetLowering::LowerAsmOperandForConstraint(
9511     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9512     SelectionDAG &DAG) const {
  // We currently only support length-1 constraints.
9514   if (Constraint.length() == 1) {
9515     switch (Constraint[0]) {
9516     case 'I':
9517       // Validate & create a 12-bit signed immediate operand.
9518       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9519         uint64_t CVal = C->getSExtValue();
9520         if (isInt<12>(CVal))
9521           Ops.push_back(
9522               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9523       }
9524       return;
9525     case 'J':
9526       // Validate & create an integer zero operand.
9527       if (auto *C = dyn_cast<ConstantSDNode>(Op))
9528         if (C->getZExtValue() == 0)
9529           Ops.push_back(
9530               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
9531       return;
9532     case 'K':
9533       // Validate & create a 5-bit unsigned immediate operand.
9534       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9535         uint64_t CVal = C->getZExtValue();
9536         if (isUInt<5>(CVal))
9537           Ops.push_back(
9538               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9539       }
9540       return;
9541     case 'S':
9542       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9543         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9544                                                  GA->getValueType(0)));
9545       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
9546         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
9547                                                 BA->getValueType(0)));
9548       }
9549       return;
9550     default:
9551       break;
9552     }
9553   }
9554   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9555 }
9556 
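// Emit the extra fences needed to map the IR's atomic orderings onto RISC-V:
// a seq_cst load gets a leading seq_cst fence, a release (or stronger) store
// gets a leading release fence, and an acquire (or stronger) load gets a
// trailing acquire fence (see emitTrailingFence).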
9557 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
9558                                                    Instruction *Inst,
9559                                                    AtomicOrdering Ord) const {
9560   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
9561     return Builder.CreateFence(Ord);
9562   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
9563     return Builder.CreateFence(AtomicOrdering::Release);
9564   return nullptr;
9565 }
9566 
9567 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
9568                                                     Instruction *Inst,
9569                                                     AtomicOrdering Ord) const {
9570   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
9571     return Builder.CreateFence(AtomicOrdering::Acquire);
9572   return nullptr;
9573 }
9574 
9575 TargetLowering::AtomicExpansionKind
9576 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
9577   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
9578   // point operations can't be used in an lr/sc sequence without breaking the
9579   // forward-progress guarantee.
9580   if (AI->isFloatingPointOperation())
9581     return AtomicExpansionKind::CmpXChg;
9582 
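  // The A extension only provides word/doubleword AMOs, so 8- and 16-bit
  // atomicrmw operations are expanded to a masked LR/SC loop on the
  // containing aligned word.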
9583   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
9584   if (Size == 8 || Size == 16)
9585     return AtomicExpansionKind::MaskedIntrinsic;
9586   return AtomicExpansionKind::None;
9587 }
9588 
9589 static Intrinsic::ID
9590 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
9591   if (XLen == 32) {
9592     switch (BinOp) {
9593     default:
9594       llvm_unreachable("Unexpected AtomicRMW BinOp");
9595     case AtomicRMWInst::Xchg:
9596       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
9597     case AtomicRMWInst::Add:
9598       return Intrinsic::riscv_masked_atomicrmw_add_i32;
9599     case AtomicRMWInst::Sub:
9600       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
9601     case AtomicRMWInst::Nand:
9602       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
9603     case AtomicRMWInst::Max:
9604       return Intrinsic::riscv_masked_atomicrmw_max_i32;
9605     case AtomicRMWInst::Min:
9606       return Intrinsic::riscv_masked_atomicrmw_min_i32;
9607     case AtomicRMWInst::UMax:
9608       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
9609     case AtomicRMWInst::UMin:
9610       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
9611     }
9612   }
9613 
9614   if (XLen == 64) {
9615     switch (BinOp) {
9616     default:
9617       llvm_unreachable("Unexpected AtomicRMW BinOp");
9618     case AtomicRMWInst::Xchg:
9619       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
9620     case AtomicRMWInst::Add:
9621       return Intrinsic::riscv_masked_atomicrmw_add_i64;
9622     case AtomicRMWInst::Sub:
9623       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
9624     case AtomicRMWInst::Nand:
9625       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
9626     case AtomicRMWInst::Max:
9627       return Intrinsic::riscv_masked_atomicrmw_max_i64;
9628     case AtomicRMWInst::Min:
9629       return Intrinsic::riscv_masked_atomicrmw_min_i64;
9630     case AtomicRMWInst::UMax:
9631       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
9632     case AtomicRMWInst::UMin:
9633       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
9634     }
9635   }
9636 
  llvm_unreachable("Unexpected XLen");
9638 }
9639 
9640 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
9641     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
9642     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
9643   unsigned XLen = Subtarget.getXLen();
9644   Value *Ordering =
9645       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
9646   Type *Tys[] = {AlignedAddr->getType()};
9647   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
9648       AI->getModule(),
9649       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
9650 
9651   if (XLen == 64) {
9652     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
9653     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9654     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
9655   }
9656 
9657   Value *Result;
9658 
9659   // Must pass the shift amount needed to sign extend the loaded value prior
9660   // to performing a signed comparison for min/max. ShiftAmt is the number of
9661   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
9662   // is the number of bits to left+right shift the value in order to
9663   // sign-extend.
9664   if (AI->getOperation() == AtomicRMWInst::Min ||
9665       AI->getOperation() == AtomicRMWInst::Max) {
9666     const DataLayout &DL = AI->getModule()->getDataLayout();
9667     unsigned ValWidth =
9668         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
9669     Value *SextShamt =
9670         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
9671     Result = Builder.CreateCall(LrwOpScwLoop,
9672                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
9673   } else {
9674     Result =
9675         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
9676   }
9677 
9678   if (XLen == 64)
9679     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9680   return Result;
9681 }
9682 
9683 TargetLowering::AtomicExpansionKind
9684 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
9685     AtomicCmpXchgInst *CI) const {
9686   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
9687   if (Size == 8 || Size == 16)
9688     return AtomicExpansionKind::MaskedIntrinsic;
9689   return AtomicExpansionKind::None;
9690 }
9691 
9692 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
9693     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
9694     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
9695   unsigned XLen = Subtarget.getXLen();
9696   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
9697   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
9698   if (XLen == 64) {
9699     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
9700     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
9701     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9702     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
9703   }
9704   Type *Tys[] = {AlignedAddr->getType()};
9705   Function *MaskedCmpXchg =
9706       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
9707   Value *Result = Builder.CreateCall(
9708       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
9709   if (XLen == 64)
9710     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9711   return Result;
9712 }
9713 
9714 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
9715   return false;
9716 }
9717 
9718 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
9719                                                      EVT VT) const {
9720   VT = VT.getScalarType();
9721 
9722   if (!VT.isSimple())
9723     return false;
9724 
9725   switch (VT.getSimpleVT().SimpleTy) {
9726   case MVT::f16:
9727     return Subtarget.hasStdExtZfh();
9728   case MVT::f32:
9729     return Subtarget.hasStdExtF();
9730   case MVT::f64:
9731     return Subtarget.hasStdExtD();
9732   default:
9733     break;
9734   }
9735 
9736   return false;
9737 }
9738 
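// The exception pointer and selector are made available to landing pads in
// a0 (X10) and a1 (X11), the first two integer argument registers.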
9739 Register RISCVTargetLowering::getExceptionPointerRegister(
9740     const Constant *PersonalityFn) const {
9741   return RISCV::X10;
9742 }
9743 
9744 Register RISCVTargetLowering::getExceptionSelectorRegister(
9745     const Constant *PersonalityFn) const {
9746   return RISCV::X11;
9747 }
9748 
9749 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value has f32 type under the LP64 ABI.
9752   RISCVABI::ABI ABI = Subtarget.getTargetABI();
9753   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
9754     return false;
9755 
9756   return true;
9757 }
9758 
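// RV64 keeps 32-bit values sign-extended in 64-bit registers, so i32 libcall
// arguments must be sign-extended regardless of their signedness.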
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
9760   if (Subtarget.is64Bit() && Type == MVT::i32)
9761     return true;
9762 
9763   return IsSigned;
9764 }
9765 
9766 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
9767                                                  SDValue C) const {
9768   // Check integral scalar types.
9769   if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
9772     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
9773       return false;
9774     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into a SLLI and an ADD/SUB.
9776       const APInt &Imm = ConstNode->getAPIntValue();
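      // For example, x * 3 becomes (x << 2) - x and x * 5 becomes
      // (x << 2) + x.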
9777       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
9778           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
9779         return true;
9780       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
9781       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
9782           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
9783            (Imm - 8).isPowerOf2()))
9784         return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
9787       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
9788         return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI to materialize.
9791       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
9792         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
9793         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
9794             (1 - ImmS).isPowerOf2())
          return true;
9796       }
9797     }
9798   }
9799 
9800   return false;
9801 }
9802 
9803 bool RISCVTargetLowering::isMulAddWithConstProfitable(
9804     const SDValue &AddNode, const SDValue &ConstNode) const {
9805   // Let the DAGCombiner decide for vectors.
9806   EVT VT = AddNode.getValueType();
9807   if (VT.isVector())
9808     return true;
9809 
9810   // Let the DAGCombiner decide for larger types.
9811   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
9812     return true;
9813 
  // It is worse if c1 is simm12 (cheap to materialize with ADDI) while c1*c2
  // is not.
9815   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
9816   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
9817   const APInt &C1 = C1Node->getAPIntValue();
9818   const APInt &C2 = C2Node->getAPIntValue();
9819   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
9820     return false;
9821 
9822   // Default to true and let the DAGCombiner decide.
9823   return true;
9824 }
9825 
9826 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
9827     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
9828     bool *Fast) const {
9829   if (!VT.isVector())
9830     return false;
9831 
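  // Element-wise vector accesses only require element alignment, so an access
  // aligned to at least the element size can be treated as fast.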
9832   EVT ElemVT = VT.getVectorElementType();
9833   if (Alignment >= ElemVT.getStoreSize()) {
9834     if (Fast)
9835       *Fast = true;
9836     return true;
9837   }
9838 
9839   return false;
9840 }
9841 
9842 bool RISCVTargetLowering::splitValueIntoRegisterParts(
9843     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
9844     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
9845   bool IsABIRegCopy = CC.hasValue();
9846   EVT ValueVT = Val.getValueType();
9847   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN
    // (NaN-boxing), and cast to f32.
9850     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
9851     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
9852     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
9853                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
9854     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
9855     Parts[0] = Val;
9856     return true;
9857   }
9858 
9859   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
9860     LLVMContext &Context = *DAG.getContext();
9861     EVT ValueEltVT = ValueVT.getVectorElementType();
9862     EVT PartEltVT = PartVT.getVectorElementType();
9863     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
9864     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
9865     if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types are different, bitcast to a vector with the same
      // element type as PartVT first.
9868       if (ValueEltVT != PartEltVT) {
9869         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
9871         EVT SameEltTypeVT =
9872             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
9873         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
9874       }
9875       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
9876                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
9877       Parts[0] = Val;
9878       return true;
9879     }
9880   }
9881   return false;
9882 }
9883 
9884 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
9885     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
9886     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
9887   bool IsABIRegCopy = CC.hasValue();
9888   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
9889     SDValue Val = Parts[0];
9890 
9891     // Cast the f32 to i32, truncate to i16, and cast back to f16.
9892     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
9893     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
9894     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
9895     return Val;
9896   }
9897 
9898   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
9899     LLVMContext &Context = *DAG.getContext();
9900     SDValue Val = Parts[0];
9901     EVT ValueEltVT = ValueVT.getVectorElementType();
9902     EVT PartEltVT = PartVT.getVectorElementType();
9903     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
9904     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
9905     if (PartVTBitSize % ValueVTBitSize == 0) {
9906       EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert the value to a vector
      // with the same element type as PartVT.
9909       if (ValueEltVT != PartEltVT) {
9910         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
9912         SameEltTypeVT =
9913             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
9914       }
9915       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
9916                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
9917       if (ValueEltVT != PartEltVT)
9918         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
9919       return Val;
9920     }
9921   }
9922   return SDValue();
9923 }
9924 
9925 #define GET_REGISTER_MATCHER
9926 #include "RISCVGenAsmMatcher.inc"
9927 
9928 Register
9929 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
9930                                        const MachineFunction &MF) const {
9931   Register Reg = MatchRegisterAltName(RegName);
9932   if (Reg == RISCV::NoRegister)
9933     Reg = MatchRegisterName(RegName);
9934   if (Reg == RISCV::NoRegister)
9935     report_fatal_error(
9936         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
9937   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
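  // Reading a named register is only allowed for registers that are reserved,
  // either unconditionally or by the user (e.g. with -ffixed-<reg>).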
9938   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
9939     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
9940                              StringRef(RegName) + "\"."));
9941   return Reg;
9942 }
9943 
9944 namespace llvm {
9945 namespace RISCVVIntrinsicsTable {
9946 
9947 #define GET_RISCVVIntrinsicsTable_IMPL
9948 #include "RISCVGenSearchableTables.inc"
9949 
9950 } // namespace RISCVVIntrinsicsTable
9951 
9952 } // namespace llvm
9953