1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/IntrinsicsRISCV.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/KnownBits.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "riscv-lower"
44 
45 STATISTIC(NumTailCalls, "Number of tail calls");
46 
47 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
48                                          const RISCVSubtarget &STI)
49     : TargetLowering(TM), Subtarget(STI) {
50 
51   if (Subtarget.isRV32E())
52     report_fatal_error("Codegen not yet implemented for RV32E");
53 
54   RISCVABI::ABI ABI = Subtarget.getTargetABI();
55   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
56 
57   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
58       !Subtarget.hasStdExtF()) {
59     errs() << "Hard-float 'f' ABI can't be used for a target that "
60                 "doesn't support the F instruction set extension (ignoring "
61                           "target-abi)\n";
62     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
63   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
64              !Subtarget.hasStdExtD()) {
65     errs() << "Hard-float 'd' ABI can't be used for a target that "
66               "doesn't support the D instruction set extension (ignoring "
67               "target-abi)\n";
68     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
69   }
70 
71   switch (ABI) {
72   default:
73     report_fatal_error("Don't know how to lower this ABI");
74   case RISCVABI::ABI_ILP32:
75   case RISCVABI::ABI_ILP32F:
76   case RISCVABI::ABI_ILP32D:
77   case RISCVABI::ABI_LP64:
78   case RISCVABI::ABI_LP64F:
79   case RISCVABI::ABI_LP64D:
80     break;
81   }
82 
83   MVT XLenVT = Subtarget.getXLenVT();
84 
85   // Set up the register classes.
86   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
87 
88   if (Subtarget.hasStdExtZfh())
89     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
90   if (Subtarget.hasStdExtF())
91     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
92   if (Subtarget.hasStdExtD())
93     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
94 
95   static const MVT::SimpleValueType BoolVecVTs[] = {
96       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
97       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
98   static const MVT::SimpleValueType IntVecVTs[] = {
99       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
100       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
101       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
102       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
103       MVT::nxv4i64, MVT::nxv8i64};
104   static const MVT::SimpleValueType F16VecVTs[] = {
105       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
106       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
107   static const MVT::SimpleValueType F32VecVTs[] = {
108       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
109   static const MVT::SimpleValueType F64VecVTs[] = {
110       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
111 
112   if (Subtarget.hasVInstructions()) {
113     auto addRegClassForRVV = [this](MVT VT) {
114       unsigned Size = VT.getSizeInBits().getKnownMinValue();
115       assert(Size <= 512 && isPowerOf2_32(Size));
116       const TargetRegisterClass *RC;
117       if (Size <= 64)
118         RC = &RISCV::VRRegClass;
119       else if (Size == 128)
120         RC = &RISCV::VRM2RegClass;
121       else if (Size == 256)
122         RC = &RISCV::VRM4RegClass;
123       else
124         RC = &RISCV::VRM8RegClass;
125 
126       addRegisterClass(VT, RC);
127     };
128 
129     for (MVT VT : BoolVecVTs)
130       addRegClassForRVV(VT);
131     for (MVT VT : IntVecVTs) {
132       if (VT.getVectorElementType() == MVT::i64 &&
133           !Subtarget.hasVInstructionsI64())
134         continue;
135       addRegClassForRVV(VT);
136     }
137 
138     if (Subtarget.hasVInstructionsF16())
139       for (MVT VT : F16VecVTs)
140         addRegClassForRVV(VT);
141 
142     if (Subtarget.hasVInstructionsF32())
143       for (MVT VT : F32VecVTs)
144         addRegClassForRVV(VT);
145 
146     if (Subtarget.hasVInstructionsF64())
147       for (MVT VT : F64VecVTs)
148         addRegClassForRVV(VT);
149 
150     if (Subtarget.useRVVForFixedLengthVectors()) {
151       auto addRegClassForFixedVectors = [this](MVT VT) {
152         MVT ContainerVT = getContainerForFixedLengthVector(VT);
153         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
154         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
155         addRegisterClass(VT, TRI.getRegClass(RCID));
156       };
157       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
158         if (useRVVForFixedLengthVectorVT(VT))
159           addRegClassForFixedVectors(VT);
160 
161       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164     }
165   }
166 
167   // Compute derived properties from the register classes.
168   computeRegisterProperties(STI.getRegisterInfo());
169 
170   setStackPointerRegisterToSaveRestore(RISCV::X2);
171 
172   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
173     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
174 
175   // TODO: add all necessary setOperationAction calls.
176   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
177 
178   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
179   setOperationAction(ISD::BR_CC, XLenVT, Expand);
180   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
181   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
182 
183   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
184   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction(ISD::VAARG, MVT::Other, Expand);
188   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
189   setOperationAction(ISD::VAEND, MVT::Other, Expand);
190 
191   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
192   if (!Subtarget.hasStdExtZbb()) {
193     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
195   }
196 
197   if (Subtarget.is64Bit()) {
198     setOperationAction(ISD::ADD, MVT::i32, Custom);
199     setOperationAction(ISD::SUB, MVT::i32, Custom);
200     setOperationAction(ISD::SHL, MVT::i32, Custom);
201     setOperationAction(ISD::SRA, MVT::i32, Custom);
202     setOperationAction(ISD::SRL, MVT::i32, Custom);
203 
204     setOperationAction(ISD::UADDO, MVT::i32, Custom);
205     setOperationAction(ISD::USUBO, MVT::i32, Custom);
206     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
207     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
208   } else {
209     setLibcallName(RTLIB::SHL_I128, nullptr);
210     setLibcallName(RTLIB::SRL_I128, nullptr);
211     setLibcallName(RTLIB::SRA_I128, nullptr);
212     setLibcallName(RTLIB::MUL_I128, nullptr);
213     setLibcallName(RTLIB::MULO_I64, nullptr);
214   }
215 
216   if (!Subtarget.hasStdExtM()) {
217     setOperationAction(ISD::MUL, XLenVT, Expand);
218     setOperationAction(ISD::MULHS, XLenVT, Expand);
219     setOperationAction(ISD::MULHU, XLenVT, Expand);
220     setOperationAction(ISD::SDIV, XLenVT, Expand);
221     setOperationAction(ISD::UDIV, XLenVT, Expand);
222     setOperationAction(ISD::SREM, XLenVT, Expand);
223     setOperationAction(ISD::UREM, XLenVT, Expand);
224   } else {
225     if (Subtarget.is64Bit()) {
226       setOperationAction(ISD::MUL, MVT::i32, Custom);
227       setOperationAction(ISD::MUL, MVT::i128, Custom);
228 
229       setOperationAction(ISD::SDIV, MVT::i8, Custom);
230       setOperationAction(ISD::UDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UREM, MVT::i8, Custom);
232       setOperationAction(ISD::SDIV, MVT::i16, Custom);
233       setOperationAction(ISD::UDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UREM, MVT::i16, Custom);
235       setOperationAction(ISD::SDIV, MVT::i32, Custom);
236       setOperationAction(ISD::UDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UREM, MVT::i32, Custom);
238     } else {
239       setOperationAction(ISD::MUL, MVT::i64, Custom);
240     }
241   }
242 
243   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
244   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
246   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
247 
248   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
249   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
251 
252   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
253     if (Subtarget.is64Bit()) {
254       setOperationAction(ISD::ROTL, MVT::i32, Custom);
255       setOperationAction(ISD::ROTR, MVT::i32, Custom);
256     }
257   } else {
258     setOperationAction(ISD::ROTL, XLenVT, Expand);
259     setOperationAction(ISD::ROTR, XLenVT, Expand);
260   }
261 
262   if (Subtarget.hasStdExtZbp()) {
263     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
264     // more combining.
265     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
266     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
267     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
268     // BSWAP i8 doesn't exist.
269     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
270     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
271 
272     if (Subtarget.is64Bit()) {
273       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
274       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
275     }
276   } else {
277     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
278     // pattern match it directly in isel.
279     setOperationAction(ISD::BSWAP, XLenVT,
280                        Subtarget.hasStdExtZbb() ? Legal : Expand);
281   }
282 
283   if (Subtarget.hasStdExtZbb()) {
284     setOperationAction(ISD::SMIN, XLenVT, Legal);
285     setOperationAction(ISD::SMAX, XLenVT, Legal);
286     setOperationAction(ISD::UMIN, XLenVT, Legal);
287     setOperationAction(ISD::UMAX, XLenVT, Legal);
288 
289     if (Subtarget.is64Bit()) {
290       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
291       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
292       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
293       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
294     }
295   } else {
296     setOperationAction(ISD::CTTZ, XLenVT, Expand);
297     setOperationAction(ISD::CTLZ, XLenVT, Expand);
298     setOperationAction(ISD::CTPOP, XLenVT, Expand);
299   }
300 
301   if (Subtarget.hasStdExtZbt()) {
302     setOperationAction(ISD::FSHL, XLenVT, Custom);
303     setOperationAction(ISD::FSHR, XLenVT, Custom);
304     setOperationAction(ISD::SELECT, XLenVT, Legal);
305 
306     if (Subtarget.is64Bit()) {
307       setOperationAction(ISD::FSHL, MVT::i32, Custom);
308       setOperationAction(ISD::FSHR, MVT::i32, Custom);
309     }
310   } else {
311     setOperationAction(ISD::SELECT, XLenVT, Custom);
312   }
313 
314   static const ISD::CondCode FPCCToExpand[] = {
315       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
316       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
317       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
318 
319   static const ISD::NodeType FPOpToExpand[] = {
320       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
321       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
322 
323   if (Subtarget.hasStdExtZfh())
324     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
325 
326   if (Subtarget.hasStdExtZfh()) {
327     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
328     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
329     setOperationAction(ISD::LRINT, MVT::f16, Legal);
330     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
331     setOperationAction(ISD::LROUND, MVT::f16, Legal);
332     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
333     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
334     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
335     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
336     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
337     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
338     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
339     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
344     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
345     for (auto CC : FPCCToExpand)
346       setCondCodeAction(CC, MVT::f16, Expand);
347     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
348     setOperationAction(ISD::SELECT, MVT::f16, Custom);
349     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
350 
351     setOperationAction(ISD::FREM,       MVT::f16, Promote);
352     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
353     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
354     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
355     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
356     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
357     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
358     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
359     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
360     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
361     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
362     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
363     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
364     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
365     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
366     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
367     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
368     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
369 
370     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
371     // complete support for all operations in LegalizeDAG.
372 
373     // We need to custom promote this.
374     if (Subtarget.is64Bit())
375       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
376   }
377 
378   if (Subtarget.hasStdExtF()) {
379     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
380     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
381     setOperationAction(ISD::LRINT, MVT::f32, Legal);
382     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
383     setOperationAction(ISD::LROUND, MVT::f32, Legal);
384     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
385     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
386     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
387     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
388     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
389     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
390     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
391     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
392     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
393     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
395     for (auto CC : FPCCToExpand)
396       setCondCodeAction(CC, MVT::f32, Expand);
397     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
398     setOperationAction(ISD::SELECT, MVT::f32, Custom);
399     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
400     for (auto Op : FPOpToExpand)
401       setOperationAction(Op, MVT::f32, Expand);
402     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
403     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
404   }
405 
406   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
407     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
408 
409   if (Subtarget.hasStdExtD()) {
410     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
411     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
412     setOperationAction(ISD::LRINT, MVT::f64, Legal);
413     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
414     setOperationAction(ISD::LROUND, MVT::f64, Legal);
415     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
416     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
417     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
418     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
419     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
420     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
421     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
422     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
423     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
424     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
425     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
426     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
428     for (auto CC : FPCCToExpand)
429       setCondCodeAction(CC, MVT::f64, Expand);
430     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
431     setOperationAction(ISD::SELECT, MVT::f64, Custom);
432     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
433     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
434     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
435     for (auto Op : FPOpToExpand)
436       setOperationAction(Op, MVT::f64, Expand);
437     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
438     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
439   }
440 
441   if (Subtarget.is64Bit()) {
442     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
443     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
444     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
445     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
446   }
447 
448   if (Subtarget.hasStdExtF()) {
449     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
450     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
451 
452     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
453     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
454     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
455     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
456 
457     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
458     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
459   }
460 
461   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
462   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
463   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
464   setOperationAction(ISD::JumpTable, XLenVT, Custom);
465 
466   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
467 
468   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
469   // Unfortunately this can't be determined just from the ISA naming string.
470   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
471                      Subtarget.is64Bit() ? Legal : Custom);
472 
473   setOperationAction(ISD::TRAP, MVT::Other, Legal);
474   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
475   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
476   if (Subtarget.is64Bit())
477     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
478 
479   if (Subtarget.hasStdExtA()) {
480     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
481     setMinCmpXchgSizeInBits(32);
482   } else {
483     setMaxAtomicSizeInBitsSupported(0);
484   }
485 
486   setBooleanContents(ZeroOrOneBooleanContent);
487 
488   if (Subtarget.hasVInstructions()) {
489     setBooleanVectorContents(ZeroOrOneBooleanContent);
490 
491     setOperationAction(ISD::VSCALE, XLenVT, Custom);
492 
493     // RVV intrinsics may have illegal operands.
494     // We also need to custom legalize vmv.x.s.
495     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
496     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
497     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
498     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
499     if (Subtarget.is64Bit()) {
500       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
501     } else {
502       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
503       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
504     }
505 
506     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
507     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
508 
509     static const unsigned IntegerVPOps[] = {
510         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
511         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
512         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
513         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
514         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
515         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
516         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
517         ISD::VP_SELECT};
518 
519     static const unsigned FloatingPointVPOps[] = {
520         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
521         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
522         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_SELECT};
523 
524     if (!Subtarget.is64Bit()) {
525       // We must custom-lower certain vXi64 operations on RV32 due to the vector
526       // element type being illegal.
527       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
528       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
529 
530       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
531       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
532       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
533       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
534       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
535       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
536       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
537       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
538 
539       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
540       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
541       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
542       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
543       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
544       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
545       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
546       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
547     }
548 
549     for (MVT VT : BoolVecVTs) {
550       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
551 
552       // Mask VTs are custom-expanded into a series of standard nodes
553       setOperationAction(ISD::TRUNCATE, VT, Custom);
554       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
555       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
556       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
557 
558       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
559       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
560 
561       setOperationAction(ISD::SELECT, VT, Custom);
562       setOperationAction(ISD::SELECT_CC, VT, Expand);
563       setOperationAction(ISD::VSELECT, VT, Expand);
564       setOperationAction(ISD::VP_SELECT, VT, Expand);
565 
566       setOperationAction(ISD::VP_AND, VT, Custom);
567       setOperationAction(ISD::VP_OR, VT, Custom);
568       setOperationAction(ISD::VP_XOR, VT, Custom);
569 
570       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
571       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
572       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
573 
574       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
575       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
576       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
577 
578       // RVV has native int->float & float->int conversions where the
579       // element type sizes are within one power-of-two of each other. Any
580       // wider distances between type sizes have to be lowered as sequences
581       // which progressively narrow the gap in stages.
582       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
583       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
584       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
585       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
586 
587       // Expand all extending loads to types larger than this, and truncating
588       // stores from types larger than this.
589       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
590         setTruncStoreAction(OtherVT, VT, Expand);
591         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
592         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
593         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
594       }
595     }
596 
597     for (MVT VT : IntVecVTs) {
598       if (VT.getVectorElementType() == MVT::i64 &&
599           !Subtarget.hasVInstructionsI64())
600         continue;
601 
602       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
603       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
604 
605       // Vectors implement MULHS/MULHU.
606       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
607       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
608 
609       setOperationAction(ISD::SMIN, VT, Legal);
610       setOperationAction(ISD::SMAX, VT, Legal);
611       setOperationAction(ISD::UMIN, VT, Legal);
612       setOperationAction(ISD::UMAX, VT, Legal);
613 
614       setOperationAction(ISD::ROTL, VT, Expand);
615       setOperationAction(ISD::ROTR, VT, Expand);
616 
617       setOperationAction(ISD::CTTZ, VT, Expand);
618       setOperationAction(ISD::CTLZ, VT, Expand);
619       setOperationAction(ISD::CTPOP, VT, Expand);
620 
621       setOperationAction(ISD::BSWAP, VT, Expand);
622 
623       // Custom-lower extensions and truncations from/to mask types.
624       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
625       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
626       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
627 
628       // RVV has native int->float & float->int conversions where the
629       // element type sizes are within one power-of-two of each other. Any
630       // wider distances between type sizes have to be lowered as sequences
631       // which progressively narrow the gap in stages.
632       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
633       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
634       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
635       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
636 
637       setOperationAction(ISD::SADDSAT, VT, Legal);
638       setOperationAction(ISD::UADDSAT, VT, Legal);
639       setOperationAction(ISD::SSUBSAT, VT, Legal);
640       setOperationAction(ISD::USUBSAT, VT, Legal);
641 
642       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
643       // nodes which truncate by one power of two at a time.
644       setOperationAction(ISD::TRUNCATE, VT, Custom);
645 
646       // Custom-lower insert/extract operations to simplify patterns.
647       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
648       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
649 
650       // Custom-lower reduction operations to set up the corresponding custom
651       // nodes' operands.
652       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
653       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
654       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
655       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
656       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
657       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
658       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
659       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
660 
661       for (unsigned VPOpc : IntegerVPOps)
662         setOperationAction(VPOpc, VT, Custom);
663 
664       setOperationAction(ISD::LOAD, VT, Custom);
665       setOperationAction(ISD::STORE, VT, Custom);
666 
667       setOperationAction(ISD::MLOAD, VT, Custom);
668       setOperationAction(ISD::MSTORE, VT, Custom);
669       setOperationAction(ISD::MGATHER, VT, Custom);
670       setOperationAction(ISD::MSCATTER, VT, Custom);
671 
672       setOperationAction(ISD::VP_LOAD, VT, Custom);
673       setOperationAction(ISD::VP_STORE, VT, Custom);
674       setOperationAction(ISD::VP_GATHER, VT, Custom);
675       setOperationAction(ISD::VP_SCATTER, VT, Custom);
676 
677       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
678       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
679       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
680 
681       setOperationAction(ISD::SELECT, VT, Custom);
682       setOperationAction(ISD::SELECT_CC, VT, Expand);
683 
684       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
685       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
686 
687       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
688         setTruncStoreAction(VT, OtherVT, Expand);
689         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
690         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
691         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
692       }
693 
694       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
695       // type that can represent the value exactly.
696       if (VT.getVectorElementType() != MVT::i64) {
697         MVT FloatEltVT =
698             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
699         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
700         if (isTypeLegal(FloatVT)) {
701           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
702           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
703         }
704       }
705     }
706 
707     // Expand various CCs to best match the RVV ISA, which natively supports UNE
708     // but no other unordered comparisons, and supports all ordered comparisons
709     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
710     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
711     // and we pattern-match those back to the "original", swapping operands once
712     // more. This way we catch both operations and both "vf" and "fv" forms with
713     // fewer patterns.
714     static const ISD::CondCode VFPCCToExpand[] = {
715         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
716         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
717         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
718     };
719 
720     // Sets common operation actions on RVV floating-point vector types.
721     const auto SetCommonVFPActions = [&](MVT VT) {
722       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
723       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
724       // sizes are within one power-of-two of each other. Therefore conversions
725       // between vXf16 and vXf64 must be lowered as sequences which convert via
726       // vXf32.
727       setOperationAction(ISD::FP_ROUND, VT, Custom);
728       setOperationAction(ISD::FP_EXTEND, VT, Custom);
729       // Custom-lower insert/extract operations to simplify patterns.
730       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
731       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
732       // Expand various condition codes (explained above).
733       for (auto CC : VFPCCToExpand)
734         setCondCodeAction(CC, VT, Expand);
735 
736       setOperationAction(ISD::FMINNUM, VT, Legal);
737       setOperationAction(ISD::FMAXNUM, VT, Legal);
738 
739       setOperationAction(ISD::FTRUNC, VT, Custom);
740       setOperationAction(ISD::FCEIL, VT, Custom);
741       setOperationAction(ISD::FFLOOR, VT, Custom);
742 
743       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
744       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
745       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
746       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
747 
748       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
749 
750       setOperationAction(ISD::LOAD, VT, Custom);
751       setOperationAction(ISD::STORE, VT, Custom);
752 
753       setOperationAction(ISD::MLOAD, VT, Custom);
754       setOperationAction(ISD::MSTORE, VT, Custom);
755       setOperationAction(ISD::MGATHER, VT, Custom);
756       setOperationAction(ISD::MSCATTER, VT, Custom);
757 
758       setOperationAction(ISD::VP_LOAD, VT, Custom);
759       setOperationAction(ISD::VP_STORE, VT, Custom);
760       setOperationAction(ISD::VP_GATHER, VT, Custom);
761       setOperationAction(ISD::VP_SCATTER, VT, Custom);
762 
763       setOperationAction(ISD::SELECT, VT, Custom);
764       setOperationAction(ISD::SELECT_CC, VT, Expand);
765 
766       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
767       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
768       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
769 
770       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
771 
772       for (unsigned VPOpc : FloatingPointVPOps)
773         setOperationAction(VPOpc, VT, Custom);
774     };
775 
776     // Sets common extload/truncstore actions on RVV floating-point vector
777     // types.
778     const auto SetCommonVFPExtLoadTruncStoreActions =
779         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
780           for (auto SmallVT : SmallerVTs) {
781             setTruncStoreAction(VT, SmallVT, Expand);
782             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
783           }
784         };
785 
786     if (Subtarget.hasVInstructionsF16())
787       for (MVT VT : F16VecVTs)
788         SetCommonVFPActions(VT);
789 
790     for (MVT VT : F32VecVTs) {
791       if (Subtarget.hasVInstructionsF32())
792         SetCommonVFPActions(VT);
793       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
794     }
795 
796     for (MVT VT : F64VecVTs) {
797       if (Subtarget.hasVInstructionsF64())
798         SetCommonVFPActions(VT);
799       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
800       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
801     }
802 
803     if (Subtarget.useRVVForFixedLengthVectors()) {
804       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
805         if (!useRVVForFixedLengthVectorVT(VT))
806           continue;
807 
808         // By default everything must be expanded.
809         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
810           setOperationAction(Op, VT, Expand);
811         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
812           setTruncStoreAction(VT, OtherVT, Expand);
813           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
814           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
815           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
816         }
817 
818         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
819         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
820         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
821 
822         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
823         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
824 
825         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
826         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
827 
828         setOperationAction(ISD::LOAD, VT, Custom);
829         setOperationAction(ISD::STORE, VT, Custom);
830 
831         setOperationAction(ISD::SETCC, VT, Custom);
832 
833         setOperationAction(ISD::SELECT, VT, Custom);
834 
835         setOperationAction(ISD::TRUNCATE, VT, Custom);
836 
837         setOperationAction(ISD::BITCAST, VT, Custom);
838 
839         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
840         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
841         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
842 
843         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
844         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
845         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
846 
847         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
848         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
849         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
850         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
851 
852         // Operations below are different for between masks and other vectors.
853         if (VT.getVectorElementType() == MVT::i1) {
854           setOperationAction(ISD::VP_AND, VT, Custom);
855           setOperationAction(ISD::VP_OR, VT, Custom);
856           setOperationAction(ISD::VP_XOR, VT, Custom);
857           setOperationAction(ISD::AND, VT, Custom);
858           setOperationAction(ISD::OR, VT, Custom);
859           setOperationAction(ISD::XOR, VT, Custom);
860           continue;
861         }
862 
863         // Use SPLAT_VECTOR to prevent type legalization from destroying the
864         // splats when type legalizing i64 scalar on RV32.
865         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
866         // improvements first.
867         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
868           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
869           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
870         }
871 
872         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
873         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
874 
875         setOperationAction(ISD::MLOAD, VT, Custom);
876         setOperationAction(ISD::MSTORE, VT, Custom);
877         setOperationAction(ISD::MGATHER, VT, Custom);
878         setOperationAction(ISD::MSCATTER, VT, Custom);
879 
880         setOperationAction(ISD::VP_LOAD, VT, Custom);
881         setOperationAction(ISD::VP_STORE, VT, Custom);
882         setOperationAction(ISD::VP_GATHER, VT, Custom);
883         setOperationAction(ISD::VP_SCATTER, VT, Custom);
884 
885         setOperationAction(ISD::ADD, VT, Custom);
886         setOperationAction(ISD::MUL, VT, Custom);
887         setOperationAction(ISD::SUB, VT, Custom);
888         setOperationAction(ISD::AND, VT, Custom);
889         setOperationAction(ISD::OR, VT, Custom);
890         setOperationAction(ISD::XOR, VT, Custom);
891         setOperationAction(ISD::SDIV, VT, Custom);
892         setOperationAction(ISD::SREM, VT, Custom);
893         setOperationAction(ISD::UDIV, VT, Custom);
894         setOperationAction(ISD::UREM, VT, Custom);
895         setOperationAction(ISD::SHL, VT, Custom);
896         setOperationAction(ISD::SRA, VT, Custom);
897         setOperationAction(ISD::SRL, VT, Custom);
898 
899         setOperationAction(ISD::SMIN, VT, Custom);
900         setOperationAction(ISD::SMAX, VT, Custom);
901         setOperationAction(ISD::UMIN, VT, Custom);
902         setOperationAction(ISD::UMAX, VT, Custom);
903         setOperationAction(ISD::ABS,  VT, Custom);
904 
905         setOperationAction(ISD::MULHS, VT, Custom);
906         setOperationAction(ISD::MULHU, VT, Custom);
907 
908         setOperationAction(ISD::SADDSAT, VT, Custom);
909         setOperationAction(ISD::UADDSAT, VT, Custom);
910         setOperationAction(ISD::SSUBSAT, VT, Custom);
911         setOperationAction(ISD::USUBSAT, VT, Custom);
912 
913         setOperationAction(ISD::VSELECT, VT, Custom);
914         setOperationAction(ISD::SELECT_CC, VT, Expand);
915 
916         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
917         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
918         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
919 
920         // Custom-lower reduction operations to set up the corresponding custom
921         // nodes' operands.
922         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
923         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
924         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
925         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
926         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
927 
928         for (unsigned VPOpc : IntegerVPOps)
929           setOperationAction(VPOpc, VT, Custom);
930 
931         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
932         // type that can represent the value exactly.
933         if (VT.getVectorElementType() != MVT::i64) {
934           MVT FloatEltVT =
935               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
936           EVT FloatVT =
937               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
938           if (isTypeLegal(FloatVT)) {
939             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
940             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
941           }
942         }
943       }
944 
945       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
946         if (!useRVVForFixedLengthVectorVT(VT))
947           continue;
948 
949         // By default everything must be expanded.
950         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
951           setOperationAction(Op, VT, Expand);
952         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
953           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
954           setTruncStoreAction(VT, OtherVT, Expand);
955         }
956 
957         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
958         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
959         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
960 
961         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
962         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
963         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
964         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
965         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
966 
967         setOperationAction(ISD::LOAD, VT, Custom);
968         setOperationAction(ISD::STORE, VT, Custom);
969         setOperationAction(ISD::MLOAD, VT, Custom);
970         setOperationAction(ISD::MSTORE, VT, Custom);
971         setOperationAction(ISD::MGATHER, VT, Custom);
972         setOperationAction(ISD::MSCATTER, VT, Custom);
973 
974         setOperationAction(ISD::VP_LOAD, VT, Custom);
975         setOperationAction(ISD::VP_STORE, VT, Custom);
976         setOperationAction(ISD::VP_GATHER, VT, Custom);
977         setOperationAction(ISD::VP_SCATTER, VT, Custom);
978 
979         setOperationAction(ISD::FADD, VT, Custom);
980         setOperationAction(ISD::FSUB, VT, Custom);
981         setOperationAction(ISD::FMUL, VT, Custom);
982         setOperationAction(ISD::FDIV, VT, Custom);
983         setOperationAction(ISD::FNEG, VT, Custom);
984         setOperationAction(ISD::FABS, VT, Custom);
985         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
986         setOperationAction(ISD::FSQRT, VT, Custom);
987         setOperationAction(ISD::FMA, VT, Custom);
988         setOperationAction(ISD::FMINNUM, VT, Custom);
989         setOperationAction(ISD::FMAXNUM, VT, Custom);
990 
991         setOperationAction(ISD::FP_ROUND, VT, Custom);
992         setOperationAction(ISD::FP_EXTEND, VT, Custom);
993 
994         setOperationAction(ISD::FTRUNC, VT, Custom);
995         setOperationAction(ISD::FCEIL, VT, Custom);
996         setOperationAction(ISD::FFLOOR, VT, Custom);
997 
998         for (auto CC : VFPCCToExpand)
999           setCondCodeAction(CC, VT, Expand);
1000 
1001         setOperationAction(ISD::VSELECT, VT, Custom);
1002         setOperationAction(ISD::SELECT, VT, Custom);
1003         setOperationAction(ISD::SELECT_CC, VT, Expand);
1004 
1005         setOperationAction(ISD::BITCAST, VT, Custom);
1006 
1007         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1008         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1009         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1010         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1011 
1012         for (unsigned VPOpc : FloatingPointVPOps)
1013           setOperationAction(VPOpc, VT, Custom);
1014       }
1015 
1016       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1017       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1018       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1019       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1020       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1021       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1022       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1023       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1024     }
1025   }
1026 
1027   // Function alignments.
1028   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1029   setMinFunctionAlignment(FunctionAlignment);
1030   setPrefFunctionAlignment(FunctionAlignment);
1031 
1032   setMinimumJumpTableEntries(5);
1033 
1034   // Jumps are expensive, compared to logic
1035   setJumpIsExpensive();
1036 
1037   setTargetDAGCombine(ISD::ADD);
1038   setTargetDAGCombine(ISD::SUB);
1039   setTargetDAGCombine(ISD::AND);
1040   setTargetDAGCombine(ISD::OR);
1041   setTargetDAGCombine(ISD::XOR);
1042   setTargetDAGCombine(ISD::ANY_EXTEND);
1043   setTargetDAGCombine(ISD::ZERO_EXTEND);
1044   if (Subtarget.hasVInstructions()) {
1045     setTargetDAGCombine(ISD::FCOPYSIGN);
1046     setTargetDAGCombine(ISD::MGATHER);
1047     setTargetDAGCombine(ISD::MSCATTER);
1048     setTargetDAGCombine(ISD::VP_GATHER);
1049     setTargetDAGCombine(ISD::VP_SCATTER);
1050     setTargetDAGCombine(ISD::SRA);
1051     setTargetDAGCombine(ISD::SRL);
1052     setTargetDAGCombine(ISD::SHL);
1053     setTargetDAGCombine(ISD::STORE);
1054   }
1055 }
1056 
1057 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1058                                             LLVMContext &Context,
1059                                             EVT VT) const {
1060   if (!VT.isVector())
1061     return getPointerTy(DL);
1062   if (Subtarget.hasVInstructions() &&
1063       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1064     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1065   return VT.changeVectorElementTypeToInteger();
1066 }
1067 
1068 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1069   return Subtarget.getXLenVT();
1070 }
1071 
1072 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1073                                              const CallInst &I,
1074                                              MachineFunction &MF,
1075                                              unsigned Intrinsic) const {
1076   auto &DL = I.getModule()->getDataLayout();
1077   switch (Intrinsic) {
1078   default:
1079     return false;
1080   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1081   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1082   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1083   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1084   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1085   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1086   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1087   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1088   case Intrinsic::riscv_masked_cmpxchg_i32: {
1089     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
1090     Info.opc = ISD::INTRINSIC_W_CHAIN;
1091     Info.memVT = MVT::getVT(PtrTy->getElementType());
1092     Info.ptrVal = I.getArgOperand(0);
1093     Info.offset = 0;
1094     Info.align = Align(4);
1095     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1096                  MachineMemOperand::MOVolatile;
1097     return true;
1098   }
1099   case Intrinsic::riscv_masked_strided_load:
1100     Info.opc = ISD::INTRINSIC_W_CHAIN;
1101     Info.ptrVal = I.getArgOperand(1);
1102     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1103     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1104     Info.size = MemoryLocation::UnknownSize;
1105     Info.flags |= MachineMemOperand::MOLoad;
1106     return true;
1107   case Intrinsic::riscv_masked_strided_store:
1108     Info.opc = ISD::INTRINSIC_VOID;
1109     Info.ptrVal = I.getArgOperand(1);
1110     Info.memVT =
1111         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1112     Info.align = Align(
1113         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1114         8);
1115     Info.size = MemoryLocation::UnknownSize;
1116     Info.flags |= MachineMemOperand::MOStore;
1117     return true;
1118   }
1119 }
1120 
1121 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1122                                                 const AddrMode &AM, Type *Ty,
1123                                                 unsigned AS,
1124                                                 Instruction *I) const {
1125   // No global is ever allowed as a base.
1126   if (AM.BaseGV)
1127     return false;
1128 
1129   // Require a 12-bit signed offset.
1130   if (!isInt<12>(AM.BaseOffs))
1131     return false;
1132 
1133   switch (AM.Scale) {
1134   case 0: // "r+i" or just "i", depending on HasBaseReg.
1135     break;
1136   case 1:
1137     if (!AM.HasBaseReg) // allow "r+i".
1138       break;
1139     return false; // disallow "r+r" or "r+r+i".
1140   default:
1141     return false;
1142   }
1143 
1144   return true;
1145 }
1146 
1147 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1148   return isInt<12>(Imm);
1149 }
1150 
1151 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1152   return isInt<12>(Imm);
1153 }
1154 
1155 // On RV32, 64-bit integers are split into their high and low parts and held
1156 // in two different registers, so the trunc is free since the low register can
1157 // just be used.
1158 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1159   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1160     return false;
1161   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1162   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1163   return (SrcBits == 64 && DestBits == 32);
1164 }
1165 
1166 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1167   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1168       !SrcVT.isInteger() || !DstVT.isInteger())
1169     return false;
1170   unsigned SrcBits = SrcVT.getSizeInBits();
1171   unsigned DestBits = DstVT.getSizeInBits();
1172   return (SrcBits == 64 && DestBits == 32);
1173 }
1174 
1175 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1176   // Zexts are free if they can be combined with a load.
1177   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1178   // poorly with type legalization of compares preferring sext.
1179   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1180     EVT MemVT = LD->getMemoryVT();
1181     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1182         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1183          LD->getExtensionType() == ISD::ZEXTLOAD))
1184       return true;
1185   }
1186 
1187   return TargetLowering::isZExtFree(Val, VT2);
1188 }
1189 
1190 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1191   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1192 }
1193 
1194 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1195   return Subtarget.hasStdExtZbb();
1196 }
1197 
1198 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1199   return Subtarget.hasStdExtZbb();
1200 }
1201 
1202 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1203   EVT VT = Y.getValueType();
1204 
1205   // FIXME: Support vectors once we have tests.
1206   if (VT.isVector())
1207     return false;
1208 
1209   return Subtarget.hasStdExtZbb() && !isa<ConstantSDNode>(Y);
1210 }
1211 
1212 /// Check if sinking \p I's operands to I's basic block is profitable, because
1213 /// the operands can be folded into a target instruction, e.g.
1214 /// splats of scalars can fold into vector instructions.
1215 bool RISCVTargetLowering::shouldSinkOperands(
1216     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1217   using namespace llvm::PatternMatch;
1218 
1219   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1220     return false;
1221 
1222   auto IsSinker = [&](Instruction *I, int Operand) {
1223     switch (I->getOpcode()) {
1224     case Instruction::Add:
1225     case Instruction::Sub:
1226     case Instruction::Mul:
1227     case Instruction::And:
1228     case Instruction::Or:
1229     case Instruction::Xor:
1230     case Instruction::FAdd:
1231     case Instruction::FSub:
1232     case Instruction::FMul:
1233     case Instruction::FDiv:
1234     case Instruction::ICmp:
1235     case Instruction::FCmp:
1236       return true;
1237     case Instruction::Shl:
1238     case Instruction::LShr:
1239     case Instruction::AShr:
1240     case Instruction::UDiv:
1241     case Instruction::SDiv:
1242     case Instruction::URem:
1243     case Instruction::SRem:
1244       return Operand == 1;
1245     case Instruction::Call:
1246       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1247         switch (II->getIntrinsicID()) {
1248         case Intrinsic::fma:
1249           return Operand == 0 || Operand == 1;
1250         default:
1251           return false;
1252         }
1253       }
1254       return false;
1255     default:
1256       return false;
1257     }
1258   };
1259 
1260   for (auto OpIdx : enumerate(I->operands())) {
1261     if (!IsSinker(I, OpIdx.index()))
1262       continue;
1263 
1264     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1265     // Make sure we are not already sinking this operand
1266     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1267       continue;
1268 
1269     // We are looking for a splat that can be sunk.
1270     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1271                              m_Undef(), m_ZeroMask())))
1272       continue;
1273 
1274     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1275     // and vector registers
1276     for (Use &U : Op->uses()) {
1277       Instruction *Insn = cast<Instruction>(U.getUser());
1278       if (!IsSinker(Insn, U.getOperandNo()))
1279         return false;
1280     }
1281 
1282     Ops.push_back(&Op->getOperandUse(0));
1283     Ops.push_back(&OpIdx.value());
1284   }
1285   return true;
1286 }
1287 
1288 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1289                                        bool ForCodeSize) const {
1290   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1291   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1292     return false;
1293   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1294     return false;
1295   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1296     return false;
1297   if (Imm.isNegZero())
1298     return false;
1299   return Imm.isZero();
1300 }
1301 
1302 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1303   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1304          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1305          (VT == MVT::f64 && Subtarget.hasStdExtD());
1306 }
1307 
1308 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1309                                                       CallingConv::ID CC,
1310                                                       EVT VT) const {
1311   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1312   // We might still end up using a GPR but that will be decided based on ABI.
1313   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1314   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1315     return MVT::f32;
1316 
1317   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1318 }
1319 
1320 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1321                                                            CallingConv::ID CC,
1322                                                            EVT VT) const {
1323   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1324   // We might still end up using a GPR but that will be decided based on ABI.
1325   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1326   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1327     return 1;
1328 
1329   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1330 }
1331 
1332 // Changes the condition code and swaps operands if necessary, so the SetCC
1333 // operation matches one of the comparisons supported directly by branches
1334 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1335 // with 1/-1.
1336 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1337                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1338   // Convert X > -1 to X >= 0.
1339   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1340     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1341     CC = ISD::SETGE;
1342     return;
1343   }
1344   // Convert X < 1 to 0 >= X.
1345   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1346     RHS = LHS;
1347     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1348     CC = ISD::SETGE;
1349     return;
1350   }
1351 
1352   switch (CC) {
1353   default:
1354     break;
1355   case ISD::SETGT:
1356   case ISD::SETLE:
1357   case ISD::SETUGT:
1358   case ISD::SETULE:
1359     CC = ISD::getSetCCSwappedOperands(CC);
1360     std::swap(LHS, RHS);
1361     break;
1362   }
1363 }
1364 
1365 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1366   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1367   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1368   if (VT.getVectorElementType() == MVT::i1)
1369     KnownSize *= 8;
1370 
1371   switch (KnownSize) {
1372   default:
1373     llvm_unreachable("Invalid LMUL.");
1374   case 8:
1375     return RISCVII::VLMUL::LMUL_F8;
1376   case 16:
1377     return RISCVII::VLMUL::LMUL_F4;
1378   case 32:
1379     return RISCVII::VLMUL::LMUL_F2;
1380   case 64:
1381     return RISCVII::VLMUL::LMUL_1;
1382   case 128:
1383     return RISCVII::VLMUL::LMUL_2;
1384   case 256:
1385     return RISCVII::VLMUL::LMUL_4;
1386   case 512:
1387     return RISCVII::VLMUL::LMUL_8;
1388   }
1389 }
1390 
1391 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1392   switch (LMul) {
1393   default:
1394     llvm_unreachable("Invalid LMUL.");
1395   case RISCVII::VLMUL::LMUL_F8:
1396   case RISCVII::VLMUL::LMUL_F4:
1397   case RISCVII::VLMUL::LMUL_F2:
1398   case RISCVII::VLMUL::LMUL_1:
1399     return RISCV::VRRegClassID;
1400   case RISCVII::VLMUL::LMUL_2:
1401     return RISCV::VRM2RegClassID;
1402   case RISCVII::VLMUL::LMUL_4:
1403     return RISCV::VRM4RegClassID;
1404   case RISCVII::VLMUL::LMUL_8:
1405     return RISCV::VRM8RegClassID;
1406   }
1407 }
1408 
1409 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1410   RISCVII::VLMUL LMUL = getLMUL(VT);
1411   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1412       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1413       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1414       LMUL == RISCVII::VLMUL::LMUL_1) {
1415     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1416                   "Unexpected subreg numbering");
1417     return RISCV::sub_vrm1_0 + Index;
1418   }
1419   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1420     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1421                   "Unexpected subreg numbering");
1422     return RISCV::sub_vrm2_0 + Index;
1423   }
1424   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1425     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1426                   "Unexpected subreg numbering");
1427     return RISCV::sub_vrm4_0 + Index;
1428   }
1429   llvm_unreachable("Invalid vector type.");
1430 }
1431 
1432 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1433   if (VT.getVectorElementType() == MVT::i1)
1434     return RISCV::VRRegClassID;
1435   return getRegClassIDForLMUL(getLMUL(VT));
1436 }
1437 
1438 // Attempt to decompose a subvector insert/extract between VecVT and
1439 // SubVecVT via subregister indices. Returns the subregister index that
1440 // can perform the subvector insert/extract with the given element index, as
1441 // well as the index corresponding to any leftover subvectors that must be
1442 // further inserted/extracted within the register class for SubVecVT.
1443 std::pair<unsigned, unsigned>
1444 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1445     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1446     const RISCVRegisterInfo *TRI) {
1447   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1448                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1449                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1450                 "Register classes not ordered");
1451   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1452   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1453   // Try to compose a subregister index that takes us from the incoming
1454   // LMUL>1 register class down to the outgoing one. At each step we half
1455   // the LMUL:
1456   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1457   // Note that this is not guaranteed to find a subregister index, such as
1458   // when we are extracting from one VR type to another.
1459   unsigned SubRegIdx = RISCV::NoSubRegister;
1460   for (const unsigned RCID :
1461        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1462     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1463       VecVT = VecVT.getHalfNumVectorElementsVT();
1464       bool IsHi =
1465           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1466       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1467                                             getSubregIndexByMVT(VecVT, IsHi));
1468       if (IsHi)
1469         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1470     }
1471   return {SubRegIdx, InsertExtractIdx};
1472 }
1473 
1474 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1475 // stores for those types.
1476 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1477   return !Subtarget.useRVVForFixedLengthVectors() ||
1478          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1479 }
1480 
1481 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1482   if (ScalarTy->isPointerTy())
1483     return true;
1484 
1485   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1486       ScalarTy->isIntegerTy(32))
1487     return true;
1488 
1489   if (ScalarTy->isIntegerTy(64))
1490     return Subtarget.hasVInstructionsI64();
1491 
1492   if (ScalarTy->isHalfTy())
1493     return Subtarget.hasVInstructionsF16();
1494   if (ScalarTy->isFloatTy())
1495     return Subtarget.hasVInstructionsF32();
1496   if (ScalarTy->isDoubleTy())
1497     return Subtarget.hasVInstructionsF64();
1498 
1499   return false;
1500 }
1501 
1502 static bool useRVVForFixedLengthVectorVT(MVT VT,
1503                                          const RISCVSubtarget &Subtarget) {
1504   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1505   if (!Subtarget.useRVVForFixedLengthVectors())
1506     return false;
1507 
1508   // We only support a set of vector types with a consistent maximum fixed size
1509   // across all supported vector element types to avoid legalization issues.
1510   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1511   // fixed-length vector type we support is 1024 bytes.
1512   if (VT.getFixedSizeInBits() > 1024 * 8)
1513     return false;
1514 
1515   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1516 
1517   MVT EltVT = VT.getVectorElementType();
1518 
1519   // Don't use RVV for vectors we cannot scalarize if required.
1520   switch (EltVT.SimpleTy) {
1521   // i1 is supported but has different rules.
1522   default:
1523     return false;
1524   case MVT::i1:
1525     // Masks can only use a single register.
1526     if (VT.getVectorNumElements() > MinVLen)
1527       return false;
1528     MinVLen /= 8;
1529     break;
1530   case MVT::i8:
1531   case MVT::i16:
1532   case MVT::i32:
1533     break;
1534   case MVT::i64:
1535     if (!Subtarget.hasVInstructionsI64())
1536       return false;
1537     break;
1538   case MVT::f16:
1539     if (!Subtarget.hasVInstructionsF16())
1540       return false;
1541     break;
1542   case MVT::f32:
1543     if (!Subtarget.hasVInstructionsF32())
1544       return false;
1545     break;
1546   case MVT::f64:
1547     if (!Subtarget.hasVInstructionsF64())
1548       return false;
1549     break;
1550   }
1551 
1552   // Reject elements larger than ELEN.
1553   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1554     return false;
1555 
1556   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1557   // Don't use RVV for types that don't fit.
1558   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1559     return false;
1560 
1561   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1562   // the base fixed length RVV support in place.
1563   if (!VT.isPow2VectorType())
1564     return false;
1565 
1566   return true;
1567 }
1568 
1569 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1570   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1571 }
1572 
1573 // Return the largest legal scalable vector type that matches VT's element type.
1574 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1575                                             const RISCVSubtarget &Subtarget) {
1576   // This may be called before legal types are setup.
1577   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1578           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1579          "Expected legal fixed length vector!");
1580 
1581   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1582   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1583 
1584   MVT EltVT = VT.getVectorElementType();
1585   switch (EltVT.SimpleTy) {
1586   default:
1587     llvm_unreachable("unexpected element type for RVV container");
1588   case MVT::i1:
1589   case MVT::i8:
1590   case MVT::i16:
1591   case MVT::i32:
1592   case MVT::i64:
1593   case MVT::f16:
1594   case MVT::f32:
1595   case MVT::f64: {
1596     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1597     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1598     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1599     unsigned NumElts =
1600         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1601     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1602     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1603     return MVT::getScalableVectorVT(EltVT, NumElts);
1604   }
1605   }
1606 }
1607 
1608 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1609                                             const RISCVSubtarget &Subtarget) {
1610   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1611                                           Subtarget);
1612 }
1613 
1614 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1615   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1616 }
1617 
1618 // Grow V to consume an entire RVV register.
1619 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1620                                        const RISCVSubtarget &Subtarget) {
1621   assert(VT.isScalableVector() &&
1622          "Expected to convert into a scalable vector!");
1623   assert(V.getValueType().isFixedLengthVector() &&
1624          "Expected a fixed length vector operand!");
1625   SDLoc DL(V);
1626   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1627   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1628 }
1629 
1630 // Shrink V so it's just big enough to maintain a VT's worth of data.
1631 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1632                                          const RISCVSubtarget &Subtarget) {
1633   assert(VT.isFixedLengthVector() &&
1634          "Expected to convert into a fixed length vector!");
1635   assert(V.getValueType().isScalableVector() &&
1636          "Expected a scalable vector operand!");
1637   SDLoc DL(V);
1638   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1639   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1640 }
1641 
1642 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1643 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1644 // the vector type that it is contained in.
1645 static std::pair<SDValue, SDValue>
1646 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1647                 const RISCVSubtarget &Subtarget) {
1648   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1649   MVT XLenVT = Subtarget.getXLenVT();
1650   SDValue VL = VecVT.isFixedLengthVector()
1651                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1652                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1653   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1654   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1655   return {Mask, VL};
1656 }
1657 
1658 // As above but assuming the given type is a scalable vector type.
1659 static std::pair<SDValue, SDValue>
1660 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1661                         const RISCVSubtarget &Subtarget) {
1662   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1663   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1664 }
1665 
1666 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1667 // of either is (currently) supported. This can get us into an infinite loop
1668 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1669 // as a ..., etc.
1670 // Until either (or both) of these can reliably lower any node, reporting that
1671 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1672 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1673 // which is not desirable.
1674 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1675     EVT VT, unsigned DefinedValues) const {
1676   return false;
1677 }
1678 
1679 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1680   // Only splats are currently supported.
1681   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1682     return true;
1683 
1684   return false;
1685 }
1686 
1687 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1688                                   const RISCVSubtarget &Subtarget) {
1689   // RISCV FP-to-int conversions saturate to the destination register size, but
1690   // don't produce 0 for nan. We can use a conversion instruction and fix the
1691   // nan case with a compare and a select.
1692   SDValue Src = Op.getOperand(0);
1693 
1694   EVT DstVT = Op.getValueType();
1695   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1696 
1697   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1698   unsigned Opc;
1699   if (SatVT == DstVT)
1700     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1701   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1702     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1703   else
1704     return SDValue();
1705   // FIXME: Support other SatVTs by clamping before or after the conversion.
1706 
1707   SDLoc DL(Op);
1708   SDValue FpToInt = DAG.getNode(
1709       Opc, DL, DstVT, Src,
1710       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1711 
1712   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1713   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1714 }
1715 
1716 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1717 // and back. Taking care to avoid converting values that are nan or already
1718 // correct.
1719 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1720 // have FRM dependencies modeled yet.
1721 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1722   MVT VT = Op.getSimpleValueType();
1723   assert(VT.isVector() && "Unexpected type");
1724 
1725   SDLoc DL(Op);
1726 
1727   // Freeze the source since we are increasing the number of uses.
1728   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1729 
1730   // Truncate to integer and convert back to FP.
1731   MVT IntVT = VT.changeVectorElementTypeToInteger();
1732   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1733   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1734 
1735   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1736 
1737   if (Op.getOpcode() == ISD::FCEIL) {
1738     // If the truncated value is the greater than or equal to the original
1739     // value, we've computed the ceil. Otherwise, we went the wrong way and
1740     // need to increase by 1.
1741     // FIXME: This should use a masked operation. Handle here or in isel?
1742     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1743                                  DAG.getConstantFP(1.0, DL, VT));
1744     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1745     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1746   } else if (Op.getOpcode() == ISD::FFLOOR) {
1747     // If the truncated value is the less than or equal to the original value,
1748     // we've computed the floor. Otherwise, we went the wrong way and need to
1749     // decrease by 1.
1750     // FIXME: This should use a masked operation. Handle here or in isel?
1751     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1752                                  DAG.getConstantFP(1.0, DL, VT));
1753     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1754     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1755   }
1756 
1757   // Restore the original sign so that -0.0 is preserved.
1758   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1759 
1760   // Determine the largest integer that can be represented exactly. This and
1761   // values larger than it don't have any fractional bits so don't need to
1762   // be converted.
1763   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1764   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1765   APFloat MaxVal = APFloat(FltSem);
1766   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1767                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1768   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1769 
1770   // If abs(Src) was larger than MaxVal or nan, keep it.
1771   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1772   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1773   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1774 }
1775 
1776 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1777                                  const RISCVSubtarget &Subtarget) {
1778   MVT VT = Op.getSimpleValueType();
1779   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1780 
1781   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1782 
1783   SDLoc DL(Op);
1784   SDValue Mask, VL;
1785   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1786 
1787   unsigned Opc =
1788       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1789   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1790   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1791 }
1792 
1793 struct VIDSequence {
1794   int64_t StepNumerator;
1795   unsigned StepDenominator;
1796   int64_t Addend;
1797 };
1798 
1799 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1800 // to the (non-zero) step S and start value X. This can be then lowered as the
1801 // RVV sequence (VID * S) + X, for example.
1802 // The step S is represented as an integer numerator divided by a positive
1803 // denominator. Note that the implementation currently only identifies
1804 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1805 // cannot detect 2/3, for example.
1806 // Note that this method will also match potentially unappealing index
1807 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1808 // determine whether this is worth generating code for.
1809 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1810   unsigned NumElts = Op.getNumOperands();
1811   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1812   if (!Op.getValueType().isInteger())
1813     return None;
1814 
1815   Optional<unsigned> SeqStepDenom;
1816   Optional<int64_t> SeqStepNum, SeqAddend;
1817   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1818   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1819   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1820     // Assume undef elements match the sequence; we just have to be careful
1821     // when interpolating across them.
1822     if (Op.getOperand(Idx).isUndef())
1823       continue;
1824     // The BUILD_VECTOR must be all constants.
1825     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1826       return None;
1827 
1828     uint64_t Val = Op.getConstantOperandVal(Idx) &
1829                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1830 
1831     if (PrevElt) {
1832       // Calculate the step since the last non-undef element, and ensure
1833       // it's consistent across the entire sequence.
1834       unsigned IdxDiff = Idx - PrevElt->second;
1835       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1836 
1837       // A zero-value value difference means that we're somewhere in the middle
1838       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1839       // step change before evaluating the sequence.
1840       if (ValDiff != 0) {
1841         int64_t Remainder = ValDiff % IdxDiff;
1842         // Normalize the step if it's greater than 1.
1843         if (Remainder != ValDiff) {
1844           // The difference must cleanly divide the element span.
1845           if (Remainder != 0)
1846             return None;
1847           ValDiff /= IdxDiff;
1848           IdxDiff = 1;
1849         }
1850 
1851         if (!SeqStepNum)
1852           SeqStepNum = ValDiff;
1853         else if (ValDiff != SeqStepNum)
1854           return None;
1855 
1856         if (!SeqStepDenom)
1857           SeqStepDenom = IdxDiff;
1858         else if (IdxDiff != *SeqStepDenom)
1859           return None;
1860       }
1861     }
1862 
1863     // Record and/or check any addend.
1864     if (SeqStepNum && SeqStepDenom) {
1865       uint64_t ExpectedVal =
1866           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1867       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1868       if (!SeqAddend)
1869         SeqAddend = Addend;
1870       else if (SeqAddend != Addend)
1871         return None;
1872     }
1873 
1874     // Record this non-undef element for later.
1875     if (!PrevElt || PrevElt->first != Val)
1876       PrevElt = std::make_pair(Val, Idx);
1877   }
1878   // We need to have logged both a step and an addend for this to count as
1879   // a legal index sequence.
1880   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1881     return None;
1882 
1883   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1884 }
1885 
1886 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1887                                  const RISCVSubtarget &Subtarget) {
1888   MVT VT = Op.getSimpleValueType();
1889   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1890 
1891   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1892 
1893   SDLoc DL(Op);
1894   SDValue Mask, VL;
1895   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1896 
1897   MVT XLenVT = Subtarget.getXLenVT();
1898   unsigned NumElts = Op.getNumOperands();
1899 
1900   if (VT.getVectorElementType() == MVT::i1) {
1901     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1902       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1903       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1904     }
1905 
1906     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1907       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1908       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1909     }
1910 
1911     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1912     // scalar integer chunks whose bit-width depends on the number of mask
1913     // bits and XLEN.
1914     // First, determine the most appropriate scalar integer type to use. This
1915     // is at most XLenVT, but may be shrunk to a smaller vector element type
1916     // according to the size of the final vector - use i8 chunks rather than
1917     // XLenVT if we're producing a v8i1. This results in more consistent
1918     // codegen across RV32 and RV64.
1919     unsigned NumViaIntegerBits =
1920         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1921     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1922       // If we have to use more than one INSERT_VECTOR_ELT then this
1923       // optimization is likely to increase code size; avoid peforming it in
1924       // such a case. We can use a load from a constant pool in this case.
1925       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1926         return SDValue();
1927       // Now we can create our integer vector type. Note that it may be larger
1928       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1929       MVT IntegerViaVecVT =
1930           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1931                            divideCeil(NumElts, NumViaIntegerBits));
1932 
1933       uint64_t Bits = 0;
1934       unsigned BitPos = 0, IntegerEltIdx = 0;
1935       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1936 
1937       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1938         // Once we accumulate enough bits to fill our scalar type, insert into
1939         // our vector and clear our accumulated data.
1940         if (I != 0 && I % NumViaIntegerBits == 0) {
1941           if (NumViaIntegerBits <= 32)
1942             Bits = SignExtend64(Bits, 32);
1943           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1944           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1945                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1946           Bits = 0;
1947           BitPos = 0;
1948           IntegerEltIdx++;
1949         }
1950         SDValue V = Op.getOperand(I);
1951         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1952         Bits |= ((uint64_t)BitValue << BitPos);
1953       }
1954 
1955       // Insert the (remaining) scalar value into position in our integer
1956       // vector type.
1957       if (NumViaIntegerBits <= 32)
1958         Bits = SignExtend64(Bits, 32);
1959       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1960       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1961                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1962 
1963       if (NumElts < NumViaIntegerBits) {
1964         // If we're producing a smaller vector than our minimum legal integer
1965         // type, bitcast to the equivalent (known-legal) mask type, and extract
1966         // our final mask.
1967         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1968         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1969         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1970                           DAG.getConstant(0, DL, XLenVT));
1971       } else {
1972         // Else we must have produced an integer type with the same size as the
1973         // mask type; bitcast for the final result.
1974         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1975         Vec = DAG.getBitcast(VT, Vec);
1976       }
1977 
1978       return Vec;
1979     }
1980 
1981     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1982     // vector type, we have a legal equivalently-sized i8 type, so we can use
1983     // that.
1984     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1985     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1986 
1987     SDValue WideVec;
1988     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1989       // For a splat, perform a scalar truncate before creating the wider
1990       // vector.
1991       assert(Splat.getValueType() == XLenVT &&
1992              "Unexpected type for i1 splat value");
1993       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1994                           DAG.getConstant(1, DL, XLenVT));
1995       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1996     } else {
1997       SmallVector<SDValue, 8> Ops(Op->op_values());
1998       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1999       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2000       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2001     }
2002 
2003     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2004   }
2005 
2006   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2007     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2008                                         : RISCVISD::VMV_V_X_VL;
2009     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
2010     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2011   }
2012 
2013   // Try and match index sequences, which we can lower to the vid instruction
2014   // with optional modifications. An all-undef vector is matched by
2015   // getSplatValue, above.
2016   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2017     int64_t StepNumerator = SimpleVID->StepNumerator;
2018     unsigned StepDenominator = SimpleVID->StepDenominator;
2019     int64_t Addend = SimpleVID->Addend;
2020 
2021     assert(StepNumerator != 0 && "Invalid step");
2022     bool Negate = false;
2023     int64_t SplatStepVal = StepNumerator;
2024     unsigned StepOpcode = ISD::MUL;
2025     if (StepNumerator != 1) {
2026       if (isPowerOf2_64(std::abs(StepNumerator))) {
2027         Negate = StepNumerator < 0;
2028         StepOpcode = ISD::SHL;
2029         SplatStepVal = Log2_64(std::abs(StepNumerator));
2030       }
2031     }
2032 
2033     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2034     // threshold since it's the immediate value many RVV instructions accept.
2035     // There is no vmul.vi instruction so ensure multiply constant can fit in
2036     // a single addi instruction.
2037     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2038          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2039         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2040       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2041       // Convert right out of the scalable type so we can use standard ISD
2042       // nodes for the rest of the computation. If we used scalable types with
2043       // these, we'd lose the fixed-length vector info and generate worse
2044       // vsetvli code.
2045       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2046       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2047           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2048         SDValue SplatStep = DAG.getSplatVector(
2049             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2050         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2051       }
2052       if (StepDenominator != 1) {
2053         SDValue SplatStep = DAG.getSplatVector(
2054             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2055         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2056       }
2057       if (Addend != 0 || Negate) {
2058         SDValue SplatAddend =
2059             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2060         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2061       }
2062       return VID;
2063     }
2064   }
2065 
2066   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2067   // when re-interpreted as a vector with a larger element type. For example,
2068   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2069   // could be instead splat as
2070   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2071   // TODO: This optimization could also work on non-constant splats, but it
2072   // would require bit-manipulation instructions to construct the splat value.
2073   SmallVector<SDValue> Sequence;
2074   unsigned EltBitSize = VT.getScalarSizeInBits();
2075   const auto *BV = cast<BuildVectorSDNode>(Op);
2076   if (VT.isInteger() && EltBitSize < 64 &&
2077       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2078       BV->getRepeatedSequence(Sequence) &&
2079       (Sequence.size() * EltBitSize) <= 64) {
2080     unsigned SeqLen = Sequence.size();
2081     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2082     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2083     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2084             ViaIntVT == MVT::i64) &&
2085            "Unexpected sequence type");
2086 
2087     unsigned EltIdx = 0;
2088     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2089     uint64_t SplatValue = 0;
2090     // Construct the amalgamated value which can be splatted as this larger
2091     // vector type.
2092     for (const auto &SeqV : Sequence) {
2093       if (!SeqV.isUndef())
2094         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2095                        << (EltIdx * EltBitSize));
2096       EltIdx++;
2097     }
2098 
2099     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2100     // achieve better constant materializion.
2101     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2102       SplatValue = SignExtend64(SplatValue, 32);
2103 
2104     // Since we can't introduce illegal i64 types at this stage, we can only
2105     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2106     // way we can use RVV instructions to splat.
2107     assert((ViaIntVT.bitsLE(XLenVT) ||
2108             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2109            "Unexpected bitcast sequence");
2110     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2111       SDValue ViaVL =
2112           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2113       MVT ViaContainerVT =
2114           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2115       SDValue Splat =
2116           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2117                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2118       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2119       return DAG.getBitcast(VT, Splat);
2120     }
2121   }
2122 
2123   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2124   // which constitute a large proportion of the elements. In such cases we can
2125   // splat a vector with the dominant element and make up the shortfall with
2126   // INSERT_VECTOR_ELTs.
2127   // Note that this includes vectors of 2 elements by association. The
2128   // upper-most element is the "dominant" one, allowing us to use a splat to
2129   // "insert" the upper element, and an insert of the lower element at position
2130   // 0, which improves codegen.
2131   SDValue DominantValue;
2132   unsigned MostCommonCount = 0;
2133   DenseMap<SDValue, unsigned> ValueCounts;
2134   unsigned NumUndefElts =
2135       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2136 
2137   // Track the number of scalar loads we know we'd be inserting, estimated as
2138   // any non-zero floating-point constant. Other kinds of element are either
2139   // already in registers or are materialized on demand. The threshold at which
2140   // a vector load is more desirable than several scalar materializion and
2141   // vector-insertion instructions is not known.
2142   unsigned NumScalarLoads = 0;
2143 
2144   for (SDValue V : Op->op_values()) {
2145     if (V.isUndef())
2146       continue;
2147 
2148     ValueCounts.insert(std::make_pair(V, 0));
2149     unsigned &Count = ValueCounts[V];
2150 
2151     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2152       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2153 
2154     // Is this value dominant? In case of a tie, prefer the highest element as
2155     // it's cheaper to insert near the beginning of a vector than it is at the
2156     // end.
2157     if (++Count >= MostCommonCount) {
2158       DominantValue = V;
2159       MostCommonCount = Count;
2160     }
2161   }
2162 
2163   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2164   unsigned NumDefElts = NumElts - NumUndefElts;
2165   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2166 
2167   // Don't perform this optimization when optimizing for size, since
2168   // materializing elements and inserting them tends to cause code bloat.
2169   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2170       ((MostCommonCount > DominantValueCountThreshold) ||
2171        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2172     // Start by splatting the most common element.
2173     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2174 
2175     DenseSet<SDValue> Processed{DominantValue};
2176     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2177     for (const auto &OpIdx : enumerate(Op->ops())) {
2178       const SDValue &V = OpIdx.value();
2179       if (V.isUndef() || !Processed.insert(V).second)
2180         continue;
2181       if (ValueCounts[V] == 1) {
2182         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2183                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2184       } else {
2185         // Blend in all instances of this value using a VSELECT, using a
2186         // mask where each bit signals whether that element is the one
2187         // we're after.
2188         SmallVector<SDValue> Ops;
2189         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2190           return DAG.getConstant(V == V1, DL, XLenVT);
2191         });
2192         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2193                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2194                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2195       }
2196     }
2197 
2198     return Vec;
2199   }
2200 
2201   return SDValue();
2202 }
2203 
2204 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2205                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2206   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2207     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2208     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2209     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2210     // node in order to try and match RVV vector/scalar instructions.
2211     if ((LoC >> 31) == HiC)
2212       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2213   }
2214 
2215   // Fall back to a stack store and stride x0 vector load.
2216   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2217 }
2218 
2219 // Called by type legalization to handle splat of i64 on RV32.
2220 // FIXME: We can optimize this when the type has sign or zero bits in one
2221 // of the halves.
2222 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2223                                    SDValue VL, SelectionDAG &DAG) {
2224   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2225   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2226                            DAG.getConstant(0, DL, MVT::i32));
2227   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2228                            DAG.getConstant(1, DL, MVT::i32));
2229   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2230 }
2231 
2232 // This function lowers a splat of a scalar operand Splat with the vector
2233 // length VL. It ensures the final sequence is type legal, which is useful when
2234 // lowering a splat after type legalization.
2235 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2236                                 SelectionDAG &DAG,
2237                                 const RISCVSubtarget &Subtarget) {
2238   if (VT.isFloatingPoint())
2239     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2240 
2241   MVT XLenVT = Subtarget.getXLenVT();
2242 
2243   // Simplest case is that the operand needs to be promoted to XLenVT.
2244   if (Scalar.getValueType().bitsLE(XLenVT)) {
2245     // If the operand is a constant, sign extend to increase our chances
2246     // of being able to use a .vi instruction. ANY_EXTEND would become a
2247     // a zero extend and the simm5 check in isel would fail.
2248     // FIXME: Should we ignore the upper bits in isel instead?
2249     unsigned ExtOpc =
2250         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2251     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2252     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2253   }
2254 
2255   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2256          "Unexpected scalar for splat lowering!");
2257 
2258   // Otherwise use the more complicated splatting algorithm.
2259   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2260 }
2261 
2262 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2263                                    const RISCVSubtarget &Subtarget) {
2264   SDValue V1 = Op.getOperand(0);
2265   SDValue V2 = Op.getOperand(1);
2266   SDLoc DL(Op);
2267   MVT XLenVT = Subtarget.getXLenVT();
2268   MVT VT = Op.getSimpleValueType();
2269   unsigned NumElts = VT.getVectorNumElements();
2270   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2271 
2272   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2273 
2274   SDValue TrueMask, VL;
2275   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2276 
2277   if (SVN->isSplat()) {
2278     const int Lane = SVN->getSplatIndex();
2279     if (Lane >= 0) {
2280       MVT SVT = VT.getVectorElementType();
2281 
2282       // Turn splatted vector load into a strided load with an X0 stride.
2283       SDValue V = V1;
2284       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2285       // with undef.
2286       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2287       int Offset = Lane;
2288       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2289         int OpElements =
2290             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2291         V = V.getOperand(Offset / OpElements);
2292         Offset %= OpElements;
2293       }
2294 
2295       // We need to ensure the load isn't atomic or volatile.
2296       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2297         auto *Ld = cast<LoadSDNode>(V);
2298         Offset *= SVT.getStoreSize();
2299         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2300                                                    TypeSize::Fixed(Offset), DL);
2301 
2302         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2303         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2304           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2305           SDValue IntID =
2306               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2307           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
2308                            DAG.getRegister(RISCV::X0, XLenVT), VL};
2309           SDValue NewLoad = DAG.getMemIntrinsicNode(
2310               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2311               DAG.getMachineFunction().getMachineMemOperand(
2312                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2313           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2314           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2315         }
2316 
2317         // Otherwise use a scalar load and splat. This will give the best
2318         // opportunity to fold a splat into the operation. ISel can turn it into
2319         // the x0 strided load if we aren't able to fold away the select.
2320         if (SVT.isFloatingPoint())
2321           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2322                           Ld->getPointerInfo().getWithOffset(Offset),
2323                           Ld->getOriginalAlign(),
2324                           Ld->getMemOperand()->getFlags());
2325         else
2326           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2327                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2328                              Ld->getOriginalAlign(),
2329                              Ld->getMemOperand()->getFlags());
2330         DAG.makeEquivalentMemoryOrdering(Ld, V);
2331 
2332         unsigned Opc =
2333             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2334         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2335         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2336       }
2337 
2338       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2339       assert(Lane < (int)NumElts && "Unexpected lane!");
2340       SDValue Gather =
2341           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2342                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2343       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2344     }
2345   }
2346 
2347   // Detect shuffles which can be re-expressed as vector selects; these are
2348   // shuffles in which each element in the destination is taken from an element
2349   // at the corresponding index in either source vectors.
2350   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
2351     int MaskIndex = MaskIdx.value();
2352     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2353   });
2354 
2355   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2356 
2357   SmallVector<SDValue> MaskVals;
2358   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2359   // merged with a second vrgather.
2360   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2361 
2362   // By default we preserve the original operand order, and use a mask to
2363   // select LHS as true and RHS as false. However, since RVV vector selects may
2364   // feature splats but only on the LHS, we may choose to invert our mask and
2365   // instead select between RHS and LHS.
2366   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2367   bool InvertMask = IsSelect == SwapOps;
2368 
2369   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2370   // half.
2371   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2372 
2373   // Now construct the mask that will be used by the vselect or blended
2374   // vrgather operation. For vrgathers, construct the appropriate indices into
2375   // each vector.
2376   for (int MaskIndex : SVN->getMask()) {
2377     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2378     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2379     if (!IsSelect) {
2380       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2381       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2382                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2383                                      : DAG.getUNDEF(XLenVT));
2384       GatherIndicesRHS.push_back(
2385           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2386                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2387       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2388         ++LHSIndexCounts[MaskIndex];
2389       if (!IsLHSOrUndefIndex)
2390         ++RHSIndexCounts[MaskIndex - NumElts];
2391     }
2392   }
2393 
2394   if (SwapOps) {
2395     std::swap(V1, V2);
2396     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2397   }
2398 
2399   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2400   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2401   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2402 
2403   if (IsSelect)
2404     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2405 
2406   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2407     // On such a large vector we're unable to use i8 as the index type.
2408     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2409     // may involve vector splitting if we're already at LMUL=8, or our
2410     // user-supplied maximum fixed-length LMUL.
2411     return SDValue();
2412   }
2413 
2414   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2415   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2416   MVT IndexVT = VT.changeTypeToInteger();
2417   // Since we can't introduce illegal index types at this stage, use i16 and
2418   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2419   // than XLenVT.
2420   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2421     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2422     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2423   }
2424 
2425   MVT IndexContainerVT =
2426       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2427 
2428   SDValue Gather;
2429   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2430   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2431   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2432     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2433   } else {
2434     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2435     // If only one index is used, we can use a "splat" vrgather.
2436     // TODO: We can splat the most-common index and fix-up any stragglers, if
2437     // that's beneficial.
2438     if (LHSIndexCounts.size() == 1) {
2439       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2440       Gather =
2441           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2442                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2443     } else {
2444       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2445       LHSIndices =
2446           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2447 
2448       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2449                            TrueMask, VL);
2450     }
2451   }
2452 
2453   // If a second vector operand is used by this shuffle, blend it in with an
2454   // additional vrgather.
2455   if (!V2.isUndef()) {
2456     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2457     // If only one index is used, we can use a "splat" vrgather.
2458     // TODO: We can splat the most-common index and fix-up any stragglers, if
2459     // that's beneficial.
2460     if (RHSIndexCounts.size() == 1) {
2461       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2462       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2463                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2464     } else {
2465       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2466       RHSIndices =
2467           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2468       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2469                        VL);
2470     }
2471 
2472     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2473     SelectMask =
2474         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2475 
2476     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2477                          Gather, VL);
2478   }
2479 
2480   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2481 }
2482 
2483 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2484                                      SDLoc DL, SelectionDAG &DAG,
2485                                      const RISCVSubtarget &Subtarget) {
2486   if (VT.isScalableVector())
2487     return DAG.getFPExtendOrRound(Op, DL, VT);
2488   assert(VT.isFixedLengthVector() &&
2489          "Unexpected value type for RVV FP extend/round lowering");
2490   SDValue Mask, VL;
2491   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2492   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2493                         ? RISCVISD::FP_EXTEND_VL
2494                         : RISCVISD::FP_ROUND_VL;
2495   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2496 }
2497 
2498 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2499 // the exponent.
2500 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2501   MVT VT = Op.getSimpleValueType();
2502   unsigned EltSize = VT.getScalarSizeInBits();
2503   SDValue Src = Op.getOperand(0);
2504   SDLoc DL(Op);
2505 
2506   // We need a FP type that can represent the value.
2507   // TODO: Use f16 for i8 when possible?
2508   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2509   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2510 
2511   // Legal types should have been checked in the RISCVTargetLowering
2512   // constructor.
2513   // TODO: Splitting may make sense in some cases.
2514   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2515          "Expected legal float type!");
2516 
2517   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2518   // The trailing zero count is equal to log2 of this single bit value.
2519   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2520     SDValue Neg =
2521         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2522     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2523   }
2524 
2525   // We have a legal FP type, convert to it.
2526   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2527   // Bitcast to integer and shift the exponent to the LSB.
2528   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2529   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2530   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2531   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2532                               DAG.getConstant(ShiftAmt, DL, IntVT));
2533   // Truncate back to original type to allow vnsrl.
2534   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2535   // The exponent contains log2 of the value in biased form.
2536   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2537 
2538   // For trailing zeros, we just need to subtract the bias.
2539   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2540     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2541                        DAG.getConstant(ExponentBias, DL, VT));
2542 
2543   // For leading zeros, we need to remove the bias and convert from log2 to
2544   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2545   unsigned Adjust = ExponentBias + (EltSize - 1);
2546   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2547 }
2548 
2549 // While RVV has alignment restrictions, we should always be able to load as a
2550 // legal equivalently-sized byte-typed vector instead. This method is
2551 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2552 // the load is already correctly-aligned, it returns SDValue().
2553 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2554                                                     SelectionDAG &DAG) const {
2555   auto *Load = cast<LoadSDNode>(Op);
2556   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2557 
2558   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2559                                      Load->getMemoryVT(),
2560                                      *Load->getMemOperand()))
2561     return SDValue();
2562 
2563   SDLoc DL(Op);
2564   MVT VT = Op.getSimpleValueType();
2565   unsigned EltSizeBits = VT.getScalarSizeInBits();
2566   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2567          "Unexpected unaligned RVV load type");
2568   MVT NewVT =
2569       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2570   assert(NewVT.isValid() &&
2571          "Expecting equally-sized RVV vector types to be legal");
2572   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2573                           Load->getPointerInfo(), Load->getOriginalAlign(),
2574                           Load->getMemOperand()->getFlags());
2575   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2576 }
2577 
2578 // While RVV has alignment restrictions, we should always be able to store as a
2579 // legal equivalently-sized byte-typed vector instead. This method is
2580 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2581 // returns SDValue() if the store is already correctly aligned.
2582 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2583                                                      SelectionDAG &DAG) const {
2584   auto *Store = cast<StoreSDNode>(Op);
2585   assert(Store && Store->getValue().getValueType().isVector() &&
2586          "Expected vector store");
2587 
2588   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2589                                      Store->getMemoryVT(),
2590                                      *Store->getMemOperand()))
2591     return SDValue();
2592 
2593   SDLoc DL(Op);
2594   SDValue StoredVal = Store->getValue();
2595   MVT VT = StoredVal.getSimpleValueType();
2596   unsigned EltSizeBits = VT.getScalarSizeInBits();
2597   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2598          "Unexpected unaligned RVV store type");
2599   MVT NewVT =
2600       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2601   assert(NewVT.isValid() &&
2602          "Expecting equally-sized RVV vector types to be legal");
2603   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2604   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2605                       Store->getPointerInfo(), Store->getOriginalAlign(),
2606                       Store->getMemOperand()->getFlags());
2607 }
2608 
2609 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2610                                             SelectionDAG &DAG) const {
2611   switch (Op.getOpcode()) {
2612   default:
2613     report_fatal_error("unimplemented operand");
2614   case ISD::GlobalAddress:
2615     return lowerGlobalAddress(Op, DAG);
2616   case ISD::BlockAddress:
2617     return lowerBlockAddress(Op, DAG);
2618   case ISD::ConstantPool:
2619     return lowerConstantPool(Op, DAG);
2620   case ISD::JumpTable:
2621     return lowerJumpTable(Op, DAG);
2622   case ISD::GlobalTLSAddress:
2623     return lowerGlobalTLSAddress(Op, DAG);
2624   case ISD::SELECT:
2625     return lowerSELECT(Op, DAG);
2626   case ISD::BRCOND:
2627     return lowerBRCOND(Op, DAG);
2628   case ISD::VASTART:
2629     return lowerVASTART(Op, DAG);
2630   case ISD::FRAMEADDR:
2631     return lowerFRAMEADDR(Op, DAG);
2632   case ISD::RETURNADDR:
2633     return lowerRETURNADDR(Op, DAG);
2634   case ISD::SHL_PARTS:
2635     return lowerShiftLeftParts(Op, DAG);
2636   case ISD::SRA_PARTS:
2637     return lowerShiftRightParts(Op, DAG, true);
2638   case ISD::SRL_PARTS:
2639     return lowerShiftRightParts(Op, DAG, false);
2640   case ISD::BITCAST: {
2641     SDLoc DL(Op);
2642     EVT VT = Op.getValueType();
2643     SDValue Op0 = Op.getOperand(0);
2644     EVT Op0VT = Op0.getValueType();
2645     MVT XLenVT = Subtarget.getXLenVT();
2646     if (VT.isFixedLengthVector()) {
2647       // We can handle fixed length vector bitcasts with a simple replacement
2648       // in isel.
2649       if (Op0VT.isFixedLengthVector())
2650         return Op;
2651       // When bitcasting from scalar to fixed-length vector, insert the scalar
2652       // into a one-element vector of the result type, and perform a vector
2653       // bitcast.
2654       if (!Op0VT.isVector()) {
2655         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2656         if (!isTypeLegal(BVT))
2657           return SDValue();
2658         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2659                                               DAG.getUNDEF(BVT), Op0,
2660                                               DAG.getConstant(0, DL, XLenVT)));
2661       }
2662       return SDValue();
2663     }
2664     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2665     // thus: bitcast the vector to a one-element vector type whose element type
2666     // is the same as the result type, and extract the first element.
2667     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2668       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2669       if (!isTypeLegal(BVT))
2670         return SDValue();
2671       SDValue BVec = DAG.getBitcast(BVT, Op0);
2672       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2673                          DAG.getConstant(0, DL, XLenVT));
2674     }
2675     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2676       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2677       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2678       return FPConv;
2679     }
2680     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2681         Subtarget.hasStdExtF()) {
2682       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2683       SDValue FPConv =
2684           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2685       return FPConv;
2686     }
2687     return SDValue();
2688   }
2689   case ISD::INTRINSIC_WO_CHAIN:
2690     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2691   case ISD::INTRINSIC_W_CHAIN:
2692     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2693   case ISD::INTRINSIC_VOID:
2694     return LowerINTRINSIC_VOID(Op, DAG);
2695   case ISD::BSWAP:
2696   case ISD::BITREVERSE: {
2697     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
2698     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2699     MVT VT = Op.getSimpleValueType();
2700     SDLoc DL(Op);
2701     // Start with the maximum immediate value which is the bitwidth - 1.
2702     unsigned Imm = VT.getSizeInBits() - 1;
2703     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2704     if (Op.getOpcode() == ISD::BSWAP)
2705       Imm &= ~0x7U;
2706     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2707                        DAG.getConstant(Imm, DL, VT));
2708   }
2709   case ISD::FSHL:
2710   case ISD::FSHR: {
2711     MVT VT = Op.getSimpleValueType();
2712     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2713     SDLoc DL(Op);
2714     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2715       return Op;
2716     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2717     // use log(XLen) bits. Mask the shift amount accordingly.
2718     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2719     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2720                                 DAG.getConstant(ShAmtWidth, DL, VT));
2721     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2722     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2723   }
2724   case ISD::TRUNCATE: {
2725     SDLoc DL(Op);
2726     MVT VT = Op.getSimpleValueType();
2727     // Only custom-lower vector truncates
2728     if (!VT.isVector())
2729       return Op;
2730 
2731     // Truncates to mask types are handled differently
2732     if (VT.getVectorElementType() == MVT::i1)
2733       return lowerVectorMaskTrunc(Op, DAG);
2734 
2735     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2736     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2737     // truncate by one power of two at a time.
2738     MVT DstEltVT = VT.getVectorElementType();
2739 
2740     SDValue Src = Op.getOperand(0);
2741     MVT SrcVT = Src.getSimpleValueType();
2742     MVT SrcEltVT = SrcVT.getVectorElementType();
2743 
2744     assert(DstEltVT.bitsLT(SrcEltVT) &&
2745            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2746            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2747            "Unexpected vector truncate lowering");
2748 
2749     MVT ContainerVT = SrcVT;
2750     if (SrcVT.isFixedLengthVector()) {
2751       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2752       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2753     }
2754 
2755     SDValue Result = Src;
2756     SDValue Mask, VL;
2757     std::tie(Mask, VL) =
2758         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2759     LLVMContext &Context = *DAG.getContext();
2760     const ElementCount Count = ContainerVT.getVectorElementCount();
2761     do {
2762       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2763       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2764       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2765                            Mask, VL);
2766     } while (SrcEltVT != DstEltVT);
2767 
2768     if (SrcVT.isFixedLengthVector())
2769       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2770 
2771     return Result;
2772   }
2773   case ISD::ANY_EXTEND:
2774   case ISD::ZERO_EXTEND:
2775     if (Op.getOperand(0).getValueType().isVector() &&
2776         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2777       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2778     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2779   case ISD::SIGN_EXTEND:
2780     if (Op.getOperand(0).getValueType().isVector() &&
2781         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2782       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2783     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2784   case ISD::SPLAT_VECTOR_PARTS:
2785     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2786   case ISD::INSERT_VECTOR_ELT:
2787     return lowerINSERT_VECTOR_ELT(Op, DAG);
2788   case ISD::EXTRACT_VECTOR_ELT:
2789     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2790   case ISD::VSCALE: {
2791     MVT VT = Op.getSimpleValueType();
2792     SDLoc DL(Op);
2793     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2794     // We define our scalable vector types for lmul=1 to use a 64 bit known
2795     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2796     // vscale as VLENB / 8.
2797     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
2798     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2799       // We assume VLENB is a multiple of 8. We manually choose the best shift
2800       // here because SimplifyDemandedBits isn't always able to simplify it.
2801       uint64_t Val = Op.getConstantOperandVal(0);
2802       if (isPowerOf2_64(Val)) {
2803         uint64_t Log2 = Log2_64(Val);
2804         if (Log2 < 3)
2805           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2806                              DAG.getConstant(3 - Log2, DL, VT));
2807         if (Log2 > 3)
2808           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2809                              DAG.getConstant(Log2 - 3, DL, VT));
2810         return VLENB;
2811       }
2812       // If the multiplier is a multiple of 8, scale it down to avoid needing
2813       // to shift the VLENB value.
2814       if ((Val % 8) == 0)
2815         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2816                            DAG.getConstant(Val / 8, DL, VT));
2817     }
2818 
2819     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2820                                  DAG.getConstant(3, DL, VT));
2821     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2822   }
2823   case ISD::FPOWI: {
2824     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
2825     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
2826     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
2827         Op.getOperand(1).getValueType() == MVT::i32) {
2828       SDLoc DL(Op);
2829       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
2830       SDValue Powi =
2831           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
2832       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
2833                          DAG.getIntPtrConstant(0, DL));
2834     }
2835     return SDValue();
2836   }
2837   case ISD::FP_EXTEND: {
2838     // RVV can only do fp_extend to types double the size as the source. We
2839     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2840     // via f32.
2841     SDLoc DL(Op);
2842     MVT VT = Op.getSimpleValueType();
2843     SDValue Src = Op.getOperand(0);
2844     MVT SrcVT = Src.getSimpleValueType();
2845 
2846     // Prepare any fixed-length vector operands.
2847     MVT ContainerVT = VT;
2848     if (SrcVT.isFixedLengthVector()) {
2849       ContainerVT = getContainerForFixedLengthVector(VT);
2850       MVT SrcContainerVT =
2851           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2852       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2853     }
2854 
2855     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2856         SrcVT.getVectorElementType() != MVT::f16) {
2857       // For scalable vectors, we only need to close the gap between
2858       // vXf16->vXf64.
2859       if (!VT.isFixedLengthVector())
2860         return Op;
2861       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2862       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2863       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2864     }
2865 
2866     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2867     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2868     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2869         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2870 
2871     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2872                                            DL, DAG, Subtarget);
2873     if (VT.isFixedLengthVector())
2874       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2875     return Extend;
2876   }
2877   case ISD::FP_ROUND: {
2878     // RVV can only do fp_round to types half the size as the source. We
2879     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2880     // conversion instruction.
2881     SDLoc DL(Op);
2882     MVT VT = Op.getSimpleValueType();
2883     SDValue Src = Op.getOperand(0);
2884     MVT SrcVT = Src.getSimpleValueType();
2885 
2886     // Prepare any fixed-length vector operands.
2887     MVT ContainerVT = VT;
2888     if (VT.isFixedLengthVector()) {
2889       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2890       ContainerVT =
2891           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2892       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2893     }
2894 
2895     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2896         SrcVT.getVectorElementType() != MVT::f64) {
2897       // For scalable vectors, we only need to close the gap between
2898       // vXf64<->vXf16.
2899       if (!VT.isFixedLengthVector())
2900         return Op;
2901       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2902       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2903       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2904     }
2905 
2906     SDValue Mask, VL;
2907     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2908 
2909     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2910     SDValue IntermediateRound =
2911         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2912     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2913                                           DL, DAG, Subtarget);
2914 
2915     if (VT.isFixedLengthVector())
2916       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2917     return Round;
2918   }
2919   case ISD::FP_TO_SINT:
2920   case ISD::FP_TO_UINT:
2921   case ISD::SINT_TO_FP:
2922   case ISD::UINT_TO_FP: {
2923     // RVV can only do fp<->int conversions to types half/double the size as
2924     // the source. We custom-lower any conversions that do two hops into
2925     // sequences.
2926     MVT VT = Op.getSimpleValueType();
2927     if (!VT.isVector())
2928       return Op;
2929     SDLoc DL(Op);
2930     SDValue Src = Op.getOperand(0);
2931     MVT EltVT = VT.getVectorElementType();
2932     MVT SrcVT = Src.getSimpleValueType();
2933     MVT SrcEltVT = SrcVT.getVectorElementType();
2934     unsigned EltSize = EltVT.getSizeInBits();
2935     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2936     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2937            "Unexpected vector element types");
2938 
2939     bool IsInt2FP = SrcEltVT.isInteger();
2940     // Widening conversions
2941     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2942       if (IsInt2FP) {
2943         // Do a regular integer sign/zero extension then convert to float.
2944         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2945                                       VT.getVectorElementCount());
2946         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2947                                  ? ISD::ZERO_EXTEND
2948                                  : ISD::SIGN_EXTEND;
2949         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2950         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2951       }
2952       // FP2Int
2953       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2954       // Do one doubling fp_extend then complete the operation by converting
2955       // to int.
2956       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2957       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2958       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2959     }
2960 
2961     // Narrowing conversions
2962     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2963       if (IsInt2FP) {
2964         // One narrowing int_to_fp, then an fp_round.
2965         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2966         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2967         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2968         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2969       }
2970       // FP2Int
2971       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2972       // representable by the integer, the result is poison.
2973       MVT IVecVT =
2974           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2975                            VT.getVectorElementCount());
2976       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2977       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2978     }
2979 
2980     // Scalable vectors can exit here. Patterns will handle equally-sized
2981     // conversions halving/doubling ones.
2982     if (!VT.isFixedLengthVector())
2983       return Op;
2984 
2985     // For fixed-length vectors we lower to a custom "VL" node.
2986     unsigned RVVOpc = 0;
2987     switch (Op.getOpcode()) {
2988     default:
2989       llvm_unreachable("Impossible opcode");
2990     case ISD::FP_TO_SINT:
2991       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2992       break;
2993     case ISD::FP_TO_UINT:
2994       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2995       break;
2996     case ISD::SINT_TO_FP:
2997       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2998       break;
2999     case ISD::UINT_TO_FP:
3000       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3001       break;
3002     }
3003 
3004     MVT ContainerVT, SrcContainerVT;
3005     // Derive the reference container type from the larger vector type.
3006     if (SrcEltSize > EltSize) {
3007       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3008       ContainerVT =
3009           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3010     } else {
3011       ContainerVT = getContainerForFixedLengthVector(VT);
3012       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3013     }
3014 
3015     SDValue Mask, VL;
3016     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3017 
3018     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3019     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3020     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3021   }
3022   case ISD::FP_TO_SINT_SAT:
3023   case ISD::FP_TO_UINT_SAT:
3024     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3025   case ISD::FTRUNC:
3026   case ISD::FCEIL:
3027   case ISD::FFLOOR:
3028     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3029   case ISD::VECREDUCE_ADD:
3030   case ISD::VECREDUCE_UMAX:
3031   case ISD::VECREDUCE_SMAX:
3032   case ISD::VECREDUCE_UMIN:
3033   case ISD::VECREDUCE_SMIN:
3034     return lowerVECREDUCE(Op, DAG);
3035   case ISD::VECREDUCE_AND:
3036   case ISD::VECREDUCE_OR:
3037   case ISD::VECREDUCE_XOR:
3038     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3039       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3040     return lowerVECREDUCE(Op, DAG);
3041   case ISD::VECREDUCE_FADD:
3042   case ISD::VECREDUCE_SEQ_FADD:
3043   case ISD::VECREDUCE_FMIN:
3044   case ISD::VECREDUCE_FMAX:
3045     return lowerFPVECREDUCE(Op, DAG);
3046   case ISD::VP_REDUCE_ADD:
3047   case ISD::VP_REDUCE_UMAX:
3048   case ISD::VP_REDUCE_SMAX:
3049   case ISD::VP_REDUCE_UMIN:
3050   case ISD::VP_REDUCE_SMIN:
3051   case ISD::VP_REDUCE_FADD:
3052   case ISD::VP_REDUCE_SEQ_FADD:
3053   case ISD::VP_REDUCE_FMIN:
3054   case ISD::VP_REDUCE_FMAX:
3055     return lowerVPREDUCE(Op, DAG);
3056   case ISD::VP_REDUCE_AND:
3057   case ISD::VP_REDUCE_OR:
3058   case ISD::VP_REDUCE_XOR:
3059     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3060       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3061     return lowerVPREDUCE(Op, DAG);
3062   case ISD::INSERT_SUBVECTOR:
3063     return lowerINSERT_SUBVECTOR(Op, DAG);
3064   case ISD::EXTRACT_SUBVECTOR:
3065     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3066   case ISD::STEP_VECTOR:
3067     return lowerSTEP_VECTOR(Op, DAG);
3068   case ISD::VECTOR_REVERSE:
3069     return lowerVECTOR_REVERSE(Op, DAG);
3070   case ISD::BUILD_VECTOR:
3071     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3072   case ISD::SPLAT_VECTOR:
3073     if (Op.getValueType().getVectorElementType() == MVT::i1)
3074       return lowerVectorMaskSplat(Op, DAG);
3075     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3076   case ISD::VECTOR_SHUFFLE:
3077     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3078   case ISD::CONCAT_VECTORS: {
3079     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3080     // better than going through the stack, as the default expansion does.
3081     SDLoc DL(Op);
3082     MVT VT = Op.getSimpleValueType();
3083     unsigned NumOpElts =
3084         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3085     SDValue Vec = DAG.getUNDEF(VT);
3086     for (const auto &OpIdx : enumerate(Op->ops()))
3087       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
3088                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3089     return Vec;
3090   }
3091   case ISD::LOAD:
3092     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3093       return V;
3094     if (Op.getValueType().isFixedLengthVector())
3095       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3096     return Op;
3097   case ISD::STORE:
3098     if (auto V = expandUnalignedRVVStore(Op, DAG))
3099       return V;
3100     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3101       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3102     return Op;
3103   case ISD::MLOAD:
3104   case ISD::VP_LOAD:
3105     return lowerMaskedLoad(Op, DAG);
3106   case ISD::MSTORE:
3107   case ISD::VP_STORE:
3108     return lowerMaskedStore(Op, DAG);
3109   case ISD::SETCC:
3110     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3111   case ISD::ADD:
3112     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3113   case ISD::SUB:
3114     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3115   case ISD::MUL:
3116     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3117   case ISD::MULHS:
3118     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3119   case ISD::MULHU:
3120     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3121   case ISD::AND:
3122     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3123                                               RISCVISD::AND_VL);
3124   case ISD::OR:
3125     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3126                                               RISCVISD::OR_VL);
3127   case ISD::XOR:
3128     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3129                                               RISCVISD::XOR_VL);
3130   case ISD::SDIV:
3131     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3132   case ISD::SREM:
3133     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3134   case ISD::UDIV:
3135     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3136   case ISD::UREM:
3137     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3138   case ISD::SHL:
3139   case ISD::SRA:
3140   case ISD::SRL:
3141     if (Op.getSimpleValueType().isFixedLengthVector())
3142       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3143     // This can be called for an i32 shift amount that needs to be promoted.
3144     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3145            "Unexpected custom legalisation");
3146     return SDValue();
3147   case ISD::SADDSAT:
3148     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3149   case ISD::UADDSAT:
3150     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3151   case ISD::SSUBSAT:
3152     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3153   case ISD::USUBSAT:
3154     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3155   case ISD::FADD:
3156     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3157   case ISD::FSUB:
3158     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3159   case ISD::FMUL:
3160     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3161   case ISD::FDIV:
3162     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3163   case ISD::FNEG:
3164     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3165   case ISD::FABS:
3166     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3167   case ISD::FSQRT:
3168     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3169   case ISD::FMA:
3170     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3171   case ISD::SMIN:
3172     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3173   case ISD::SMAX:
3174     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3175   case ISD::UMIN:
3176     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3177   case ISD::UMAX:
3178     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3179   case ISD::FMINNUM:
3180     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3181   case ISD::FMAXNUM:
3182     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3183   case ISD::ABS:
3184     return lowerABS(Op, DAG);
3185   case ISD::CTLZ_ZERO_UNDEF:
3186   case ISD::CTTZ_ZERO_UNDEF:
3187     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3188   case ISD::VSELECT:
3189     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3190   case ISD::FCOPYSIGN:
3191     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3192   case ISD::MGATHER:
3193   case ISD::VP_GATHER:
3194     return lowerMaskedGather(Op, DAG);
3195   case ISD::MSCATTER:
3196   case ISD::VP_SCATTER:
3197     return lowerMaskedScatter(Op, DAG);
3198   case ISD::FLT_ROUNDS_:
3199     return lowerGET_ROUNDING(Op, DAG);
3200   case ISD::SET_ROUNDING:
3201     return lowerSET_ROUNDING(Op, DAG);
3202   case ISD::VP_SELECT:
3203     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3204   case ISD::VP_ADD:
3205     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3206   case ISD::VP_SUB:
3207     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3208   case ISD::VP_MUL:
3209     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3210   case ISD::VP_SDIV:
3211     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3212   case ISD::VP_UDIV:
3213     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3214   case ISD::VP_SREM:
3215     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3216   case ISD::VP_UREM:
3217     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3218   case ISD::VP_AND:
3219     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3220   case ISD::VP_OR:
3221     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3222   case ISD::VP_XOR:
3223     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3224   case ISD::VP_ASHR:
3225     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3226   case ISD::VP_LSHR:
3227     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3228   case ISD::VP_SHL:
3229     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3230   case ISD::VP_FADD:
3231     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3232   case ISD::VP_FSUB:
3233     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3234   case ISD::VP_FMUL:
3235     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3236   case ISD::VP_FDIV:
3237     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3238   }
3239 }
3240 
3241 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3242                              SelectionDAG &DAG, unsigned Flags) {
3243   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3244 }
3245 
3246 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3247                              SelectionDAG &DAG, unsigned Flags) {
3248   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3249                                    Flags);
3250 }
3251 
3252 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3253                              SelectionDAG &DAG, unsigned Flags) {
3254   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3255                                    N->getOffset(), Flags);
3256 }
3257 
3258 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3259                              SelectionDAG &DAG, unsigned Flags) {
3260   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3261 }
3262 
3263 template <class NodeTy>
3264 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3265                                      bool IsLocal) const {
3266   SDLoc DL(N);
3267   EVT Ty = getPointerTy(DAG.getDataLayout());
3268 
3269   if (isPositionIndependent()) {
3270     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3271     if (IsLocal)
3272       // Use PC-relative addressing to access the symbol. This generates the
3273       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3274       // %pcrel_lo(auipc)).
3275       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3276 
3277     // Use PC-relative addressing to access the GOT for this symbol, then load
3278     // the address from the GOT. This generates the pattern (PseudoLA sym),
3279     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3280     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3281   }
3282 
3283   switch (getTargetMachine().getCodeModel()) {
3284   default:
3285     report_fatal_error("Unsupported code model for lowering");
3286   case CodeModel::Small: {
3287     // Generate a sequence for accessing addresses within the first 2 GiB of
3288     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3289     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3290     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3291     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3292     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3293   }
3294   case CodeModel::Medium: {
3295     // Generate a sequence for accessing addresses within any 2GiB range within
3296     // the address space. This generates the pattern (PseudoLLA sym), which
3297     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3298     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3299     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3300   }
3301   }
3302 }
3303 
3304 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3305                                                 SelectionDAG &DAG) const {
3306   SDLoc DL(Op);
3307   EVT Ty = Op.getValueType();
3308   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3309   int64_t Offset = N->getOffset();
3310   MVT XLenVT = Subtarget.getXLenVT();
3311 
3312   const GlobalValue *GV = N->getGlobal();
3313   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3314   SDValue Addr = getAddr(N, DAG, IsLocal);
3315 
3316   // In order to maximise the opportunity for common subexpression elimination,
3317   // emit a separate ADD node for the global address offset instead of folding
3318   // it in the global address node. Later peephole optimisations may choose to
3319   // fold it back in when profitable.
3320   if (Offset != 0)
3321     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3322                        DAG.getConstant(Offset, DL, XLenVT));
3323   return Addr;
3324 }
3325 
3326 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3327                                                SelectionDAG &DAG) const {
3328   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3329 
3330   return getAddr(N, DAG);
3331 }
3332 
3333 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3334                                                SelectionDAG &DAG) const {
3335   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3336 
3337   return getAddr(N, DAG);
3338 }
3339 
3340 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3341                                             SelectionDAG &DAG) const {
3342   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3343 
3344   return getAddr(N, DAG);
3345 }
3346 
3347 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3348                                               SelectionDAG &DAG,
3349                                               bool UseGOT) const {
3350   SDLoc DL(N);
3351   EVT Ty = getPointerTy(DAG.getDataLayout());
3352   const GlobalValue *GV = N->getGlobal();
3353   MVT XLenVT = Subtarget.getXLenVT();
3354 
3355   if (UseGOT) {
3356     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3357     // load the address from the GOT and add the thread pointer. This generates
3358     // the pattern (PseudoLA_TLS_IE sym), which expands to
3359     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3360     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3361     SDValue Load =
3362         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3363 
3364     // Add the thread pointer.
3365     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3366     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3367   }
3368 
3369   // Generate a sequence for accessing the address relative to the thread
3370   // pointer, with the appropriate adjustment for the thread pointer offset.
3371   // This generates the pattern
3372   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3373   SDValue AddrHi =
3374       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3375   SDValue AddrAdd =
3376       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3377   SDValue AddrLo =
3378       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3379 
3380   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3381   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3382   SDValue MNAdd = SDValue(
3383       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3384       0);
3385   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3386 }
3387 
3388 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3389                                                SelectionDAG &DAG) const {
3390   SDLoc DL(N);
3391   EVT Ty = getPointerTy(DAG.getDataLayout());
3392   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3393   const GlobalValue *GV = N->getGlobal();
3394 
3395   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3396   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3397   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3398   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3399   SDValue Load =
3400       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3401 
3402   // Prepare argument list to generate call.
3403   ArgListTy Args;
3404   ArgListEntry Entry;
3405   Entry.Node = Load;
3406   Entry.Ty = CallTy;
3407   Args.push_back(Entry);
3408 
3409   // Setup call to __tls_get_addr.
3410   TargetLowering::CallLoweringInfo CLI(DAG);
3411   CLI.setDebugLoc(DL)
3412       .setChain(DAG.getEntryNode())
3413       .setLibCallee(CallingConv::C, CallTy,
3414                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3415                     std::move(Args));
3416 
3417   return LowerCallTo(CLI).first;
3418 }
3419 
3420 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3421                                                    SelectionDAG &DAG) const {
3422   SDLoc DL(Op);
3423   EVT Ty = Op.getValueType();
3424   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3425   int64_t Offset = N->getOffset();
3426   MVT XLenVT = Subtarget.getXLenVT();
3427 
3428   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3429 
3430   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3431       CallingConv::GHC)
3432     report_fatal_error("In GHC calling convention TLS is not supported");
3433 
3434   SDValue Addr;
3435   switch (Model) {
3436   case TLSModel::LocalExec:
3437     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3438     break;
3439   case TLSModel::InitialExec:
3440     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3441     break;
3442   case TLSModel::LocalDynamic:
3443   case TLSModel::GeneralDynamic:
3444     Addr = getDynamicTLSAddr(N, DAG);
3445     break;
3446   }
3447 
3448   // In order to maximise the opportunity for common subexpression elimination,
3449   // emit a separate ADD node for the global address offset instead of folding
3450   // it in the global address node. Later peephole optimisations may choose to
3451   // fold it back in when profitable.
3452   if (Offset != 0)
3453     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3454                        DAG.getConstant(Offset, DL, XLenVT));
3455   return Addr;
3456 }
3457 
3458 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3459   SDValue CondV = Op.getOperand(0);
3460   SDValue TrueV = Op.getOperand(1);
3461   SDValue FalseV = Op.getOperand(2);
3462   SDLoc DL(Op);
3463   MVT VT = Op.getSimpleValueType();
3464   MVT XLenVT = Subtarget.getXLenVT();
3465 
3466   // Lower vector SELECTs to VSELECTs by splatting the condition.
3467   if (VT.isVector()) {
3468     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3469     SDValue CondSplat = VT.isScalableVector()
3470                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3471                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3472     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3473   }
3474 
3475   // If the result type is XLenVT and CondV is the output of a SETCC node
3476   // which also operated on XLenVT inputs, then merge the SETCC node into the
3477   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3478   // compare+branch instructions. i.e.:
3479   // (select (setcc lhs, rhs, cc), truev, falsev)
3480   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3481   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3482       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3483     SDValue LHS = CondV.getOperand(0);
3484     SDValue RHS = CondV.getOperand(1);
3485     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3486     ISD::CondCode CCVal = CC->get();
3487 
3488     // Special case for a select of 2 constants that have a diffence of 1.
3489     // Normally this is done by DAGCombine, but if the select is introduced by
3490     // type legalization or op legalization, we miss it. Restricting to SETLT
3491     // case for now because that is what signed saturating add/sub need.
3492     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3493     // but we would probably want to swap the true/false values if the condition
3494     // is SETGE/SETLE to avoid an XORI.
3495     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3496         CCVal == ISD::SETLT) {
3497       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3498       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3499       if (TrueVal - 1 == FalseVal)
3500         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3501       if (TrueVal + 1 == FalseVal)
3502         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3503     }
3504 
3505     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3506 
3507     SDValue TargetCC = DAG.getCondCode(CCVal);
3508     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3509     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3510   }
3511 
3512   // Otherwise:
3513   // (select condv, truev, falsev)
3514   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3515   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3516   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3517 
3518   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3519 
3520   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3521 }
3522 
3523 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3524   SDValue CondV = Op.getOperand(1);
3525   SDLoc DL(Op);
3526   MVT XLenVT = Subtarget.getXLenVT();
3527 
3528   if (CondV.getOpcode() == ISD::SETCC &&
3529       CondV.getOperand(0).getValueType() == XLenVT) {
3530     SDValue LHS = CondV.getOperand(0);
3531     SDValue RHS = CondV.getOperand(1);
3532     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3533 
3534     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3535 
3536     SDValue TargetCC = DAG.getCondCode(CCVal);
3537     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3538                        LHS, RHS, TargetCC, Op.getOperand(2));
3539   }
3540 
3541   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3542                      CondV, DAG.getConstant(0, DL, XLenVT),
3543                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3544 }
3545 
3546 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3547   MachineFunction &MF = DAG.getMachineFunction();
3548   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3549 
3550   SDLoc DL(Op);
3551   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3552                                  getPointerTy(MF.getDataLayout()));
3553 
3554   // vastart just stores the address of the VarArgsFrameIndex slot into the
3555   // memory location argument.
3556   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3557   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3558                       MachinePointerInfo(SV));
3559 }
3560 
3561 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3562                                             SelectionDAG &DAG) const {
3563   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3564   MachineFunction &MF = DAG.getMachineFunction();
3565   MachineFrameInfo &MFI = MF.getFrameInfo();
3566   MFI.setFrameAddressIsTaken(true);
3567   Register FrameReg = RI.getFrameRegister(MF);
3568   int XLenInBytes = Subtarget.getXLen() / 8;
3569 
3570   EVT VT = Op.getValueType();
3571   SDLoc DL(Op);
3572   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3573   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3574   while (Depth--) {
3575     int Offset = -(XLenInBytes * 2);
3576     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3577                               DAG.getIntPtrConstant(Offset, DL));
3578     FrameAddr =
3579         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3580   }
3581   return FrameAddr;
3582 }
3583 
3584 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3585                                              SelectionDAG &DAG) const {
3586   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3587   MachineFunction &MF = DAG.getMachineFunction();
3588   MachineFrameInfo &MFI = MF.getFrameInfo();
3589   MFI.setReturnAddressIsTaken(true);
3590   MVT XLenVT = Subtarget.getXLenVT();
3591   int XLenInBytes = Subtarget.getXLen() / 8;
3592 
3593   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3594     return SDValue();
3595 
3596   EVT VT = Op.getValueType();
3597   SDLoc DL(Op);
3598   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3599   if (Depth) {
3600     int Off = -XLenInBytes;
3601     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3602     SDValue Offset = DAG.getConstant(Off, DL, VT);
3603     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3604                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3605                        MachinePointerInfo());
3606   }
3607 
3608   // Return the value of the return address register, marking it an implicit
3609   // live-in.
3610   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3611   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3612 }
3613 
3614 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3615                                                  SelectionDAG &DAG) const {
3616   SDLoc DL(Op);
3617   SDValue Lo = Op.getOperand(0);
3618   SDValue Hi = Op.getOperand(1);
3619   SDValue Shamt = Op.getOperand(2);
3620   EVT VT = Lo.getValueType();
3621 
3622   // if Shamt-XLEN < 0: // Shamt < XLEN
3623   //   Lo = Lo << Shamt
3624   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3625   // else:
3626   //   Lo = 0
3627   //   Hi = Lo << (Shamt-XLEN)
3628 
3629   SDValue Zero = DAG.getConstant(0, DL, VT);
3630   SDValue One = DAG.getConstant(1, DL, VT);
3631   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3632   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3633   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3634   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3635 
3636   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3637   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3638   SDValue ShiftRightLo =
3639       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3640   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3641   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3642   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3643 
3644   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3645 
3646   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3647   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3648 
3649   SDValue Parts[2] = {Lo, Hi};
3650   return DAG.getMergeValues(Parts, DL);
3651 }
3652 
3653 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3654                                                   bool IsSRA) const {
3655   SDLoc DL(Op);
3656   SDValue Lo = Op.getOperand(0);
3657   SDValue Hi = Op.getOperand(1);
3658   SDValue Shamt = Op.getOperand(2);
3659   EVT VT = Lo.getValueType();
3660 
3661   // SRA expansion:
3662   //   if Shamt-XLEN < 0: // Shamt < XLEN
3663   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3664   //     Hi = Hi >>s Shamt
3665   //   else:
3666   //     Lo = Hi >>s (Shamt-XLEN);
3667   //     Hi = Hi >>s (XLEN-1)
3668   //
3669   // SRL expansion:
3670   //   if Shamt-XLEN < 0: // Shamt < XLEN
3671   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3672   //     Hi = Hi >>u Shamt
3673   //   else:
3674   //     Lo = Hi >>u (Shamt-XLEN);
3675   //     Hi = 0;
3676 
3677   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3678 
3679   SDValue Zero = DAG.getConstant(0, DL, VT);
3680   SDValue One = DAG.getConstant(1, DL, VT);
3681   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3682   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3683   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3684   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3685 
3686   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3687   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3688   SDValue ShiftLeftHi =
3689       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3690   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3691   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3692   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3693   SDValue HiFalse =
3694       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3695 
3696   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3697 
3698   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3699   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3700 
3701   SDValue Parts[2] = {Lo, Hi};
3702   return DAG.getMergeValues(Parts, DL);
3703 }
3704 
3705 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3706 // legal equivalently-sized i8 type, so we can use that as a go-between.
3707 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3708                                                   SelectionDAG &DAG) const {
3709   SDLoc DL(Op);
3710   MVT VT = Op.getSimpleValueType();
3711   SDValue SplatVal = Op.getOperand(0);
3712   // All-zeros or all-ones splats are handled specially.
3713   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3714     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3715     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3716   }
3717   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3718     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3719     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3720   }
3721   MVT XLenVT = Subtarget.getXLenVT();
3722   assert(SplatVal.getValueType() == XLenVT &&
3723          "Unexpected type for i1 splat value");
3724   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3725   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3726                          DAG.getConstant(1, DL, XLenVT));
3727   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3728   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3729   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3730 }
3731 
3732 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3733 // illegal (currently only vXi64 RV32).
3734 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3735 // them to SPLAT_VECTOR_I64
3736 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3737                                                      SelectionDAG &DAG) const {
3738   SDLoc DL(Op);
3739   MVT VecVT = Op.getSimpleValueType();
3740   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3741          "Unexpected SPLAT_VECTOR_PARTS lowering");
3742 
3743   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3744   SDValue Lo = Op.getOperand(0);
3745   SDValue Hi = Op.getOperand(1);
3746 
3747   if (VecVT.isFixedLengthVector()) {
3748     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3749     SDLoc DL(Op);
3750     SDValue Mask, VL;
3751     std::tie(Mask, VL) =
3752         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3753 
3754     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3755     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3756   }
3757 
3758   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3759     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3760     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3761     // If Hi constant is all the same sign bit as Lo, lower this as a custom
3762     // node in order to try and match RVV vector/scalar instructions.
3763     if ((LoC >> 31) == HiC)
3764       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3765   }
3766 
3767   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3768   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3769       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3770       Hi.getConstantOperandVal(1) == 31)
3771     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3772 
3773   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3774   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3775                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3776 }
3777 
3778 // Custom-lower extensions from mask vectors by using a vselect either with 1
3779 // for zero/any-extension or -1 for sign-extension:
3780 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3781 // Note that any-extension is lowered identically to zero-extension.
3782 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3783                                                 int64_t ExtTrueVal) const {
3784   SDLoc DL(Op);
3785   MVT VecVT = Op.getSimpleValueType();
3786   SDValue Src = Op.getOperand(0);
3787   // Only custom-lower extensions from mask types
3788   assert(Src.getValueType().isVector() &&
3789          Src.getValueType().getVectorElementType() == MVT::i1);
3790 
3791   MVT XLenVT = Subtarget.getXLenVT();
3792   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3793   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3794 
3795   if (VecVT.isScalableVector()) {
3796     // Be careful not to introduce illegal scalar types at this stage, and be
3797     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3798     // illegal and must be expanded. Since we know that the constants are
3799     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3800     bool IsRV32E64 =
3801         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3802 
3803     if (!IsRV32E64) {
3804       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3805       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3806     } else {
3807       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3808       SplatTrueVal =
3809           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3810     }
3811 
3812     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3813   }
3814 
3815   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3816   MVT I1ContainerVT =
3817       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3818 
3819   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3820 
3821   SDValue Mask, VL;
3822   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3823 
3824   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3825   SplatTrueVal =
3826       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3827   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3828                                SplatTrueVal, SplatZero, VL);
3829 
3830   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3831 }
3832 
3833 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3834     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3835   MVT ExtVT = Op.getSimpleValueType();
3836   // Only custom-lower extensions from fixed-length vector types.
3837   if (!ExtVT.isFixedLengthVector())
3838     return Op;
3839   MVT VT = Op.getOperand(0).getSimpleValueType();
3840   // Grab the canonical container type for the extended type. Infer the smaller
3841   // type from that to ensure the same number of vector elements, as we know
3842   // the LMUL will be sufficient to hold the smaller type.
3843   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3844   // Get the extended container type manually to ensure the same number of
3845   // vector elements between source and dest.
3846   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3847                                      ContainerExtVT.getVectorElementCount());
3848 
3849   SDValue Op1 =
3850       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3851 
3852   SDLoc DL(Op);
3853   SDValue Mask, VL;
3854   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3855 
3856   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3857 
3858   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3859 }
3860 
3861 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3862 // setcc operation:
3863 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3864 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3865                                                   SelectionDAG &DAG) const {
3866   SDLoc DL(Op);
3867   EVT MaskVT = Op.getValueType();
3868   // Only expect to custom-lower truncations to mask types
3869   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3870          "Unexpected type for vector mask lowering");
3871   SDValue Src = Op.getOperand(0);
3872   MVT VecVT = Src.getSimpleValueType();
3873 
3874   // If this is a fixed vector, we need to convert it to a scalable vector.
3875   MVT ContainerVT = VecVT;
3876   if (VecVT.isFixedLengthVector()) {
3877     ContainerVT = getContainerForFixedLengthVector(VecVT);
3878     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3879   }
3880 
3881   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3882   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3883 
3884   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3885   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3886 
3887   if (VecVT.isScalableVector()) {
3888     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3889     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3890   }
3891 
3892   SDValue Mask, VL;
3893   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3894 
3895   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3896   SDValue Trunc =
3897       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3898   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3899                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3900   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3901 }
3902 
3903 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3904 // first position of a vector, and that vector is slid up to the insert index.
3905 // By limiting the active vector length to index+1 and merging with the
3906 // original vector (with an undisturbed tail policy for elements >= VL), we
3907 // achieve the desired result of leaving all elements untouched except the one
3908 // at VL-1, which is replaced with the desired value.
3909 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3910                                                     SelectionDAG &DAG) const {
3911   SDLoc DL(Op);
3912   MVT VecVT = Op.getSimpleValueType();
3913   SDValue Vec = Op.getOperand(0);
3914   SDValue Val = Op.getOperand(1);
3915   SDValue Idx = Op.getOperand(2);
3916 
3917   if (VecVT.getVectorElementType() == MVT::i1) {
3918     // FIXME: For now we just promote to an i8 vector and insert into that,
3919     // but this is probably not optimal.
3920     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3921     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3922     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3923     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3924   }
3925 
3926   MVT ContainerVT = VecVT;
3927   // If the operand is a fixed-length vector, convert to a scalable one.
3928   if (VecVT.isFixedLengthVector()) {
3929     ContainerVT = getContainerForFixedLengthVector(VecVT);
3930     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3931   }
3932 
3933   MVT XLenVT = Subtarget.getXLenVT();
3934 
3935   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3936   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3937   // Even i64-element vectors on RV32 can be lowered without scalar
3938   // legalization if the most-significant 32 bits of the value are not affected
3939   // by the sign-extension of the lower 32 bits.
3940   // TODO: We could also catch sign extensions of a 32-bit value.
3941   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3942     const auto *CVal = cast<ConstantSDNode>(Val);
3943     if (isInt<32>(CVal->getSExtValue())) {
3944       IsLegalInsert = true;
3945       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3946     }
3947   }
3948 
3949   SDValue Mask, VL;
3950   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3951 
3952   SDValue ValInVec;
3953 
3954   if (IsLegalInsert) {
3955     unsigned Opc =
3956         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3957     if (isNullConstant(Idx)) {
3958       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3959       if (!VecVT.isFixedLengthVector())
3960         return Vec;
3961       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3962     }
3963     ValInVec =
3964         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3965   } else {
3966     // On RV32, i64-element vectors must be specially handled to place the
3967     // value at element 0, by using two vslide1up instructions in sequence on
3968     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3969     // this.
3970     SDValue One = DAG.getConstant(1, DL, XLenVT);
3971     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3972     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3973     MVT I32ContainerVT =
3974         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3975     SDValue I32Mask =
3976         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3977     // Limit the active VL to two.
3978     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3979     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3980     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3981     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3982                            InsertI64VL);
3983     // First slide in the hi value, then the lo in underneath it.
3984     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3985                            ValHi, I32Mask, InsertI64VL);
3986     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3987                            ValLo, I32Mask, InsertI64VL);
3988     // Bitcast back to the right container type.
3989     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3990   }
3991 
3992   // Now that the value is in a vector, slide it into position.
3993   SDValue InsertVL =
3994       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3995   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3996                                 ValInVec, Idx, Mask, InsertVL);
3997   if (!VecVT.isFixedLengthVector())
3998     return Slideup;
3999   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4000 }
4001 
4002 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4003 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4004 // types this is done using VMV_X_S to allow us to glean information about the
4005 // sign bits of the result.
4006 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4007                                                      SelectionDAG &DAG) const {
4008   SDLoc DL(Op);
4009   SDValue Idx = Op.getOperand(1);
4010   SDValue Vec = Op.getOperand(0);
4011   EVT EltVT = Op.getValueType();
4012   MVT VecVT = Vec.getSimpleValueType();
4013   MVT XLenVT = Subtarget.getXLenVT();
4014 
4015   if (VecVT.getVectorElementType() == MVT::i1) {
4016     // FIXME: For now we just promote to an i8 vector and extract from that,
4017     // but this is probably not optimal.
4018     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4019     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4020     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4021   }
4022 
4023   // If this is a fixed vector, we need to convert it to a scalable vector.
4024   MVT ContainerVT = VecVT;
4025   if (VecVT.isFixedLengthVector()) {
4026     ContainerVT = getContainerForFixedLengthVector(VecVT);
4027     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4028   }
4029 
4030   // If the index is 0, the vector is already in the right position.
4031   if (!isNullConstant(Idx)) {
4032     // Use a VL of 1 to avoid processing more elements than we need.
4033     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4034     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4035     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4036     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4037                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4038   }
4039 
4040   if (!EltVT.isInteger()) {
4041     // Floating-point extracts are handled in TableGen.
4042     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4043                        DAG.getConstant(0, DL, XLenVT));
4044   }
4045 
4046   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4047   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4048 }
4049 
4050 // Some RVV intrinsics may claim that they want an integer operand to be
4051 // promoted or expanded.
4052 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4053                                           const RISCVSubtarget &Subtarget) {
4054   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4055           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4056          "Unexpected opcode");
4057 
4058   if (!Subtarget.hasVInstructions())
4059     return SDValue();
4060 
4061   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4062   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4063   SDLoc DL(Op);
4064 
4065   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4066       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4067   if (!II || !II->SplatOperand)
4068     return SDValue();
4069 
4070   unsigned SplatOp = II->SplatOperand + HasChain;
4071   assert(SplatOp < Op.getNumOperands());
4072 
4073   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4074   SDValue &ScalarOp = Operands[SplatOp];
4075   MVT OpVT = ScalarOp.getSimpleValueType();
4076   MVT XLenVT = Subtarget.getXLenVT();
4077 
4078   // If this isn't a scalar, or its type is XLenVT we're done.
4079   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4080     return SDValue();
4081 
4082   // Simplest case is that the operand needs to be promoted to XLenVT.
4083   if (OpVT.bitsLT(XLenVT)) {
4084     // If the operand is a constant, sign extend to increase our chances
4085     // of being able to use a .vi instruction. ANY_EXTEND would become a
4086     // a zero extend and the simm5 check in isel would fail.
4087     // FIXME: Should we ignore the upper bits in isel instead?
4088     unsigned ExtOpc =
4089         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4090     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4091     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4092   }
4093 
4094   // Use the previous operand to get the vXi64 VT. The result might be a mask
4095   // VT for compares. Using the previous operand assumes that the previous
4096   // operand will never have a smaller element size than a scalar operand and
4097   // that a widening operation never uses SEW=64.
4098   // NOTE: If this fails the below assert, we can probably just find the
4099   // element count from any operand or result and use it to construct the VT.
4100   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
4101   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4102 
4103   // The more complex case is when the scalar is larger than XLenVT.
4104   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4105          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4106 
4107   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4108   // on the instruction to sign-extend since SEW>XLEN.
4109   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4110     if (isInt<32>(CVal->getSExtValue())) {
4111       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4112       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4113     }
4114   }
4115 
4116   // We need to convert the scalar to a splat vector.
4117   // FIXME: Can we implicitly truncate the scalar if it is known to
4118   // be sign extended?
4119   // VL should be the last operand.
4120   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
4121   assert(VL.getValueType() == XLenVT);
4122   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
4123   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4124 }
4125 
4126 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4127                                                      SelectionDAG &DAG) const {
4128   unsigned IntNo = Op.getConstantOperandVal(0);
4129   SDLoc DL(Op);
4130   MVT XLenVT = Subtarget.getXLenVT();
4131 
4132   switch (IntNo) {
4133   default:
4134     break; // Don't custom lower most intrinsics.
4135   case Intrinsic::thread_pointer: {
4136     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4137     return DAG.getRegister(RISCV::X4, PtrVT);
4138   }
4139   case Intrinsic::riscv_orc_b:
4140     // Lower to the GORCI encoding for orc.b.
4141     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
4142                        DAG.getConstant(7, DL, XLenVT));
4143   case Intrinsic::riscv_grev:
4144   case Intrinsic::riscv_gorc: {
4145     unsigned Opc =
4146         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4147     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4148   }
4149   case Intrinsic::riscv_shfl:
4150   case Intrinsic::riscv_unshfl: {
4151     unsigned Opc =
4152         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4153     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4154   }
4155   case Intrinsic::riscv_bcompress:
4156   case Intrinsic::riscv_bdecompress: {
4157     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4158                                                        : RISCVISD::BDECOMPRESS;
4159     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4160   }
4161   case Intrinsic::riscv_vmv_x_s:
4162     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4163     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4164                        Op.getOperand(1));
4165   case Intrinsic::riscv_vmv_v_x:
4166     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4167                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4168   case Intrinsic::riscv_vfmv_v_f:
4169     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4170                        Op.getOperand(1), Op.getOperand(2));
4171   case Intrinsic::riscv_vmv_s_x: {
4172     SDValue Scalar = Op.getOperand(2);
4173 
4174     if (Scalar.getValueType().bitsLE(XLenVT)) {
4175       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4176       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4177                          Op.getOperand(1), Scalar, Op.getOperand(3));
4178     }
4179 
4180     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4181 
4182     // This is an i64 value that lives in two scalar registers. We have to
4183     // insert this in a convoluted way. First we build vXi64 splat containing
4184     // the/ two values that we assemble using some bit math. Next we'll use
4185     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4186     // to merge element 0 from our splat into the source vector.
4187     // FIXME: This is probably not the best way to do this, but it is
4188     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4189     // point.
4190     //   sw lo, (a0)
4191     //   sw hi, 4(a0)
4192     //   vlse vX, (a0)
4193     //
4194     //   vid.v      vVid
4195     //   vmseq.vx   mMask, vVid, 0
4196     //   vmerge.vvm vDest, vSrc, vVal, mMask
4197     MVT VT = Op.getSimpleValueType();
4198     SDValue Vec = Op.getOperand(1);
4199     SDValue VL = Op.getOperand(3);
4200 
4201     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4202     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4203                                       DAG.getConstant(0, DL, MVT::i32), VL);
4204 
4205     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4206     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4207     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4208     SDValue SelectCond =
4209         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4210                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4211     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4212                        Vec, VL);
4213   }
4214   case Intrinsic::riscv_vslide1up:
4215   case Intrinsic::riscv_vslide1down:
4216   case Intrinsic::riscv_vslide1up_mask:
4217   case Intrinsic::riscv_vslide1down_mask: {
4218     // We need to special case these when the scalar is larger than XLen.
4219     unsigned NumOps = Op.getNumOperands();
4220     bool IsMasked = NumOps == 7;
4221     unsigned OpOffset = IsMasked ? 1 : 0;
4222     SDValue Scalar = Op.getOperand(2 + OpOffset);
4223     if (Scalar.getValueType().bitsLE(XLenVT))
4224       break;
4225 
4226     // Splatting a sign extended constant is fine.
4227     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4228       if (isInt<32>(CVal->getSExtValue()))
4229         break;
4230 
4231     MVT VT = Op.getSimpleValueType();
4232     assert(VT.getVectorElementType() == MVT::i64 &&
4233            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4234 
4235     // Convert the vector source to the equivalent nxvXi32 vector.
4236     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4237     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4238 
4239     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4240                                    DAG.getConstant(0, DL, XLenVT));
4241     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4242                                    DAG.getConstant(1, DL, XLenVT));
4243 
4244     // Double the VL since we halved SEW.
4245     SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
4246     SDValue I32VL =
4247         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4248 
4249     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4250     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4251 
4252     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4253     // instructions.
4254     if (IntNo == Intrinsic::riscv_vslide1up ||
4255         IntNo == Intrinsic::riscv_vslide1up_mask) {
4256       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4257                         I32Mask, I32VL);
4258       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4259                         I32Mask, I32VL);
4260     } else {
4261       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4262                         I32Mask, I32VL);
4263       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4264                         I32Mask, I32VL);
4265     }
4266 
4267     // Convert back to nxvXi64.
4268     Vec = DAG.getBitcast(VT, Vec);
4269 
4270     if (!IsMasked)
4271       return Vec;
4272 
4273     // Apply mask after the operation.
4274     SDValue Mask = Op.getOperand(NumOps - 3);
4275     SDValue MaskedOff = Op.getOperand(1);
4276     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4277   }
4278   }
4279 
4280   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4281 }
4282 
4283 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4284                                                     SelectionDAG &DAG) const {
4285   unsigned IntNo = Op.getConstantOperandVal(1);
4286   switch (IntNo) {
4287   default:
4288     break;
4289   case Intrinsic::riscv_masked_strided_load: {
4290     SDLoc DL(Op);
4291     MVT XLenVT = Subtarget.getXLenVT();
4292 
4293     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4294     // the selection of the masked intrinsics doesn't do this for us.
4295     SDValue Mask = Op.getOperand(5);
4296     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4297 
4298     MVT VT = Op->getSimpleValueType(0);
4299     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4300 
4301     SDValue PassThru = Op.getOperand(2);
4302     if (!IsUnmasked) {
4303       MVT MaskVT =
4304           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4305       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4306       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4307     }
4308 
4309     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4310 
4311     SDValue IntID = DAG.getTargetConstant(
4312         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4313         XLenVT);
4314 
4315     auto *Load = cast<MemIntrinsicSDNode>(Op);
4316     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4317     if (!IsUnmasked)
4318       Ops.push_back(PassThru);
4319     Ops.push_back(Op.getOperand(3)); // Ptr
4320     Ops.push_back(Op.getOperand(4)); // Stride
4321     if (!IsUnmasked)
4322       Ops.push_back(Mask);
4323     Ops.push_back(VL);
4324     if (!IsUnmasked) {
4325       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4326       Ops.push_back(Policy);
4327     }
4328 
4329     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4330     SDValue Result =
4331         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4332                                 Load->getMemoryVT(), Load->getMemOperand());
4333     SDValue Chain = Result.getValue(1);
4334     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4335     return DAG.getMergeValues({Result, Chain}, DL);
4336   }
4337   }
4338 
4339   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4340 }
4341 
4342 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4343                                                  SelectionDAG &DAG) const {
4344   unsigned IntNo = Op.getConstantOperandVal(1);
4345   switch (IntNo) {
4346   default:
4347     break;
4348   case Intrinsic::riscv_masked_strided_store: {
4349     SDLoc DL(Op);
4350     MVT XLenVT = Subtarget.getXLenVT();
4351 
4352     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4353     // the selection of the masked intrinsics doesn't do this for us.
4354     SDValue Mask = Op.getOperand(5);
4355     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4356 
4357     SDValue Val = Op.getOperand(2);
4358     MVT VT = Val.getSimpleValueType();
4359     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4360 
4361     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4362     if (!IsUnmasked) {
4363       MVT MaskVT =
4364           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4365       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4366     }
4367 
4368     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4369 
4370     SDValue IntID = DAG.getTargetConstant(
4371         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4372         XLenVT);
4373 
4374     auto *Store = cast<MemIntrinsicSDNode>(Op);
4375     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4376     Ops.push_back(Val);
4377     Ops.push_back(Op.getOperand(3)); // Ptr
4378     Ops.push_back(Op.getOperand(4)); // Stride
4379     if (!IsUnmasked)
4380       Ops.push_back(Mask);
4381     Ops.push_back(VL);
4382 
4383     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4384                                    Ops, Store->getMemoryVT(),
4385                                    Store->getMemOperand());
4386   }
4387   }
4388 
4389   return SDValue();
4390 }
4391 
4392 static MVT getLMUL1VT(MVT VT) {
4393   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4394          "Unexpected vector MVT");
4395   return MVT::getScalableVectorVT(
4396       VT.getVectorElementType(),
4397       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4398 }
4399 
4400 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4401   switch (ISDOpcode) {
4402   default:
4403     llvm_unreachable("Unhandled reduction");
4404   case ISD::VECREDUCE_ADD:
4405     return RISCVISD::VECREDUCE_ADD_VL;
4406   case ISD::VECREDUCE_UMAX:
4407     return RISCVISD::VECREDUCE_UMAX_VL;
4408   case ISD::VECREDUCE_SMAX:
4409     return RISCVISD::VECREDUCE_SMAX_VL;
4410   case ISD::VECREDUCE_UMIN:
4411     return RISCVISD::VECREDUCE_UMIN_VL;
4412   case ISD::VECREDUCE_SMIN:
4413     return RISCVISD::VECREDUCE_SMIN_VL;
4414   case ISD::VECREDUCE_AND:
4415     return RISCVISD::VECREDUCE_AND_VL;
4416   case ISD::VECREDUCE_OR:
4417     return RISCVISD::VECREDUCE_OR_VL;
4418   case ISD::VECREDUCE_XOR:
4419     return RISCVISD::VECREDUCE_XOR_VL;
4420   }
4421 }
4422 
4423 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4424                                                          SelectionDAG &DAG,
4425                                                          bool IsVP) const {
4426   SDLoc DL(Op);
4427   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4428   MVT VecVT = Vec.getSimpleValueType();
4429   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4430           Op.getOpcode() == ISD::VECREDUCE_OR ||
4431           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4432           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4433           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4434           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4435          "Unexpected reduction lowering");
4436 
4437   MVT XLenVT = Subtarget.getXLenVT();
4438   assert(Op.getValueType() == XLenVT &&
4439          "Expected reduction output to be legalized to XLenVT");
4440 
4441   MVT ContainerVT = VecVT;
4442   if (VecVT.isFixedLengthVector()) {
4443     ContainerVT = getContainerForFixedLengthVector(VecVT);
4444     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4445   }
4446 
4447   SDValue Mask, VL;
4448   if (IsVP) {
4449     Mask = Op.getOperand(2);
4450     VL = Op.getOperand(3);
4451   } else {
4452     std::tie(Mask, VL) =
4453         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4454   }
4455 
4456   unsigned BaseOpc;
4457   ISD::CondCode CC;
4458   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4459 
4460   switch (Op.getOpcode()) {
4461   default:
4462     llvm_unreachable("Unhandled reduction");
4463   case ISD::VECREDUCE_AND:
4464   case ISD::VP_REDUCE_AND: {
4465     // vcpop ~x == 0
4466     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4467     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4468     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4469     CC = ISD::SETEQ;
4470     BaseOpc = ISD::AND;
4471     break;
4472   }
4473   case ISD::VECREDUCE_OR:
4474   case ISD::VP_REDUCE_OR:
4475     // vcpop x != 0
4476     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4477     CC = ISD::SETNE;
4478     BaseOpc = ISD::OR;
4479     break;
4480   case ISD::VECREDUCE_XOR:
4481   case ISD::VP_REDUCE_XOR: {
4482     // ((vcpop x) & 1) != 0
4483     SDValue One = DAG.getConstant(1, DL, XLenVT);
4484     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4485     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4486     CC = ISD::SETNE;
4487     BaseOpc = ISD::XOR;
4488     break;
4489   }
4490   }
4491 
4492   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4493 
4494   if (!IsVP)
4495     return SetCC;
4496 
4497   // Now include the start value in the operation.
4498   // Note that we must return the start value when no elements are operated
4499   // upon. The vcpop instructions we've emitted in each case above will return
4500   // 0 for an inactive vector, and so we've already received the neutral value:
4501   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4502   // can simply include the start value.
4503   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4504 }
4505 
4506 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4507                                             SelectionDAG &DAG) const {
4508   SDLoc DL(Op);
4509   SDValue Vec = Op.getOperand(0);
4510   EVT VecEVT = Vec.getValueType();
4511 
4512   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4513 
4514   // Due to ordering in legalize types we may have a vector type that needs to
4515   // be split. Do that manually so we can get down to a legal type.
4516   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4517          TargetLowering::TypeSplitVector) {
4518     SDValue Lo, Hi;
4519     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4520     VecEVT = Lo.getValueType();
4521     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4522   }
4523 
4524   // TODO: The type may need to be widened rather than split. Or widened before
4525   // it can be split.
4526   if (!isTypeLegal(VecEVT))
4527     return SDValue();
4528 
4529   MVT VecVT = VecEVT.getSimpleVT();
4530   MVT VecEltVT = VecVT.getVectorElementType();
4531   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4532 
4533   MVT ContainerVT = VecVT;
4534   if (VecVT.isFixedLengthVector()) {
4535     ContainerVT = getContainerForFixedLengthVector(VecVT);
4536     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4537   }
4538 
4539   MVT M1VT = getLMUL1VT(ContainerVT);
4540   MVT XLenVT = Subtarget.getXLenVT();
4541 
4542   SDValue Mask, VL;
4543   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4544 
4545   SDValue NeutralElem =
4546       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4547   SDValue IdentitySplat = lowerScalarSplat(
4548       NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4549   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4550                                   IdentitySplat, Mask, VL);
4551   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4552                              DAG.getConstant(0, DL, XLenVT));
4553   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4554 }
4555 
4556 // Given a reduction op, this function returns the matching reduction opcode,
4557 // the vector SDValue and the scalar SDValue required to lower this to a
4558 // RISCVISD node.
4559 static std::tuple<unsigned, SDValue, SDValue>
4560 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4561   SDLoc DL(Op);
4562   auto Flags = Op->getFlags();
4563   unsigned Opcode = Op.getOpcode();
4564   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4565   switch (Opcode) {
4566   default:
4567     llvm_unreachable("Unhandled reduction");
4568   case ISD::VECREDUCE_FADD: {
4569     // Use positive zero if we can. It is cheaper to materialize.
4570     SDValue Zero =
4571         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
4572     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
4573   }
4574   case ISD::VECREDUCE_SEQ_FADD:
4575     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4576                            Op.getOperand(0));
4577   case ISD::VECREDUCE_FMIN:
4578     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4579                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4580   case ISD::VECREDUCE_FMAX:
4581     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4582                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4583   }
4584 }
4585 
4586 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4587                                               SelectionDAG &DAG) const {
4588   SDLoc DL(Op);
4589   MVT VecEltVT = Op.getSimpleValueType();
4590 
4591   unsigned RVVOpcode;
4592   SDValue VectorVal, ScalarVal;
4593   std::tie(RVVOpcode, VectorVal, ScalarVal) =
4594       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4595   MVT VecVT = VectorVal.getSimpleValueType();
4596 
4597   MVT ContainerVT = VecVT;
4598   if (VecVT.isFixedLengthVector()) {
4599     ContainerVT = getContainerForFixedLengthVector(VecVT);
4600     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4601   }
4602 
4603   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4604   MVT XLenVT = Subtarget.getXLenVT();
4605 
4606   SDValue Mask, VL;
4607   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4608 
4609   SDValue ScalarSplat = lowerScalarSplat(
4610       ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4611   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4612                                   VectorVal, ScalarSplat, Mask, VL);
4613   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4614                      DAG.getConstant(0, DL, XLenVT));
4615 }
4616 
4617 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4618   switch (ISDOpcode) {
4619   default:
4620     llvm_unreachable("Unhandled reduction");
4621   case ISD::VP_REDUCE_ADD:
4622     return RISCVISD::VECREDUCE_ADD_VL;
4623   case ISD::VP_REDUCE_UMAX:
4624     return RISCVISD::VECREDUCE_UMAX_VL;
4625   case ISD::VP_REDUCE_SMAX:
4626     return RISCVISD::VECREDUCE_SMAX_VL;
4627   case ISD::VP_REDUCE_UMIN:
4628     return RISCVISD::VECREDUCE_UMIN_VL;
4629   case ISD::VP_REDUCE_SMIN:
4630     return RISCVISD::VECREDUCE_SMIN_VL;
4631   case ISD::VP_REDUCE_AND:
4632     return RISCVISD::VECREDUCE_AND_VL;
4633   case ISD::VP_REDUCE_OR:
4634     return RISCVISD::VECREDUCE_OR_VL;
4635   case ISD::VP_REDUCE_XOR:
4636     return RISCVISD::VECREDUCE_XOR_VL;
4637   case ISD::VP_REDUCE_FADD:
4638     return RISCVISD::VECREDUCE_FADD_VL;
4639   case ISD::VP_REDUCE_SEQ_FADD:
4640     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4641   case ISD::VP_REDUCE_FMAX:
4642     return RISCVISD::VECREDUCE_FMAX_VL;
4643   case ISD::VP_REDUCE_FMIN:
4644     return RISCVISD::VECREDUCE_FMIN_VL;
4645   }
4646 }
4647 
4648 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4649                                            SelectionDAG &DAG) const {
4650   SDLoc DL(Op);
4651   SDValue Vec = Op.getOperand(1);
4652   EVT VecEVT = Vec.getValueType();
4653 
4654   // TODO: The type may need to be widened rather than split. Or widened before
4655   // it can be split.
4656   if (!isTypeLegal(VecEVT))
4657     return SDValue();
4658 
4659   MVT VecVT = VecEVT.getSimpleVT();
4660   MVT VecEltVT = VecVT.getVectorElementType();
4661   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
4662 
4663   MVT ContainerVT = VecVT;
4664   if (VecVT.isFixedLengthVector()) {
4665     ContainerVT = getContainerForFixedLengthVector(VecVT);
4666     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4667   }
4668 
4669   SDValue VL = Op.getOperand(3);
4670   SDValue Mask = Op.getOperand(2);
4671 
4672   MVT M1VT = getLMUL1VT(ContainerVT);
4673   MVT XLenVT = Subtarget.getXLenVT();
4674   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
4675 
4676   SDValue StartSplat =
4677       lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT,
4678                        DL, DAG, Subtarget);
4679   SDValue Reduction =
4680       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
4681   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
4682                              DAG.getConstant(0, DL, XLenVT));
4683   if (!VecVT.isInteger())
4684     return Elt0;
4685   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4686 }
4687 
4688 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4689                                                    SelectionDAG &DAG) const {
4690   SDValue Vec = Op.getOperand(0);
4691   SDValue SubVec = Op.getOperand(1);
4692   MVT VecVT = Vec.getSimpleValueType();
4693   MVT SubVecVT = SubVec.getSimpleValueType();
4694 
4695   SDLoc DL(Op);
4696   MVT XLenVT = Subtarget.getXLenVT();
4697   unsigned OrigIdx = Op.getConstantOperandVal(2);
4698   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4699 
4700   // We don't have the ability to slide mask vectors up indexed by their i1
4701   // elements; the smallest we can do is i8. Often we are able to bitcast to
4702   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4703   // into a scalable one, we might not necessarily have enough scalable
4704   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
4705   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4706       (OrigIdx != 0 || !Vec.isUndef())) {
4707     if (VecVT.getVectorMinNumElements() >= 8 &&
4708         SubVecVT.getVectorMinNumElements() >= 8) {
4709       assert(OrigIdx % 8 == 0 && "Invalid index");
4710       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4711              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4712              "Unexpected mask vector lowering");
4713       OrigIdx /= 8;
4714       SubVecVT =
4715           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4716                            SubVecVT.isScalableVector());
4717       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4718                                VecVT.isScalableVector());
4719       Vec = DAG.getBitcast(VecVT, Vec);
4720       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4721     } else {
4722       // We can't slide this mask vector up indexed by its i1 elements.
4723       // This poses a problem when we wish to insert a scalable vector which
4724       // can't be re-expressed as a larger type. Just choose the slow path and
4725       // extend to a larger type, then truncate back down.
4726       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4727       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4728       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4729       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4730       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4731                         Op.getOperand(2));
4732       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4733       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4734     }
4735   }
4736 
4737   // If the subvector vector is a fixed-length type, we cannot use subregister
4738   // manipulation to simplify the codegen; we don't know which register of a
4739   // LMUL group contains the specific subvector as we only know the minimum
4740   // register size. Therefore we must slide the vector group up the full
4741   // amount.
4742   if (SubVecVT.isFixedLengthVector()) {
4743     if (OrigIdx == 0 && Vec.isUndef())
4744       return Op;
4745     MVT ContainerVT = VecVT;
4746     if (VecVT.isFixedLengthVector()) {
4747       ContainerVT = getContainerForFixedLengthVector(VecVT);
4748       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4749     }
4750     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4751                          DAG.getUNDEF(ContainerVT), SubVec,
4752                          DAG.getConstant(0, DL, XLenVT));
4753     SDValue Mask =
4754         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4755     // Set the vector length to only the number of elements we care about. Note
4756     // that for slideup this includes the offset.
4757     SDValue VL =
4758         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4759     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4760     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4761                                   SubVec, SlideupAmt, Mask, VL);
4762     if (VecVT.isFixedLengthVector())
4763       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4764     return DAG.getBitcast(Op.getValueType(), Slideup);
4765   }
4766 
4767   unsigned SubRegIdx, RemIdx;
4768   std::tie(SubRegIdx, RemIdx) =
4769       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4770           VecVT, SubVecVT, OrigIdx, TRI);
4771 
4772   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4773   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4774                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4775                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4776 
4777   // 1. If the Idx has been completely eliminated and this subvector's size is
4778   // a vector register or a multiple thereof, or the surrounding elements are
4779   // undef, then this is a subvector insert which naturally aligns to a vector
4780   // register. These can easily be handled using subregister manipulation.
4781   // 2. If the subvector is smaller than a vector register, then the insertion
4782   // must preserve the undisturbed elements of the register. We do this by
4783   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4784   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4785   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4786   // LMUL=1 type back into the larger vector (resolving to another subregister
4787   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4788   // to avoid allocating a large register group to hold our subvector.
4789   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4790     return Op;
4791 
4792   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
4793   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
4794   // (in our case undisturbed). This means we can set up a subvector insertion
4795   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4796   // size of the subvector.
4797   MVT InterSubVT = VecVT;
4798   SDValue AlignedExtract = Vec;
4799   unsigned AlignedIdx = OrigIdx - RemIdx;
4800   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4801     InterSubVT = getLMUL1VT(VecVT);
4802     // Extract a subvector equal to the nearest full vector register type. This
4803     // should resolve to a EXTRACT_SUBREG instruction.
4804     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4805                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4806   }
4807 
4808   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4809   // For scalable vectors this must be further multiplied by vscale.
4810   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4811 
4812   SDValue Mask, VL;
4813   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4814 
4815   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4816   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4817   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4818   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4819 
4820   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4821                        DAG.getUNDEF(InterSubVT), SubVec,
4822                        DAG.getConstant(0, DL, XLenVT));
4823 
4824   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4825                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4826 
4827   // If required, insert this subvector back into the correct vector register.
4828   // This should resolve to an INSERT_SUBREG instruction.
4829   if (VecVT.bitsGT(InterSubVT))
4830     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4831                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4832 
4833   // We might have bitcast from a mask type: cast back to the original type if
4834   // required.
4835   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4836 }
4837 
4838 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4839                                                     SelectionDAG &DAG) const {
4840   SDValue Vec = Op.getOperand(0);
4841   MVT SubVecVT = Op.getSimpleValueType();
4842   MVT VecVT = Vec.getSimpleValueType();
4843 
4844   SDLoc DL(Op);
4845   MVT XLenVT = Subtarget.getXLenVT();
4846   unsigned OrigIdx = Op.getConstantOperandVal(1);
4847   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4848 
4849   // We don't have the ability to slide mask vectors down indexed by their i1
4850   // elements; the smallest we can do is i8. Often we are able to bitcast to
4851   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4852   // from a scalable one, we might not necessarily have enough scalable
4853   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4854   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4855     if (VecVT.getVectorMinNumElements() >= 8 &&
4856         SubVecVT.getVectorMinNumElements() >= 8) {
4857       assert(OrigIdx % 8 == 0 && "Invalid index");
4858       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4859              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4860              "Unexpected mask vector lowering");
4861       OrigIdx /= 8;
4862       SubVecVT =
4863           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4864                            SubVecVT.isScalableVector());
4865       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4866                                VecVT.isScalableVector());
4867       Vec = DAG.getBitcast(VecVT, Vec);
4868     } else {
4869       // We can't slide this mask vector down, indexed by its i1 elements.
4870       // This poses a problem when we wish to extract a scalable vector which
4871       // can't be re-expressed as a larger type. Just choose the slow path and
4872       // extend to a larger type, then truncate back down.
4873       // TODO: We could probably improve this when extracting certain fixed
4874       // from fixed, where we can extract as i8 and shift the correct element
4875       // right to reach the desired subvector?
4876       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4877       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4878       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4879       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4880                         Op.getOperand(1));
4881       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4882       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4883     }
4884   }
4885 
4886   // If the subvector vector is a fixed-length type, we cannot use subregister
4887   // manipulation to simplify the codegen; we don't know which register of a
4888   // LMUL group contains the specific subvector as we only know the minimum
4889   // register size. Therefore we must slide the vector group down the full
4890   // amount.
4891   if (SubVecVT.isFixedLengthVector()) {
4892     // With an index of 0 this is a cast-like subvector, which can be performed
4893     // with subregister operations.
4894     if (OrigIdx == 0)
4895       return Op;
4896     MVT ContainerVT = VecVT;
4897     if (VecVT.isFixedLengthVector()) {
4898       ContainerVT = getContainerForFixedLengthVector(VecVT);
4899       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4900     }
4901     SDValue Mask =
4902         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4903     // Set the vector length to only the number of elements we care about. This
4904     // avoids sliding down elements we're going to discard straight away.
4905     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4906     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4907     SDValue Slidedown =
4908         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4909                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4910     // Now we can use a cast-like subvector extract to get the result.
4911     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4912                             DAG.getConstant(0, DL, XLenVT));
4913     return DAG.getBitcast(Op.getValueType(), Slidedown);
4914   }
4915 
4916   unsigned SubRegIdx, RemIdx;
4917   std::tie(SubRegIdx, RemIdx) =
4918       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4919           VecVT, SubVecVT, OrigIdx, TRI);
4920 
4921   // If the Idx has been completely eliminated then this is a subvector extract
4922   // which naturally aligns to a vector register. These can easily be handled
4923   // using subregister manipulation.
4924   if (RemIdx == 0)
4925     return Op;
4926 
4927   // Else we must shift our vector register directly to extract the subvector.
4928   // Do this using VSLIDEDOWN.
4929 
4930   // If the vector type is an LMUL-group type, extract a subvector equal to the
4931   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
4932   // instruction.
4933   MVT InterSubVT = VecVT;
4934   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4935     InterSubVT = getLMUL1VT(VecVT);
4936     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4937                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4938   }
4939 
4940   // Slide this vector register down by the desired number of elements in order
4941   // to place the desired subvector starting at element 0.
4942   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4943   // For scalable vectors this must be further multiplied by vscale.
4944   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4945 
4946   SDValue Mask, VL;
4947   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4948   SDValue Slidedown =
4949       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4950                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4951 
4952   // Now the vector is in the right position, extract our final subvector. This
4953   // should resolve to a COPY.
4954   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4955                           DAG.getConstant(0, DL, XLenVT));
4956 
4957   // We might have bitcast from a mask type: cast back to the original type if
4958   // required.
4959   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4960 }
4961 
4962 // Lower step_vector to the vid instruction. Any non-identity step value must
4963 // be accounted for my manual expansion.
4964 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4965                                               SelectionDAG &DAG) const {
4966   SDLoc DL(Op);
4967   MVT VT = Op.getSimpleValueType();
4968   MVT XLenVT = Subtarget.getXLenVT();
4969   SDValue Mask, VL;
4970   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4971   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4972   uint64_t StepValImm = Op.getConstantOperandVal(0);
4973   if (StepValImm != 1) {
4974     if (isPowerOf2_64(StepValImm)) {
4975       SDValue StepVal =
4976           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4977                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4978       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4979     } else {
4980       SDValue StepVal = lowerScalarSplat(
4981           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4982           DL, DAG, Subtarget);
4983       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4984     }
4985   }
4986   return StepVec;
4987 }
4988 
4989 // Implement vector_reverse using vrgather.vv with indices determined by
4990 // subtracting the id of each element from (VLMAX-1). This will convert
4991 // the indices like so:
4992 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4993 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4994 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4995                                                  SelectionDAG &DAG) const {
4996   SDLoc DL(Op);
4997   MVT VecVT = Op.getSimpleValueType();
4998   unsigned EltSize = VecVT.getScalarSizeInBits();
4999   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5000 
5001   unsigned MaxVLMAX = 0;
5002   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5003   if (VectorBitsMax != 0)
5004     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5005 
5006   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5007   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5008 
5009   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5010   // to use vrgatherei16.vv.
5011   // TODO: It's also possible to use vrgatherei16.vv for other types to
5012   // decrease register width for the index calculation.
5013   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5014     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5015     // Reverse each half, then reassemble them in reverse order.
5016     // NOTE: It's also possible that after splitting that VLMAX no longer
5017     // requires vrgatherei16.vv.
5018     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5019       SDValue Lo, Hi;
5020       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5021       EVT LoVT, HiVT;
5022       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5023       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5024       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5025       // Reassemble the low and high pieces reversed.
5026       // FIXME: This is a CONCAT_VECTORS.
5027       SDValue Res =
5028           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5029                       DAG.getIntPtrConstant(0, DL));
5030       return DAG.getNode(
5031           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5032           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5033     }
5034 
5035     // Just promote the int type to i16 which will double the LMUL.
5036     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5037     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5038   }
5039 
5040   MVT XLenVT = Subtarget.getXLenVT();
5041   SDValue Mask, VL;
5042   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5043 
5044   // Calculate VLMAX-1 for the desired SEW.
5045   unsigned MinElts = VecVT.getVectorMinNumElements();
5046   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5047                               DAG.getConstant(MinElts, DL, XLenVT));
5048   SDValue VLMinus1 =
5049       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5050 
5051   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5052   bool IsRV32E64 =
5053       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5054   SDValue SplatVL;
5055   if (!IsRV32E64)
5056     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5057   else
5058     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
5059 
5060   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5061   SDValue Indices =
5062       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5063 
5064   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5065 }
5066 
5067 SDValue
5068 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5069                                                      SelectionDAG &DAG) const {
5070   SDLoc DL(Op);
5071   auto *Load = cast<LoadSDNode>(Op);
5072 
5073   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5074                                         Load->getMemoryVT(),
5075                                         *Load->getMemOperand()) &&
5076          "Expecting a correctly-aligned load");
5077 
5078   MVT VT = Op.getSimpleValueType();
5079   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5080 
5081   SDValue VL =
5082       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5083 
5084   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5085   SDValue NewLoad = DAG.getMemIntrinsicNode(
5086       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5087       Load->getMemoryVT(), Load->getMemOperand());
5088 
5089   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5090   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5091 }
5092 
5093 SDValue
5094 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5095                                                       SelectionDAG &DAG) const {
5096   SDLoc DL(Op);
5097   auto *Store = cast<StoreSDNode>(Op);
5098 
5099   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5100                                         Store->getMemoryVT(),
5101                                         *Store->getMemOperand()) &&
5102          "Expecting a correctly-aligned store");
5103 
5104   SDValue StoreVal = Store->getValue();
5105   MVT VT = StoreVal.getSimpleValueType();
5106 
5107   // If the size less than a byte, we need to pad with zeros to make a byte.
5108   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5109     VT = MVT::v8i1;
5110     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5111                            DAG.getConstant(0, DL, VT), StoreVal,
5112                            DAG.getIntPtrConstant(0, DL));
5113   }
5114 
5115   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5116 
5117   SDValue VL =
5118       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5119 
5120   SDValue NewValue =
5121       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5122   return DAG.getMemIntrinsicNode(
5123       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5124       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5125       Store->getMemoryVT(), Store->getMemOperand());
5126 }
5127 
5128 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5129                                              SelectionDAG &DAG) const {
5130   SDLoc DL(Op);
5131   MVT VT = Op.getSimpleValueType();
5132 
5133   const auto *MemSD = cast<MemSDNode>(Op);
5134   EVT MemVT = MemSD->getMemoryVT();
5135   MachineMemOperand *MMO = MemSD->getMemOperand();
5136   SDValue Chain = MemSD->getChain();
5137   SDValue BasePtr = MemSD->getBasePtr();
5138 
5139   SDValue Mask, PassThru, VL;
5140   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5141     Mask = VPLoad->getMask();
5142     PassThru = DAG.getUNDEF(VT);
5143     VL = VPLoad->getVectorLength();
5144   } else {
5145     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5146     Mask = MLoad->getMask();
5147     PassThru = MLoad->getPassThru();
5148   }
5149 
5150   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5151 
5152   MVT XLenVT = Subtarget.getXLenVT();
5153 
5154   MVT ContainerVT = VT;
5155   if (VT.isFixedLengthVector()) {
5156     ContainerVT = getContainerForFixedLengthVector(VT);
5157     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5158     if (!IsUnmasked) {
5159       MVT MaskVT =
5160           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5161       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5162     }
5163   }
5164 
5165   if (!VL)
5166     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5167 
5168   unsigned IntID =
5169       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5170   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5171   if (!IsUnmasked)
5172     Ops.push_back(PassThru);
5173   Ops.push_back(BasePtr);
5174   if (!IsUnmasked)
5175     Ops.push_back(Mask);
5176   Ops.push_back(VL);
5177   if (!IsUnmasked)
5178     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5179 
5180   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5181 
5182   SDValue Result =
5183       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5184   Chain = Result.getValue(1);
5185 
5186   if (VT.isFixedLengthVector())
5187     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5188 
5189   return DAG.getMergeValues({Result, Chain}, DL);
5190 }
5191 
5192 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5193                                               SelectionDAG &DAG) const {
5194   SDLoc DL(Op);
5195 
5196   const auto *MemSD = cast<MemSDNode>(Op);
5197   EVT MemVT = MemSD->getMemoryVT();
5198   MachineMemOperand *MMO = MemSD->getMemOperand();
5199   SDValue Chain = MemSD->getChain();
5200   SDValue BasePtr = MemSD->getBasePtr();
5201   SDValue Val, Mask, VL;
5202 
5203   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5204     Val = VPStore->getValue();
5205     Mask = VPStore->getMask();
5206     VL = VPStore->getVectorLength();
5207   } else {
5208     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5209     Val = MStore->getValue();
5210     Mask = MStore->getMask();
5211   }
5212 
5213   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5214 
5215   MVT VT = Val.getSimpleValueType();
5216   MVT XLenVT = Subtarget.getXLenVT();
5217 
5218   MVT ContainerVT = VT;
5219   if (VT.isFixedLengthVector()) {
5220     ContainerVT = getContainerForFixedLengthVector(VT);
5221 
5222     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5223     if (!IsUnmasked) {
5224       MVT MaskVT =
5225           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5226       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5227     }
5228   }
5229 
5230   if (!VL)
5231     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5232 
5233   unsigned IntID =
5234       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5235   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5236   Ops.push_back(Val);
5237   Ops.push_back(BasePtr);
5238   if (!IsUnmasked)
5239     Ops.push_back(Mask);
5240   Ops.push_back(VL);
5241 
5242   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5243                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5244 }
5245 
5246 SDValue
5247 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5248                                                       SelectionDAG &DAG) const {
5249   MVT InVT = Op.getOperand(0).getSimpleValueType();
5250   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5251 
5252   MVT VT = Op.getSimpleValueType();
5253 
5254   SDValue Op1 =
5255       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5256   SDValue Op2 =
5257       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5258 
5259   SDLoc DL(Op);
5260   SDValue VL =
5261       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5262 
5263   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5264   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5265 
5266   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5267                             Op.getOperand(2), Mask, VL);
5268 
5269   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5270 }
5271 
5272 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5273     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5274   MVT VT = Op.getSimpleValueType();
5275 
5276   if (VT.getVectorElementType() == MVT::i1)
5277     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5278 
5279   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5280 }
5281 
5282 SDValue
5283 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5284                                                       SelectionDAG &DAG) const {
5285   unsigned Opc;
5286   switch (Op.getOpcode()) {
5287   default: llvm_unreachable("Unexpected opcode!");
5288   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5289   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5290   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5291   }
5292 
5293   return lowerToScalableOp(Op, DAG, Opc);
5294 }
5295 
5296 // Lower vector ABS to smax(X, sub(0, X)).
5297 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5298   SDLoc DL(Op);
5299   MVT VT = Op.getSimpleValueType();
5300   SDValue X = Op.getOperand(0);
5301 
5302   assert(VT.isFixedLengthVector() && "Unexpected type");
5303 
5304   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5305   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5306 
5307   SDValue Mask, VL;
5308   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5309 
5310   SDValue SplatZero =
5311       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5312                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5313   SDValue NegX =
5314       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5315   SDValue Max =
5316       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5317 
5318   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5319 }
5320 
5321 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5322     SDValue Op, SelectionDAG &DAG) const {
5323   SDLoc DL(Op);
5324   MVT VT = Op.getSimpleValueType();
5325   SDValue Mag = Op.getOperand(0);
5326   SDValue Sign = Op.getOperand(1);
5327   assert(Mag.getValueType() == Sign.getValueType() &&
5328          "Can only handle COPYSIGN with matching types.");
5329 
5330   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5331   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5332   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5333 
5334   SDValue Mask, VL;
5335   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5336 
5337   SDValue CopySign =
5338       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5339 
5340   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5341 }
5342 
5343 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5344     SDValue Op, SelectionDAG &DAG) const {
5345   MVT VT = Op.getSimpleValueType();
5346   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5347 
5348   MVT I1ContainerVT =
5349       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5350 
5351   SDValue CC =
5352       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5353   SDValue Op1 =
5354       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5355   SDValue Op2 =
5356       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5357 
5358   SDLoc DL(Op);
5359   SDValue Mask, VL;
5360   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5361 
5362   SDValue Select =
5363       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5364 
5365   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5366 }
5367 
5368 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5369                                                unsigned NewOpc,
5370                                                bool HasMask) const {
5371   MVT VT = Op.getSimpleValueType();
5372   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5373 
5374   // Create list of operands by converting existing ones to scalable types.
5375   SmallVector<SDValue, 6> Ops;
5376   for (const SDValue &V : Op->op_values()) {
5377     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5378 
5379     // Pass through non-vector operands.
5380     if (!V.getValueType().isVector()) {
5381       Ops.push_back(V);
5382       continue;
5383     }
5384 
5385     // "cast" fixed length vector to a scalable vector.
5386     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5387            "Only fixed length vectors are supported!");
5388     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5389   }
5390 
5391   SDLoc DL(Op);
5392   SDValue Mask, VL;
5393   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5394   if (HasMask)
5395     Ops.push_back(Mask);
5396   Ops.push_back(VL);
5397 
5398   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5399   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5400 }
5401 
5402 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5403 // * Operands of each node are assumed to be in the same order.
5404 // * The EVL operand is promoted from i32 to i64 on RV64.
5405 // * Fixed-length vectors are converted to their scalable-vector container
5406 //   types.
5407 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5408                                        unsigned RISCVISDOpc) const {
5409   SDLoc DL(Op);
5410   MVT VT = Op.getSimpleValueType();
5411   SmallVector<SDValue, 4> Ops;
5412 
5413   for (const auto &OpIdx : enumerate(Op->ops())) {
5414     SDValue V = OpIdx.value();
5415     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5416     // Pass through operands which aren't fixed-length vectors.
5417     if (!V.getValueType().isFixedLengthVector()) {
5418       Ops.push_back(V);
5419       continue;
5420     }
5421     // "cast" fixed length vector to a scalable vector.
5422     MVT OpVT = V.getSimpleValueType();
5423     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5424     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5425            "Only fixed length vectors are supported!");
5426     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5427   }
5428 
5429   if (!VT.isFixedLengthVector())
5430     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5431 
5432   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5433 
5434   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5435 
5436   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5437 }
5438 
5439 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5440                                             unsigned MaskOpc,
5441                                             unsigned VecOpc) const {
5442   MVT VT = Op.getSimpleValueType();
5443   if (VT.getVectorElementType() != MVT::i1)
5444     return lowerVPOp(Op, DAG, VecOpc);
5445 
5446   // It is safe to drop mask parameter as masked-off elements are undef.
5447   SDValue Op1 = Op->getOperand(0);
5448   SDValue Op2 = Op->getOperand(1);
5449   SDValue VL = Op->getOperand(3);
5450 
5451   MVT ContainerVT = VT;
5452   const bool IsFixed = VT.isFixedLengthVector();
5453   if (IsFixed) {
5454     ContainerVT = getContainerForFixedLengthVector(VT);
5455     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
5456     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
5457   }
5458 
5459   SDLoc DL(Op);
5460   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
5461   if (!IsFixed)
5462     return Val;
5463   return convertFromScalableVector(VT, Val, DAG, Subtarget);
5464 }
5465 
5466 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5467 // matched to a RVV indexed load. The RVV indexed load instructions only
5468 // support the "unsigned unscaled" addressing mode; indices are implicitly
5469 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5470 // signed or scaled indexing is extended to the XLEN value type and scaled
5471 // accordingly.
5472 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5473                                                SelectionDAG &DAG) const {
5474   SDLoc DL(Op);
5475   MVT VT = Op.getSimpleValueType();
5476 
5477   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5478   EVT MemVT = MemSD->getMemoryVT();
5479   MachineMemOperand *MMO = MemSD->getMemOperand();
5480   SDValue Chain = MemSD->getChain();
5481   SDValue BasePtr = MemSD->getBasePtr();
5482 
5483   ISD::LoadExtType LoadExtType;
5484   SDValue Index, Mask, PassThru, VL;
5485 
5486   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5487     Index = VPGN->getIndex();
5488     Mask = VPGN->getMask();
5489     PassThru = DAG.getUNDEF(VT);
5490     VL = VPGN->getVectorLength();
5491     // VP doesn't support extending loads.
5492     LoadExtType = ISD::NON_EXTLOAD;
5493   } else {
5494     // Else it must be a MGATHER.
5495     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5496     Index = MGN->getIndex();
5497     Mask = MGN->getMask();
5498     PassThru = MGN->getPassThru();
5499     LoadExtType = MGN->getExtensionType();
5500   }
5501 
5502   MVT IndexVT = Index.getSimpleValueType();
5503   MVT XLenVT = Subtarget.getXLenVT();
5504 
5505   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5506          "Unexpected VTs!");
5507   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5508   // Targets have to explicitly opt-in for extending vector loads.
5509   assert(LoadExtType == ISD::NON_EXTLOAD &&
5510          "Unexpected extending MGATHER/VP_GATHER");
5511   (void)LoadExtType;
5512 
5513   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5514   // the selection of the masked intrinsics doesn't do this for us.
5515   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5516 
5517   MVT ContainerVT = VT;
5518   if (VT.isFixedLengthVector()) {
5519     // We need to use the larger of the result and index type to determine the
5520     // scalable type to use so we don't increase LMUL for any operand/result.
5521     if (VT.bitsGE(IndexVT)) {
5522       ContainerVT = getContainerForFixedLengthVector(VT);
5523       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5524                                  ContainerVT.getVectorElementCount());
5525     } else {
5526       IndexVT = getContainerForFixedLengthVector(IndexVT);
5527       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
5528                                      IndexVT.getVectorElementCount());
5529     }
5530 
5531     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5532 
5533     if (!IsUnmasked) {
5534       MVT MaskVT =
5535           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5536       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5537       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5538     }
5539   }
5540 
5541   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
5542       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5543       Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
5544   }
5545 
5546   if (!VL)
5547     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5548 
5549   unsigned IntID =
5550       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
5551   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5552   if (!IsUnmasked)
5553     Ops.push_back(PassThru);
5554   Ops.push_back(BasePtr);
5555   Ops.push_back(Index);
5556   if (!IsUnmasked)
5557     Ops.push_back(Mask);
5558   Ops.push_back(VL);
5559   if (!IsUnmasked)
5560     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5561 
5562   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5563   SDValue Result =
5564       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5565   Chain = Result.getValue(1);
5566 
5567   if (VT.isFixedLengthVector())
5568     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5569 
5570   return DAG.getMergeValues({Result, Chain}, DL);
5571 }
5572 
5573 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
5574 // matched to a RVV indexed store. The RVV indexed store instructions only
5575 // support the "unsigned unscaled" addressing mode; indices are implicitly
5576 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5577 // signed or scaled indexing is extended to the XLEN value type and scaled
5578 // accordingly.
5579 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
5580                                                 SelectionDAG &DAG) const {
5581   SDLoc DL(Op);
5582   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5583   EVT MemVT = MemSD->getMemoryVT();
5584   MachineMemOperand *MMO = MemSD->getMemOperand();
5585   SDValue Chain = MemSD->getChain();
5586   SDValue BasePtr = MemSD->getBasePtr();
5587 
5588   bool IsTruncatingStore = false;
5589   SDValue Index, Mask, Val, VL;
5590 
5591   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
5592     Index = VPSN->getIndex();
5593     Mask = VPSN->getMask();
5594     Val = VPSN->getValue();
5595     VL = VPSN->getVectorLength();
5596     // VP doesn't support truncating stores.
5597     IsTruncatingStore = false;
5598   } else {
5599     // Else it must be a MSCATTER.
5600     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
5601     Index = MSN->getIndex();
5602     Mask = MSN->getMask();
5603     Val = MSN->getValue();
5604     IsTruncatingStore = MSN->isTruncatingStore();
5605   }
5606 
5607   MVT VT = Val.getSimpleValueType();
5608   MVT IndexVT = Index.getSimpleValueType();
5609   MVT XLenVT = Subtarget.getXLenVT();
5610 
5611   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5612          "Unexpected VTs!");
5613   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5614   // Targets have to explicitly opt-in for extending vector loads and
5615   // truncating vector stores.
5616   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
5617   (void)IsTruncatingStore;
5618 
5619   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5620   // the selection of the masked intrinsics doesn't do this for us.
5621   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5622 
5623   MVT ContainerVT = VT;
5624   if (VT.isFixedLengthVector()) {
5625     // We need to use the larger of the value and index type to determine the
5626     // scalable type to use so we don't increase LMUL for any operand/result.
5627     if (VT.bitsGE(IndexVT)) {
5628       ContainerVT = getContainerForFixedLengthVector(VT);
5629       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5630                                  ContainerVT.getVectorElementCount());
5631     } else {
5632       IndexVT = getContainerForFixedLengthVector(IndexVT);
5633       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
5634                                      IndexVT.getVectorElementCount());
5635     }
5636 
5637     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5638     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5639 
5640     if (!IsUnmasked) {
5641       MVT MaskVT =
5642           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5643       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5644     }
5645   }
5646 
5647   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
5648       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5649       Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
5650   }
5651 
5652   if (!VL)
5653     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5654 
5655   unsigned IntID =
5656       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
5657   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5658   Ops.push_back(Val);
5659   Ops.push_back(BasePtr);
5660   Ops.push_back(Index);
5661   if (!IsUnmasked)
5662     Ops.push_back(Mask);
5663   Ops.push_back(VL);
5664 
5665   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5666                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5667 }
5668 
5669 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
5670                                                SelectionDAG &DAG) const {
5671   const MVT XLenVT = Subtarget.getXLenVT();
5672   SDLoc DL(Op);
5673   SDValue Chain = Op->getOperand(0);
5674   SDValue SysRegNo = DAG.getTargetConstant(
5675       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5676   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
5677   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
5678 
5679   // Encoding used for rounding mode in RISCV differs from that used in
5680   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
5681   // table, which consists of a sequence of 4-bit fields, each representing
5682   // corresponding FLT_ROUNDS mode.
5683   static const int Table =
5684       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
5685       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
5686       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
5687       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
5688       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
5689 
5690   SDValue Shift =
5691       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
5692   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5693                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5694   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5695                                DAG.getConstant(7, DL, XLenVT));
5696 
5697   return DAG.getMergeValues({Masked, Chain}, DL);
5698 }
5699 
5700 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
5701                                                SelectionDAG &DAG) const {
5702   const MVT XLenVT = Subtarget.getXLenVT();
5703   SDLoc DL(Op);
5704   SDValue Chain = Op->getOperand(0);
5705   SDValue RMValue = Op->getOperand(1);
5706   SDValue SysRegNo = DAG.getTargetConstant(
5707       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5708 
5709   // Encoding used for rounding mode in RISCV differs from that used in
5710   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
5711   // a table, which consists of a sequence of 4-bit fields, each representing
5712   // corresponding RISCV mode.
5713   static const unsigned Table =
5714       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
5715       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
5716       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
5717       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
5718       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
5719 
5720   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
5721                               DAG.getConstant(2, DL, XLenVT));
5722   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5723                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5724   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5725                         DAG.getConstant(0x7, DL, XLenVT));
5726   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
5727                      RMValue);
5728 }
5729 
5730 // Returns the opcode of the target-specific SDNode that implements the 32-bit
5731 // form of the given Opcode.
5732 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
5733   switch (Opcode) {
5734   default:
5735     llvm_unreachable("Unexpected opcode");
5736   case ISD::SHL:
5737     return RISCVISD::SLLW;
5738   case ISD::SRA:
5739     return RISCVISD::SRAW;
5740   case ISD::SRL:
5741     return RISCVISD::SRLW;
5742   case ISD::SDIV:
5743     return RISCVISD::DIVW;
5744   case ISD::UDIV:
5745     return RISCVISD::DIVUW;
5746   case ISD::UREM:
5747     return RISCVISD::REMUW;
5748   case ISD::ROTL:
5749     return RISCVISD::ROLW;
5750   case ISD::ROTR:
5751     return RISCVISD::RORW;
5752   case RISCVISD::GREV:
5753     return RISCVISD::GREVW;
5754   case RISCVISD::GORC:
5755     return RISCVISD::GORCW;
5756   }
5757 }
5758 
5759 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
5760 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
5761 // otherwise be promoted to i64, making it difficult to select the
5762 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
5763 // type i8/i16/i32 is lost.
5764 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
5765                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
5766   SDLoc DL(N);
5767   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5768   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
5769   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
5770   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5771   // ReplaceNodeResults requires we maintain the same type for the return value.
5772   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5773 }
5774 
5775 // Converts the given 32-bit operation to a i64 operation with signed extension
5776 // semantic to reduce the signed extension instructions.
5777 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
5778   SDLoc DL(N);
5779   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5780   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5781   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
5782   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5783                                DAG.getValueType(MVT::i32));
5784   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
5785 }
5786 
5787 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
5788                                              SmallVectorImpl<SDValue> &Results,
5789                                              SelectionDAG &DAG) const {
5790   SDLoc DL(N);
5791   switch (N->getOpcode()) {
5792   default:
5793     llvm_unreachable("Don't know how to custom type legalize this operation!");
5794   case ISD::STRICT_FP_TO_SINT:
5795   case ISD::STRICT_FP_TO_UINT:
5796   case ISD::FP_TO_SINT:
5797   case ISD::FP_TO_UINT: {
5798     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5799            "Unexpected custom legalisation");
5800     bool IsStrict = N->isStrictFPOpcode();
5801     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
5802                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
5803     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
5804     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
5805         TargetLowering::TypeSoftenFloat) {
5806       if (!isTypeLegal(Op0.getValueType()))
5807         return;
5808       if (IsStrict) {
5809         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
5810                                 : RISCVISD::STRICT_FCVT_WU_RV64;
5811         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
5812         SDValue Res = DAG.getNode(
5813             Opc, DL, VTs, N->getOperand(0), Op0,
5814             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
5815         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5816         Results.push_back(Res.getValue(1));
5817         return;
5818       }
5819       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
5820       SDValue Res =
5821           DAG.getNode(Opc, DL, MVT::i64, Op0,
5822                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
5823       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5824       return;
5825     }
5826     // If the FP type needs to be softened, emit a library call using the 'si'
5827     // version. If we left it to default legalization we'd end up with 'di'. If
5828     // the FP type doesn't need to be softened just let generic type
5829     // legalization promote the result type.
5830     RTLIB::Libcall LC;
5831     if (IsSigned)
5832       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
5833     else
5834       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
5835     MakeLibCallOptions CallOptions;
5836     EVT OpVT = Op0.getValueType();
5837     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5838     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5839     SDValue Result;
5840     std::tie(Result, Chain) =
5841         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5842     Results.push_back(Result);
5843     if (IsStrict)
5844       Results.push_back(Chain);
5845     break;
5846   }
5847   case ISD::READCYCLECOUNTER: {
5848     assert(!Subtarget.is64Bit() &&
5849            "READCYCLECOUNTER only has custom type legalization on riscv32");
5850 
5851     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5852     SDValue RCW =
5853         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5854 
5855     Results.push_back(
5856         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
5857     Results.push_back(RCW.getValue(2));
5858     break;
5859   }
5860   case ISD::MUL: {
5861     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
5862     unsigned XLen = Subtarget.getXLen();
5863     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
5864     if (Size > XLen) {
5865       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
5866       SDValue LHS = N->getOperand(0);
5867       SDValue RHS = N->getOperand(1);
5868       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
5869 
5870       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
5871       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
5872       // We need exactly one side to be unsigned.
5873       if (LHSIsU == RHSIsU)
5874         return;
5875 
5876       auto MakeMULPair = [&](SDValue S, SDValue U) {
5877         MVT XLenVT = Subtarget.getXLenVT();
5878         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
5879         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
5880         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
5881         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
5882         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
5883       };
5884 
5885       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
5886       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
5887 
5888       // The other operand should be signed, but still prefer MULH when
5889       // possible.
5890       if (RHSIsU && LHSIsS && !RHSIsS)
5891         Results.push_back(MakeMULPair(LHS, RHS));
5892       else if (LHSIsU && RHSIsS && !LHSIsS)
5893         Results.push_back(MakeMULPair(RHS, LHS));
5894 
5895       return;
5896     }
5897     LLVM_FALLTHROUGH;
5898   }
5899   case ISD::ADD:
5900   case ISD::SUB:
5901     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5902            "Unexpected custom legalisation");
5903     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
5904     break;
5905   case ISD::SHL:
5906   case ISD::SRA:
5907   case ISD::SRL:
5908     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5909            "Unexpected custom legalisation");
5910     if (N->getOperand(1).getOpcode() != ISD::Constant) {
5911       Results.push_back(customLegalizeToWOp(N, DAG));
5912       break;
5913     }
5914 
5915     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
5916     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
5917     // shift amount.
5918     if (N->getOpcode() == ISD::SHL) {
5919       SDLoc DL(N);
5920       SDValue NewOp0 =
5921           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5922       SDValue NewOp1 =
5923           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
5924       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
5925       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5926                                    DAG.getValueType(MVT::i32));
5927       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5928     }
5929 
5930     break;
5931   case ISD::ROTL:
5932   case ISD::ROTR:
5933     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5934            "Unexpected custom legalisation");
5935     Results.push_back(customLegalizeToWOp(N, DAG));
5936     break;
5937   case ISD::CTTZ:
5938   case ISD::CTTZ_ZERO_UNDEF:
5939   case ISD::CTLZ:
5940   case ISD::CTLZ_ZERO_UNDEF: {
5941     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5942            "Unexpected custom legalisation");
5943 
5944     SDValue NewOp0 =
5945         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5946     bool IsCTZ =
5947         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
5948     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
5949     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
5950     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5951     return;
5952   }
5953   case ISD::SDIV:
5954   case ISD::UDIV:
5955   case ISD::UREM: {
5956     MVT VT = N->getSimpleValueType(0);
5957     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
5958            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
5959            "Unexpected custom legalisation");
5960     // Don't promote division/remainder by constant since we should expand those
5961     // to multiply by magic constant.
5962     // FIXME: What if the expansion is disabled for minsize.
5963     if (N->getOperand(1).getOpcode() == ISD::Constant)
5964       return;
5965 
5966     // If the input is i32, use ANY_EXTEND since the W instructions don't read
5967     // the upper 32 bits. For other types we need to sign or zero extend
5968     // based on the opcode.
5969     unsigned ExtOpc = ISD::ANY_EXTEND;
5970     if (VT != MVT::i32)
5971       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5972                                            : ISD::ZERO_EXTEND;
5973 
5974     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5975     break;
5976   }
5977   case ISD::UADDO:
5978   case ISD::USUBO: {
5979     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5980            "Unexpected custom legalisation");
5981     bool IsAdd = N->getOpcode() == ISD::UADDO;
5982     // Create an ADDW or SUBW.
5983     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5984     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5985     SDValue Res =
5986         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5987     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5988                       DAG.getValueType(MVT::i32));
5989 
5990     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
5991     // Since the inputs are sign extended from i32, this is equivalent to
5992     // comparing the lower 32 bits.
5993     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5994     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5995                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5996 
5997     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5998     Results.push_back(Overflow);
5999     return;
6000   }
6001   case ISD::UADDSAT:
6002   case ISD::USUBSAT: {
6003     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6004            "Unexpected custom legalisation");
6005     if (Subtarget.hasStdExtZbb()) {
6006       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6007       // sign extend allows overflow of the lower 32 bits to be detected on
6008       // the promoted size.
6009       SDValue LHS =
6010           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6011       SDValue RHS =
6012           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6013       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6014       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6015       return;
6016     }
6017 
6018     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6019     // promotion for UADDO/USUBO.
6020     Results.push_back(expandAddSubSat(N, DAG));
6021     return;
6022   }
6023   case ISD::BITCAST: {
6024     EVT VT = N->getValueType(0);
6025     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6026     SDValue Op0 = N->getOperand(0);
6027     EVT Op0VT = Op0.getValueType();
6028     MVT XLenVT = Subtarget.getXLenVT();
6029     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6030       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6031       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6032     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6033                Subtarget.hasStdExtF()) {
6034       SDValue FPConv =
6035           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6036       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6037     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6038                isTypeLegal(Op0VT)) {
6039       // Custom-legalize bitcasts from fixed-length vector types to illegal
6040       // scalar types in order to improve codegen. Bitcast the vector to a
6041       // one-element vector type whose element type is the same as the result
6042       // type, and extract the first element.
6043       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6044       if (isTypeLegal(BVT)) {
6045         SDValue BVec = DAG.getBitcast(BVT, Op0);
6046         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6047                                       DAG.getConstant(0, DL, XLenVT)));
6048       }
6049     }
6050     break;
6051   }
6052   case RISCVISD::GREV:
6053   case RISCVISD::GORC: {
6054     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6055            "Unexpected custom legalisation");
6056     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6057     // This is similar to customLegalizeToWOp, except that we pass the second
6058     // operand (a TargetConstant) straight through: it is already of type
6059     // XLenVT.
6060     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6061     SDValue NewOp0 =
6062         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6063     SDValue NewOp1 =
6064         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6065     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6066     // ReplaceNodeResults requires we maintain the same type for the return
6067     // value.
6068     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6069     break;
6070   }
6071   case RISCVISD::SHFL: {
6072     // There is no SHFLIW instruction, but we can just promote the operation.
6073     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6074            "Unexpected custom legalisation");
6075     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6076     SDValue NewOp0 =
6077         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6078     SDValue NewOp1 =
6079         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6080     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6081     // ReplaceNodeResults requires we maintain the same type for the return
6082     // value.
6083     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6084     break;
6085   }
6086   case ISD::BSWAP:
6087   case ISD::BITREVERSE: {
6088     MVT VT = N->getSimpleValueType(0);
6089     MVT XLenVT = Subtarget.getXLenVT();
6090     assert((VT == MVT::i8 || VT == MVT::i16 ||
6091             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6092            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6093     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6094     unsigned Imm = VT.getSizeInBits() - 1;
6095     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6096     if (N->getOpcode() == ISD::BSWAP)
6097       Imm &= ~0x7U;
6098     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6099     SDValue GREVI =
6100         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6101     // ReplaceNodeResults requires we maintain the same type for the return
6102     // value.
6103     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6104     break;
6105   }
6106   case ISD::FSHL:
6107   case ISD::FSHR: {
6108     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6109            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6110     SDValue NewOp0 =
6111         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6112     SDValue NewOp1 =
6113         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6114     SDValue NewOp2 =
6115         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6116     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6117     // Mask the shift amount to 5 bits.
6118     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6119                          DAG.getConstant(0x1f, DL, MVT::i64));
6120     unsigned Opc =
6121         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
6122     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
6123     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6124     break;
6125   }
6126   case ISD::EXTRACT_VECTOR_ELT: {
6127     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6128     // type is illegal (currently only vXi64 RV32).
6129     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6130     // transferred to the destination register. We issue two of these from the
6131     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6132     // first element.
6133     SDValue Vec = N->getOperand(0);
6134     SDValue Idx = N->getOperand(1);
6135 
6136     // The vector type hasn't been legalized yet so we can't issue target
6137     // specific nodes if it needs legalization.
6138     // FIXME: We would manually legalize if it's important.
6139     if (!isTypeLegal(Vec.getValueType()))
6140       return;
6141 
6142     MVT VecVT = Vec.getSimpleValueType();
6143 
6144     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6145            VecVT.getVectorElementType() == MVT::i64 &&
6146            "Unexpected EXTRACT_VECTOR_ELT legalization");
6147 
6148     // If this is a fixed vector, we need to convert it to a scalable vector.
6149     MVT ContainerVT = VecVT;
6150     if (VecVT.isFixedLengthVector()) {
6151       ContainerVT = getContainerForFixedLengthVector(VecVT);
6152       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6153     }
6154 
6155     MVT XLenVT = Subtarget.getXLenVT();
6156 
6157     // Use a VL of 1 to avoid processing more elements than we need.
6158     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6159     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6160     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6161 
6162     // Unless the index is known to be 0, we must slide the vector down to get
6163     // the desired element into index 0.
6164     if (!isNullConstant(Idx)) {
6165       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6166                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6167     }
6168 
6169     // Extract the lower XLEN bits of the correct vector element.
6170     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6171 
6172     // To extract the upper XLEN bits of the vector element, shift the first
6173     // element right by 32 bits and re-extract the lower XLEN bits.
6174     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6175                                      DAG.getConstant(32, DL, XLenVT), VL);
6176     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6177                                  ThirtyTwoV, Mask, VL);
6178 
6179     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6180 
6181     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6182     break;
6183   }
6184   case ISD::INTRINSIC_WO_CHAIN: {
6185     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6186     switch (IntNo) {
6187     default:
6188       llvm_unreachable(
6189           "Don't know how to custom type legalize this intrinsic!");
6190     case Intrinsic::riscv_orc_b: {
6191       // Lower to the GORCI encoding for orc.b with the operand extended.
6192       SDValue NewOp =
6193           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6194       // If Zbp is enabled, use GORCIW which will sign extend the result.
6195       unsigned Opc =
6196           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6197       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6198                                 DAG.getConstant(7, DL, MVT::i64));
6199       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6200       return;
6201     }
6202     case Intrinsic::riscv_grev:
6203     case Intrinsic::riscv_gorc: {
6204       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6205              "Unexpected custom legalisation");
6206       SDValue NewOp1 =
6207           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6208       SDValue NewOp2 =
6209           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6210       unsigned Opc =
6211           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
6212       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6213       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6214       break;
6215     }
6216     case Intrinsic::riscv_shfl:
6217     case Intrinsic::riscv_unshfl: {
6218       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6219              "Unexpected custom legalisation");
6220       SDValue NewOp1 =
6221           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6222       SDValue NewOp2 =
6223           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6224       unsigned Opc =
6225           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6226       if (isa<ConstantSDNode>(N->getOperand(2))) {
6227         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6228                              DAG.getConstant(0xf, DL, MVT::i64));
6229         Opc =
6230             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6231       }
6232       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6233       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6234       break;
6235     }
6236     case Intrinsic::riscv_bcompress:
6237     case Intrinsic::riscv_bdecompress: {
6238       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6239              "Unexpected custom legalisation");
6240       SDValue NewOp1 =
6241           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6242       SDValue NewOp2 =
6243           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6244       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
6245                          ? RISCVISD::BCOMPRESSW
6246                          : RISCVISD::BDECOMPRESSW;
6247       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6248       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6249       break;
6250     }
6251     case Intrinsic::riscv_vmv_x_s: {
6252       EVT VT = N->getValueType(0);
6253       MVT XLenVT = Subtarget.getXLenVT();
6254       if (VT.bitsLT(XLenVT)) {
6255         // Simple case just extract using vmv.x.s and truncate.
6256         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6257                                       Subtarget.getXLenVT(), N->getOperand(1));
6258         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6259         return;
6260       }
6261 
6262       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6263              "Unexpected custom legalization");
6264 
6265       // We need to do the move in two steps.
6266       SDValue Vec = N->getOperand(1);
6267       MVT VecVT = Vec.getSimpleValueType();
6268 
6269       // First extract the lower XLEN bits of the element.
6270       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6271 
6272       // To extract the upper XLEN bits of the vector element, shift the first
6273       // element right by 32 bits and re-extract the lower XLEN bits.
6274       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6275       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6276       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6277       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6278                                        DAG.getConstant(32, DL, XLenVT), VL);
6279       SDValue LShr32 =
6280           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6281       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6282 
6283       Results.push_back(
6284           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6285       break;
6286     }
6287     }
6288     break;
6289   }
6290   case ISD::VECREDUCE_ADD:
6291   case ISD::VECREDUCE_AND:
6292   case ISD::VECREDUCE_OR:
6293   case ISD::VECREDUCE_XOR:
6294   case ISD::VECREDUCE_SMAX:
6295   case ISD::VECREDUCE_UMAX:
6296   case ISD::VECREDUCE_SMIN:
6297   case ISD::VECREDUCE_UMIN:
6298     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6299       Results.push_back(V);
6300     break;
6301   case ISD::VP_REDUCE_ADD:
6302   case ISD::VP_REDUCE_AND:
6303   case ISD::VP_REDUCE_OR:
6304   case ISD::VP_REDUCE_XOR:
6305   case ISD::VP_REDUCE_SMAX:
6306   case ISD::VP_REDUCE_UMAX:
6307   case ISD::VP_REDUCE_SMIN:
6308   case ISD::VP_REDUCE_UMIN:
6309     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6310       Results.push_back(V);
6311     break;
6312   case ISD::FLT_ROUNDS_: {
6313     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6314     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6315     Results.push_back(Res.getValue(0));
6316     Results.push_back(Res.getValue(1));
6317     break;
6318   }
6319   }
6320 }
6321 
6322 // A structure to hold one of the bit-manipulation patterns below. Together, a
6323 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6324 //   (or (and (shl x, 1), 0xAAAAAAAA),
6325 //       (and (srl x, 1), 0x55555555))
6326 struct RISCVBitmanipPat {
6327   SDValue Op;
6328   unsigned ShAmt;
6329   bool IsSHL;
6330 
6331   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6332     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6333   }
6334 };
6335 
6336 // Matches patterns of the form
6337 //   (and (shl x, C2), (C1 << C2))
6338 //   (and (srl x, C2), C1)
6339 //   (shl (and x, C1), C2)
6340 //   (srl (and x, (C1 << C2)), C2)
6341 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6342 // The expected masks for each shift amount are specified in BitmanipMasks where
6343 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6344 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6345 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6346 // XLen is 64.
6347 static Optional<RISCVBitmanipPat>
6348 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6349   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6350          "Unexpected number of masks");
6351   Optional<uint64_t> Mask;
6352   // Optionally consume a mask around the shift operation.
6353   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6354     Mask = Op.getConstantOperandVal(1);
6355     Op = Op.getOperand(0);
6356   }
6357   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6358     return None;
6359   bool IsSHL = Op.getOpcode() == ISD::SHL;
6360 
6361   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6362     return None;
6363   uint64_t ShAmt = Op.getConstantOperandVal(1);
6364 
6365   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6366   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6367     return None;
6368   // If we don't have enough masks for 64 bit, then we must be trying to
6369   // match SHFL so we're only allowed to shift 1/4 of the width.
6370   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6371     return None;
6372 
6373   SDValue Src = Op.getOperand(0);
6374 
6375   // The expected mask is shifted left when the AND is found around SHL
6376   // patterns.
6377   //   ((x >> 1) & 0x55555555)
6378   //   ((x << 1) & 0xAAAAAAAA)
6379   bool SHLExpMask = IsSHL;
6380 
6381   if (!Mask) {
6382     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6383     // the mask is all ones: consume that now.
6384     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6385       Mask = Src.getConstantOperandVal(1);
6386       Src = Src.getOperand(0);
6387       // The expected mask is now in fact shifted left for SRL, so reverse the
6388       // decision.
6389       //   ((x & 0xAAAAAAAA) >> 1)
6390       //   ((x & 0x55555555) << 1)
6391       SHLExpMask = !SHLExpMask;
6392     } else {
6393       // Use a default shifted mask of all-ones if there's no AND, truncated
6394       // down to the expected width. This simplifies the logic later on.
6395       Mask = maskTrailingOnes<uint64_t>(Width);
6396       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6397     }
6398   }
6399 
6400   unsigned MaskIdx = Log2_32(ShAmt);
6401   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6402 
6403   if (SHLExpMask)
6404     ExpMask <<= ShAmt;
6405 
6406   if (Mask != ExpMask)
6407     return None;
6408 
6409   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6410 }
6411 
6412 // Matches any of the following bit-manipulation patterns:
6413 //   (and (shl x, 1), (0x55555555 << 1))
6414 //   (and (srl x, 1), 0x55555555)
6415 //   (shl (and x, 0x55555555), 1)
6416 //   (srl (and x, (0x55555555 << 1)), 1)
6417 // where the shift amount and mask may vary thus:
6418 //   [1]  = 0x55555555 / 0xAAAAAAAA
6419 //   [2]  = 0x33333333 / 0xCCCCCCCC
6420 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6421 //   [8]  = 0x00FF00FF / 0xFF00FF00
6422 //   [16] = 0x0000FFFF / 0xFFFFFFFF
6423 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6424 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6425   // These are the unshifted masks which we use to match bit-manipulation
6426   // patterns. They may be shifted left in certain circumstances.
6427   static const uint64_t BitmanipMasks[] = {
6428       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6429       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6430 
6431   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6432 }
6433 
6434 // Match the following pattern as a GREVI(W) operation
6435 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6436 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6437                                const RISCVSubtarget &Subtarget) {
6438   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6439   EVT VT = Op.getValueType();
6440 
6441   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6442     auto LHS = matchGREVIPat(Op.getOperand(0));
6443     auto RHS = matchGREVIPat(Op.getOperand(1));
6444     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6445       SDLoc DL(Op);
6446       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6447                          DAG.getConstant(LHS->ShAmt, DL, VT));
6448     }
6449   }
6450   return SDValue();
6451 }
6452 
6453 // Matches any the following pattern as a GORCI(W) operation
6454 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6455 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6456 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6457 // Note that with the variant of 3.,
6458 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6459 // the inner pattern will first be matched as GREVI and then the outer
6460 // pattern will be matched to GORC via the first rule above.
6461 // 4.  (or (rotl/rotr x, bitwidth/2), x)
6462 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6463                                const RISCVSubtarget &Subtarget) {
6464   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6465   EVT VT = Op.getValueType();
6466 
6467   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6468     SDLoc DL(Op);
6469     SDValue Op0 = Op.getOperand(0);
6470     SDValue Op1 = Op.getOperand(1);
6471 
6472     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6473       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6474           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6475           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6476         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6477       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
6478       if ((Reverse.getOpcode() == ISD::ROTL ||
6479            Reverse.getOpcode() == ISD::ROTR) &&
6480           Reverse.getOperand(0) == X &&
6481           isa<ConstantSDNode>(Reverse.getOperand(1))) {
6482         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
6483         if (RotAmt == (VT.getSizeInBits() / 2))
6484           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
6485                              DAG.getConstant(RotAmt, DL, VT));
6486       }
6487       return SDValue();
6488     };
6489 
6490     // Check for either commutable permutation of (or (GREVI x, shamt), x)
6491     if (SDValue V = MatchOROfReverse(Op0, Op1))
6492       return V;
6493     if (SDValue V = MatchOROfReverse(Op1, Op0))
6494       return V;
6495 
6496     // OR is commutable so canonicalize its OR operand to the left
6497     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
6498       std::swap(Op0, Op1);
6499     if (Op0.getOpcode() != ISD::OR)
6500       return SDValue();
6501     SDValue OrOp0 = Op0.getOperand(0);
6502     SDValue OrOp1 = Op0.getOperand(1);
6503     auto LHS = matchGREVIPat(OrOp0);
6504     // OR is commutable so swap the operands and try again: x might have been
6505     // on the left
6506     if (!LHS) {
6507       std::swap(OrOp0, OrOp1);
6508       LHS = matchGREVIPat(OrOp0);
6509     }
6510     auto RHS = matchGREVIPat(Op1);
6511     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
6512       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
6513                          DAG.getConstant(LHS->ShAmt, DL, VT));
6514     }
6515   }
6516   return SDValue();
6517 }
6518 
6519 // Matches any of the following bit-manipulation patterns:
6520 //   (and (shl x, 1), (0x22222222 << 1))
6521 //   (and (srl x, 1), 0x22222222)
6522 //   (shl (and x, 0x22222222), 1)
6523 //   (srl (and x, (0x22222222 << 1)), 1)
6524 // where the shift amount and mask may vary thus:
6525 //   [1]  = 0x22222222 / 0x44444444
6526 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
6527 //   [4]  = 0x00F000F0 / 0x0F000F00
6528 //   [8]  = 0x0000FF00 / 0x00FF0000
6529 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
6530 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
6531   // These are the unshifted masks which we use to match bit-manipulation
6532   // patterns. They may be shifted left in certain circumstances.
6533   static const uint64_t BitmanipMasks[] = {
6534       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
6535       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
6536 
6537   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6538 }
6539 
6540 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
6541 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6542                                const RISCVSubtarget &Subtarget) {
6543   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6544   EVT VT = Op.getValueType();
6545 
6546   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6547     return SDValue();
6548 
6549   SDValue Op0 = Op.getOperand(0);
6550   SDValue Op1 = Op.getOperand(1);
6551 
6552   // Or is commutable so canonicalize the second OR to the LHS.
6553   if (Op0.getOpcode() != ISD::OR)
6554     std::swap(Op0, Op1);
6555   if (Op0.getOpcode() != ISD::OR)
6556     return SDValue();
6557 
6558   // We found an inner OR, so our operands are the operands of the inner OR
6559   // and the other operand of the outer OR.
6560   SDValue A = Op0.getOperand(0);
6561   SDValue B = Op0.getOperand(1);
6562   SDValue C = Op1;
6563 
6564   auto Match1 = matchSHFLPat(A);
6565   auto Match2 = matchSHFLPat(B);
6566 
6567   // If neither matched, we failed.
6568   if (!Match1 && !Match2)
6569     return SDValue();
6570 
6571   // We had at least one match. if one failed, try the remaining C operand.
6572   if (!Match1) {
6573     std::swap(A, C);
6574     Match1 = matchSHFLPat(A);
6575     if (!Match1)
6576       return SDValue();
6577   } else if (!Match2) {
6578     std::swap(B, C);
6579     Match2 = matchSHFLPat(B);
6580     if (!Match2)
6581       return SDValue();
6582   }
6583   assert(Match1 && Match2);
6584 
6585   // Make sure our matches pair up.
6586   if (!Match1->formsPairWith(*Match2))
6587     return SDValue();
6588 
6589   // All the remains is to make sure C is an AND with the same input, that masks
6590   // out the bits that are being shuffled.
6591   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6592       C.getOperand(0) != Match1->Op)
6593     return SDValue();
6594 
6595   uint64_t Mask = C.getConstantOperandVal(1);
6596 
6597   static const uint64_t BitmanipMasks[] = {
6598       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6599       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6600   };
6601 
6602   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6603   unsigned MaskIdx = Log2_32(Match1->ShAmt);
6604   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6605 
6606   if (Mask != ExpMask)
6607     return SDValue();
6608 
6609   SDLoc DL(Op);
6610   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
6611                      DAG.getConstant(Match1->ShAmt, DL, VT));
6612 }
6613 
6614 // Optimize (add (shl x, c0), (shl y, c1)) ->
6615 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
6616 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
6617                                   const RISCVSubtarget &Subtarget) {
6618   // Perform this optimization only in the zba extension.
6619   if (!Subtarget.hasStdExtZba())
6620     return SDValue();
6621 
6622   // Skip for vector types and larger types.
6623   EVT VT = N->getValueType(0);
6624   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6625     return SDValue();
6626 
6627   // The two operand nodes must be SHL and have no other use.
6628   SDValue N0 = N->getOperand(0);
6629   SDValue N1 = N->getOperand(1);
6630   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
6631       !N0->hasOneUse() || !N1->hasOneUse())
6632     return SDValue();
6633 
6634   // Check c0 and c1.
6635   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6636   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
6637   if (!N0C || !N1C)
6638     return SDValue();
6639   int64_t C0 = N0C->getSExtValue();
6640   int64_t C1 = N1C->getSExtValue();
6641   if (C0 <= 0 || C1 <= 0)
6642     return SDValue();
6643 
6644   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
6645   int64_t Bits = std::min(C0, C1);
6646   int64_t Diff = std::abs(C0 - C1);
6647   if (Diff != 1 && Diff != 2 && Diff != 3)
6648     return SDValue();
6649 
6650   // Build nodes.
6651   SDLoc DL(N);
6652   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
6653   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
6654   SDValue NA0 =
6655       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
6656   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
6657   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
6658 }
6659 
6660 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
6661 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
6662 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
6663 // not undo itself, but they are redundant.
6664 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
6665   SDValue Src = N->getOperand(0);
6666 
6667   if (Src.getOpcode() != N->getOpcode())
6668     return SDValue();
6669 
6670   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
6671       !isa<ConstantSDNode>(Src.getOperand(1)))
6672     return SDValue();
6673 
6674   unsigned ShAmt1 = N->getConstantOperandVal(1);
6675   unsigned ShAmt2 = Src.getConstantOperandVal(1);
6676   Src = Src.getOperand(0);
6677 
6678   unsigned CombinedShAmt;
6679   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
6680     CombinedShAmt = ShAmt1 | ShAmt2;
6681   else
6682     CombinedShAmt = ShAmt1 ^ ShAmt2;
6683 
6684   if (CombinedShAmt == 0)
6685     return Src;
6686 
6687   SDLoc DL(N);
6688   return DAG.getNode(
6689       N->getOpcode(), DL, N->getValueType(0), Src,
6690       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
6691 }
6692 
6693 // Combine a constant select operand into its use:
6694 //
6695 // (and (select cond, -1, c), x)
6696 //   -> (select cond, x, (and x, c))  [AllOnes=1]
6697 // (or  (select cond, 0, c), x)
6698 //   -> (select cond, x, (or x, c))  [AllOnes=0]
6699 // (xor (select cond, 0, c), x)
6700 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
6701 // (add (select cond, 0, c), x)
6702 //   -> (select cond, x, (add x, c))  [AllOnes=0]
6703 // (sub x, (select cond, 0, c))
6704 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
6705 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6706                                    SelectionDAG &DAG, bool AllOnes) {
6707   EVT VT = N->getValueType(0);
6708 
6709   // Skip vectors.
6710   if (VT.isVector())
6711     return SDValue();
6712 
6713   if ((Slct.getOpcode() != ISD::SELECT &&
6714        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
6715       !Slct.hasOneUse())
6716     return SDValue();
6717 
6718   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
6719     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
6720   };
6721 
6722   bool SwapSelectOps;
6723   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
6724   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
6725   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
6726   SDValue NonConstantVal;
6727   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
6728     SwapSelectOps = false;
6729     NonConstantVal = FalseVal;
6730   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
6731     SwapSelectOps = true;
6732     NonConstantVal = TrueVal;
6733   } else
6734     return SDValue();
6735 
6736   // Slct is now know to be the desired identity constant when CC is true.
6737   TrueVal = OtherOp;
6738   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
6739   // Unless SwapSelectOps says the condition should be false.
6740   if (SwapSelectOps)
6741     std::swap(TrueVal, FalseVal);
6742 
6743   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
6744     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
6745                        {Slct.getOperand(0), Slct.getOperand(1),
6746                         Slct.getOperand(2), TrueVal, FalseVal});
6747 
6748   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
6749                      {Slct.getOperand(0), TrueVal, FalseVal});
6750 }
6751 
6752 // Attempt combineSelectAndUse on each operand of a commutative operator N.
6753 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
6754                                               bool AllOnes) {
6755   SDValue N0 = N->getOperand(0);
6756   SDValue N1 = N->getOperand(1);
6757   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
6758     return Result;
6759   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
6760     return Result;
6761   return SDValue();
6762 }
6763 
6764 // Transform (add (mul x, c0), c1) ->
6765 //           (add (mul (add x, c1/c0), c0), c1%c0).
6766 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
6767 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
6768 // to an infinite loop in DAGCombine if transformed.
6769 // Or transform (add (mul x, c0), c1) ->
6770 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
6771 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
6772 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
6773 // lead to an infinite loop in DAGCombine if transformed.
6774 // Or transform (add (mul x, c0), c1) ->
6775 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
6776 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
6777 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
6778 // lead to an infinite loop in DAGCombine if transformed.
6779 // Or transform (add (mul x, c0), c1) ->
6780 //              (mul (add x, c1/c0), c0).
6781 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
6782 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
6783                                      const RISCVSubtarget &Subtarget) {
6784   // Skip for vector types and larger types.
6785   EVT VT = N->getValueType(0);
6786   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6787     return SDValue();
6788   // The first operand node must be a MUL and has no other use.
6789   SDValue N0 = N->getOperand(0);
6790   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
6791     return SDValue();
6792   // Check if c0 and c1 match above conditions.
6793   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6794   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6795   if (!N0C || !N1C)
6796     return SDValue();
6797   int64_t C0 = N0C->getSExtValue();
6798   int64_t C1 = N1C->getSExtValue();
6799   int64_t CA, CB;
6800   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
6801     return SDValue();
6802   // Search for proper CA (non-zero) and CB that both are simm12.
6803   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
6804       !isInt<12>(C0 * (C1 / C0))) {
6805     CA = C1 / C0;
6806     CB = C1 % C0;
6807   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
6808              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
6809     CA = C1 / C0 + 1;
6810     CB = C1 % C0 - C0;
6811   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
6812              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
6813     CA = C1 / C0 - 1;
6814     CB = C1 % C0 + C0;
6815   } else
6816     return SDValue();
6817   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
6818   SDLoc DL(N);
6819   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
6820                              DAG.getConstant(CA, DL, VT));
6821   SDValue New1 =
6822       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
6823   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
6824 }
6825 
6826 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
6827                                  const RISCVSubtarget &Subtarget) {
6828   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
6829     return V;
6830   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
6831     return V;
6832   // fold (add (select lhs, rhs, cc, 0, y), x) ->
6833   //      (select lhs, rhs, cc, x, (add x, y))
6834   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6835 }
6836 
6837 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
6838   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
6839   //      (select lhs, rhs, cc, x, (sub x, y))
6840   SDValue N0 = N->getOperand(0);
6841   SDValue N1 = N->getOperand(1);
6842   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
6843 }
6844 
6845 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
6846   // fold (and (select lhs, rhs, cc, -1, y), x) ->
6847   //      (select lhs, rhs, cc, x, (and x, y))
6848   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
6849 }
6850 
6851 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
6852                                 const RISCVSubtarget &Subtarget) {
6853   if (Subtarget.hasStdExtZbp()) {
6854     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
6855       return GREV;
6856     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
6857       return GORC;
6858     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
6859       return SHFL;
6860   }
6861 
6862   // fold (or (select cond, 0, y), x) ->
6863   //      (select cond, x, (or x, y))
6864   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6865 }
6866 
6867 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
6868   // fold (xor (select cond, 0, y), x) ->
6869   //      (select cond, x, (xor x, y))
6870   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6871 }
6872 
6873 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
6874 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
6875 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
6876 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
6877 // ADDW/SUBW/MULW.
6878 static SDValue performANY_EXTENDCombine(SDNode *N,
6879                                         TargetLowering::DAGCombinerInfo &DCI,
6880                                         const RISCVSubtarget &Subtarget) {
6881   if (!Subtarget.is64Bit())
6882     return SDValue();
6883 
6884   SelectionDAG &DAG = DCI.DAG;
6885 
6886   SDValue Src = N->getOperand(0);
6887   EVT VT = N->getValueType(0);
6888   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
6889     return SDValue();
6890 
6891   // The opcode must be one that can implicitly sign_extend.
6892   // FIXME: Additional opcodes.
6893   switch (Src.getOpcode()) {
6894   default:
6895     return SDValue();
6896   case ISD::MUL:
6897     if (!Subtarget.hasStdExtM())
6898       return SDValue();
6899     LLVM_FALLTHROUGH;
6900   case ISD::ADD:
6901   case ISD::SUB:
6902     break;
6903   }
6904 
6905   // Only handle cases where the result is used by a CopyToReg. That likely
6906   // means the value is a liveout of the basic block. This helps prevent
6907   // infinite combine loops like PR51206.
6908   if (none_of(N->uses(),
6909               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
6910     return SDValue();
6911 
6912   SmallVector<SDNode *, 4> SetCCs;
6913   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
6914                             UE = Src.getNode()->use_end();
6915        UI != UE; ++UI) {
6916     SDNode *User = *UI;
6917     if (User == N)
6918       continue;
6919     if (UI.getUse().getResNo() != Src.getResNo())
6920       continue;
6921     // All i32 setccs are legalized by sign extending operands.
6922     if (User->getOpcode() == ISD::SETCC) {
6923       SetCCs.push_back(User);
6924       continue;
6925     }
6926     // We don't know if we can extend this user.
6927     break;
6928   }
6929 
6930   // If we don't have any SetCCs, this isn't worthwhile.
6931   if (SetCCs.empty())
6932     return SDValue();
6933 
6934   SDLoc DL(N);
6935   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
6936   DCI.CombineTo(N, SExt);
6937 
6938   // Promote all the setccs.
6939   for (SDNode *SetCC : SetCCs) {
6940     SmallVector<SDValue, 4> Ops;
6941 
6942     for (unsigned j = 0; j != 2; ++j) {
6943       SDValue SOp = SetCC->getOperand(j);
6944       if (SOp == Src)
6945         Ops.push_back(SExt);
6946       else
6947         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
6948     }
6949 
6950     Ops.push_back(SetCC->getOperand(2));
6951     DCI.CombineTo(SetCC,
6952                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6953   }
6954   return SDValue(N, 0);
6955 }
6956 
6957 // Try to form VWMUL or VWMULU.
6958 // FIXME: Support VWMULSU.
6959 static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
6960                                     SelectionDAG &DAG) {
6961   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
6962   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6963   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6964   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
6965     return SDValue();
6966 
6967   SDValue Mask = N->getOperand(2);
6968   SDValue VL = N->getOperand(3);
6969 
6970   // Make sure the mask and VL match.
6971   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
6972     return SDValue();
6973 
6974   MVT VT = N->getSimpleValueType(0);
6975 
6976   // Determine the narrow size for a widening multiply.
6977   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
6978   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
6979                                   VT.getVectorElementCount());
6980 
6981   SDLoc DL(N);
6982 
6983   // See if the other operand is the same opcode.
6984   if (Op0.getOpcode() == Op1.getOpcode()) {
6985     if (!Op1.hasOneUse())
6986       return SDValue();
6987 
6988     // Make sure the mask and VL match.
6989     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
6990       return SDValue();
6991 
6992     Op1 = Op1.getOperand(0);
6993   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
6994     // The operand is a splat of a scalar.
6995 
6996     // The VL must be the same.
6997     if (Op1.getOperand(1) != VL)
6998       return SDValue();
6999 
7000     // Get the scalar value.
7001     Op1 = Op1.getOperand(0);
7002 
7003     // See if have enough sign bits or zero bits in the scalar to use a
7004     // widening multiply by splatting to smaller element size.
7005     unsigned EltBits = VT.getScalarSizeInBits();
7006     unsigned ScalarBits = Op1.getValueSizeInBits();
7007     // Make sure we're getting all element bits from the scalar register.
7008     // FIXME: Support implicit sign extension of vmv.v.x?
7009     if (ScalarBits < EltBits)
7010       return SDValue();
7011 
7012     if (IsSignExt) {
7013       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
7014         return SDValue();
7015     } else {
7016       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7017       if (!DAG.MaskedValueIsZero(Op1, Mask))
7018         return SDValue();
7019     }
7020 
7021     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
7022   } else
7023     return SDValue();
7024 
7025   Op0 = Op0.getOperand(0);
7026 
7027   // Re-introduce narrower extends if needed.
7028   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7029   if (Op0.getValueType() != NarrowVT)
7030     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7031   if (Op1.getValueType() != NarrowVT)
7032     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7033 
7034   unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7035   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7036 }
7037 
7038 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7039                                                DAGCombinerInfo &DCI) const {
7040   SelectionDAG &DAG = DCI.DAG;
7041 
7042   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7043   // bits are demanded. N will be added to the Worklist if it was not deleted.
7044   // Caller should return SDValue(N, 0) if this returns true.
7045   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7046     SDValue Op = N->getOperand(OpNo);
7047     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7048     if (!SimplifyDemandedBits(Op, Mask, DCI))
7049       return false;
7050 
7051     if (N->getOpcode() != ISD::DELETED_NODE)
7052       DCI.AddToWorklist(N);
7053     return true;
7054   };
7055 
7056   switch (N->getOpcode()) {
7057   default:
7058     break;
7059   case RISCVISD::SplitF64: {
7060     SDValue Op0 = N->getOperand(0);
7061     // If the input to SplitF64 is just BuildPairF64 then the operation is
7062     // redundant. Instead, use BuildPairF64's operands directly.
7063     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7064       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7065 
7066     SDLoc DL(N);
7067 
7068     // It's cheaper to materialise two 32-bit integers than to load a double
7069     // from the constant pool and transfer it to integer registers through the
7070     // stack.
7071     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7072       APInt V = C->getValueAPF().bitcastToAPInt();
7073       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7074       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7075       return DCI.CombineTo(N, Lo, Hi);
7076     }
7077 
7078     // This is a target-specific version of a DAGCombine performed in
7079     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7080     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7081     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7082     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7083         !Op0.getNode()->hasOneUse())
7084       break;
7085     SDValue NewSplitF64 =
7086         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7087                     Op0.getOperand(0));
7088     SDValue Lo = NewSplitF64.getValue(0);
7089     SDValue Hi = NewSplitF64.getValue(1);
7090     APInt SignBit = APInt::getSignMask(32);
7091     if (Op0.getOpcode() == ISD::FNEG) {
7092       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
7093                                   DAG.getConstant(SignBit, DL, MVT::i32));
7094       return DCI.CombineTo(N, Lo, NewHi);
7095     }
7096     assert(Op0.getOpcode() == ISD::FABS);
7097     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
7098                                 DAG.getConstant(~SignBit, DL, MVT::i32));
7099     return DCI.CombineTo(N, Lo, NewHi);
7100   }
7101   case RISCVISD::SLLW:
7102   case RISCVISD::SRAW:
7103   case RISCVISD::SRLW:
7104   case RISCVISD::ROLW:
7105   case RISCVISD::RORW: {
7106     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7107     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7108         SimplifyDemandedLowBitsHelper(1, 5))
7109       return SDValue(N, 0);
7110     break;
7111   }
7112   case RISCVISD::CLZW:
7113   case RISCVISD::CTZW: {
7114     // Only the lower 32 bits of the first operand are read
7115     if (SimplifyDemandedLowBitsHelper(0, 32))
7116       return SDValue(N, 0);
7117     break;
7118   }
7119   case RISCVISD::FSL:
7120   case RISCVISD::FSR: {
7121     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
7122     unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
7123     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7124     if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
7125       return SDValue(N, 0);
7126     break;
7127   }
7128   case RISCVISD::FSLW:
7129   case RISCVISD::FSRW: {
7130     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
7131     // read.
7132     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7133         SimplifyDemandedLowBitsHelper(1, 32) ||
7134         SimplifyDemandedLowBitsHelper(2, 6))
7135       return SDValue(N, 0);
7136     break;
7137   }
7138   case RISCVISD::GREV:
7139   case RISCVISD::GORC: {
7140     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
7141     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7142     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7143     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
7144       return SDValue(N, 0);
7145 
7146     return combineGREVI_GORCI(N, DAG);
7147   }
7148   case RISCVISD::GREVW:
7149   case RISCVISD::GORCW: {
7150     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7151     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7152         SimplifyDemandedLowBitsHelper(1, 5))
7153       return SDValue(N, 0);
7154 
7155     return combineGREVI_GORCI(N, DAG);
7156   }
7157   case RISCVISD::SHFL:
7158   case RISCVISD::UNSHFL: {
7159     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
7160     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7161     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7162     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
7163       return SDValue(N, 0);
7164 
7165     break;
7166   }
7167   case RISCVISD::SHFLW:
7168   case RISCVISD::UNSHFLW: {
7169     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
7170     SDValue LHS = N->getOperand(0);
7171     SDValue RHS = N->getOperand(1);
7172     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
7173     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
7174     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7175         SimplifyDemandedLowBitsHelper(1, 4))
7176       return SDValue(N, 0);
7177 
7178     break;
7179   }
7180   case RISCVISD::BCOMPRESSW:
7181   case RISCVISD::BDECOMPRESSW: {
7182     // Only the lower 32 bits of LHS and RHS are read.
7183     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7184         SimplifyDemandedLowBitsHelper(1, 32))
7185       return SDValue(N, 0);
7186 
7187     break;
7188   }
7189   case RISCVISD::FMV_X_ANYEXTH:
7190   case RISCVISD::FMV_X_ANYEXTW_RV64: {
7191     SDLoc DL(N);
7192     SDValue Op0 = N->getOperand(0);
7193     MVT VT = N->getSimpleValueType(0);
7194     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7195     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7196     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7197     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7198          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7199         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7200          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7201       assert(Op0.getOperand(0).getValueType() == VT &&
7202              "Unexpected value type!");
7203       return Op0.getOperand(0);
7204     }
7205 
7206     // This is a target-specific version of a DAGCombine performed in
7207     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7208     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7209     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7210     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7211         !Op0.getNode()->hasOneUse())
7212       break;
7213     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7214     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7215     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7216     if (Op0.getOpcode() == ISD::FNEG)
7217       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7218                          DAG.getConstant(SignBit, DL, VT));
7219 
7220     assert(Op0.getOpcode() == ISD::FABS);
7221     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7222                        DAG.getConstant(~SignBit, DL, VT));
7223   }
7224   case ISD::ADD:
7225     return performADDCombine(N, DAG, Subtarget);
7226   case ISD::SUB:
7227     return performSUBCombine(N, DAG);
7228   case ISD::AND:
7229     return performANDCombine(N, DAG);
7230   case ISD::OR:
7231     return performORCombine(N, DAG, Subtarget);
7232   case ISD::XOR:
7233     return performXORCombine(N, DAG);
7234   case ISD::ANY_EXTEND:
7235     return performANY_EXTENDCombine(N, DCI, Subtarget);
7236   case ISD::ZERO_EXTEND:
7237     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7238     // type legalization. This is safe because fp_to_uint produces poison if
7239     // it overflows.
7240     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
7241       SDValue Src = N->getOperand(0);
7242       if (Src.getOpcode() == ISD::FP_TO_UINT &&
7243           isTypeLegal(Src.getOperand(0).getValueType()))
7244         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
7245                            Src.getOperand(0));
7246       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
7247           isTypeLegal(Src.getOperand(1).getValueType())) {
7248         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
7249         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
7250                                   Src.getOperand(0), Src.getOperand(1));
7251         DCI.CombineTo(N, Res);
7252         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
7253         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
7254         return SDValue(N, 0); // Return N so it doesn't get rechecked.
7255       }
7256     }
7257     return SDValue();
7258   case RISCVISD::SELECT_CC: {
7259     // Transform
7260     SDValue LHS = N->getOperand(0);
7261     SDValue RHS = N->getOperand(1);
7262     SDValue TrueV = N->getOperand(3);
7263     SDValue FalseV = N->getOperand(4);
7264 
7265     // If the True and False values are the same, we don't need a select_cc.
7266     if (TrueV == FalseV)
7267       return TrueV;
7268 
7269     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
7270     if (!ISD::isIntEqualitySetCC(CCVal))
7271       break;
7272 
7273     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
7274     //      (select_cc X, Y, lt, trueV, falseV)
7275     // Sometimes the setcc is introduced after select_cc has been formed.
7276     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7277         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7278       // If we're looking for eq 0 instead of ne 0, we need to invert the
7279       // condition.
7280       bool Invert = CCVal == ISD::SETEQ;
7281       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7282       if (Invert)
7283         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7284 
7285       SDLoc DL(N);
7286       RHS = LHS.getOperand(1);
7287       LHS = LHS.getOperand(0);
7288       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7289 
7290       SDValue TargetCC = DAG.getCondCode(CCVal);
7291       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7292                          {LHS, RHS, TargetCC, TrueV, FalseV});
7293     }
7294 
7295     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
7296     //      (select_cc X, Y, eq/ne, trueV, falseV)
7297     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7298       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
7299                          {LHS.getOperand(0), LHS.getOperand(1),
7300                           N->getOperand(2), TrueV, FalseV});
7301     // (select_cc X, 1, setne, trueV, falseV) ->
7302     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
7303     // This can occur when legalizing some floating point comparisons.
7304     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7305     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7306       SDLoc DL(N);
7307       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7308       SDValue TargetCC = DAG.getCondCode(CCVal);
7309       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7310       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7311                          {LHS, RHS, TargetCC, TrueV, FalseV});
7312     }
7313 
7314     break;
7315   }
7316   case RISCVISD::BR_CC: {
7317     SDValue LHS = N->getOperand(1);
7318     SDValue RHS = N->getOperand(2);
7319     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
7320     if (!ISD::isIntEqualitySetCC(CCVal))
7321       break;
7322 
7323     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
7324     //      (br_cc X, Y, lt, dest)
7325     // Sometimes the setcc is introduced after br_cc has been formed.
7326     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7327         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7328       // If we're looking for eq 0 instead of ne 0, we need to invert the
7329       // condition.
7330       bool Invert = CCVal == ISD::SETEQ;
7331       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7332       if (Invert)
7333         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7334 
7335       SDLoc DL(N);
7336       RHS = LHS.getOperand(1);
7337       LHS = LHS.getOperand(0);
7338       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7339 
7340       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7341                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
7342                          N->getOperand(4));
7343     }
7344 
7345     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
7346     //      (br_cc X, Y, eq/ne, trueV, falseV)
7347     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7348       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
7349                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
7350                          N->getOperand(3), N->getOperand(4));
7351 
7352     // (br_cc X, 1, setne, br_cc) ->
7353     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
7354     // This can occur when legalizing some floating point comparisons.
7355     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7356     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7357       SDLoc DL(N);
7358       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7359       SDValue TargetCC = DAG.getCondCode(CCVal);
7360       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7361       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7362                          N->getOperand(0), LHS, RHS, TargetCC,
7363                          N->getOperand(4));
7364     }
7365     break;
7366   }
7367   case ISD::FCOPYSIGN: {
7368     EVT VT = N->getValueType(0);
7369     if (!VT.isVector())
7370       break;
7371     // There is a form of VFSGNJ which injects the negated sign of its second
7372     // operand. Try and bubble any FNEG up after the extend/round to produce
7373     // this optimized pattern. Avoid modifying cases where FP_ROUND and
7374     // TRUNC=1.
7375     SDValue In2 = N->getOperand(1);
7376     // Avoid cases where the extend/round has multiple uses, as duplicating
7377     // those is typically more expensive than removing a fneg.
7378     if (!In2.hasOneUse())
7379       break;
7380     if (In2.getOpcode() != ISD::FP_EXTEND &&
7381         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
7382       break;
7383     In2 = In2.getOperand(0);
7384     if (In2.getOpcode() != ISD::FNEG)
7385       break;
7386     SDLoc DL(N);
7387     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7388     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7389                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7390   }
7391   case ISD::MGATHER:
7392   case ISD::MSCATTER:
7393   case ISD::VP_GATHER:
7394   case ISD::VP_SCATTER: {
7395     if (!DCI.isBeforeLegalize())
7396       break;
7397     SDValue Index, ScaleOp;
7398     bool IsIndexScaled = false;
7399     bool IsIndexSigned = false;
7400     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7401       Index = VPGSN->getIndex();
7402       ScaleOp = VPGSN->getScale();
7403       IsIndexScaled = VPGSN->isIndexScaled();
7404       IsIndexSigned = VPGSN->isIndexSigned();
7405     } else {
7406       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7407       Index = MGSN->getIndex();
7408       ScaleOp = MGSN->getScale();
7409       IsIndexScaled = MGSN->isIndexScaled();
7410       IsIndexSigned = MGSN->isIndexSigned();
7411     }
7412     EVT IndexVT = Index.getValueType();
7413     MVT XLenVT = Subtarget.getXLenVT();
7414     // RISCV indexed loads only support the "unsigned unscaled" addressing
7415     // mode, so anything else must be manually legalized.
7416     bool NeedsIdxLegalization =
7417         IsIndexScaled ||
7418         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7419     if (!NeedsIdxLegalization)
7420       break;
7421 
7422     SDLoc DL(N);
7423 
7424     // Any index legalization should first promote to XLenVT, so we don't lose
7425     // bits when scaling. This may create an illegal index type so we let
7426     // LLVM's legalization take care of the splitting.
7427     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7428     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7429       IndexVT = IndexVT.changeVectorElementType(XLenVT);
7430       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7431                           DL, IndexVT, Index);
7432     }
7433 
7434     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7435     if (IsIndexScaled && Scale != 1) {
7436       // Manually scale the indices by the element size.
7437       // TODO: Sanitize the scale operand here?
7438       // TODO: For VP nodes, should we use VP_SHL here?
7439       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
7440       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7441       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7442     }
7443 
7444     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7445     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7446       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7447                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
7448                               VPGN->getScale(), VPGN->getMask(),
7449                               VPGN->getVectorLength()},
7450                              VPGN->getMemOperand(), NewIndexTy);
7451     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7452       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7453                               {VPSN->getChain(), VPSN->getValue(),
7454                                VPSN->getBasePtr(), Index, VPSN->getScale(),
7455                                VPSN->getMask(), VPSN->getVectorLength()},
7456                               VPSN->getMemOperand(), NewIndexTy);
7457     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7458       return DAG.getMaskedGather(
7459           N->getVTList(), MGN->getMemoryVT(), DL,
7460           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7461            MGN->getBasePtr(), Index, MGN->getScale()},
7462           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7463     const auto *MSN = cast<MaskedScatterSDNode>(N);
7464     return DAG.getMaskedScatter(
7465         N->getVTList(), MSN->getMemoryVT(), DL,
7466         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7467          Index, MSN->getScale()},
7468         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7469   }
7470   case RISCVISD::SRA_VL:
7471   case RISCVISD::SRL_VL:
7472   case RISCVISD::SHL_VL: {
7473     SDValue ShAmt = N->getOperand(1);
7474     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7475       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7476       SDLoc DL(N);
7477       SDValue VL = N->getOperand(3);
7478       EVT VT = N->getValueType(0);
7479       ShAmt =
7480           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7481       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7482                          N->getOperand(2), N->getOperand(3));
7483     }
7484     break;
7485   }
7486   case ISD::SRA:
7487   case ISD::SRL:
7488   case ISD::SHL: {
7489     SDValue ShAmt = N->getOperand(1);
7490     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7491       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7492       SDLoc DL(N);
7493       EVT VT = N->getValueType(0);
7494       ShAmt =
7495           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7496       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7497     }
7498     break;
7499   }
7500   case RISCVISD::MUL_VL: {
7501     SDValue Op0 = N->getOperand(0);
7502     SDValue Op1 = N->getOperand(1);
7503     if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
7504       return V;
7505     if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
7506       return V;
7507     return SDValue();
7508   }
7509   case ISD::STORE: {
7510     auto *Store = cast<StoreSDNode>(N);
7511     SDValue Val = Store->getValue();
7512     // Combine store of vmv.x.s to vse with VL of 1.
7513     // FIXME: Support FP.
7514     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
7515       SDValue Src = Val.getOperand(0);
7516       EVT VecVT = Src.getValueType();
7517       EVT MemVT = Store->getMemoryVT();
7518       // The memory VT and the element type must match.
7519       if (VecVT.getVectorElementType() == MemVT) {
7520         SDLoc DL(N);
7521         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7522         return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
7523                               DAG.getConstant(1, DL, MaskVT),
7524                               DAG.getConstant(1, DL, Subtarget.getXLenVT()),
7525                               Store->getPointerInfo(),
7526                               Store->getOriginalAlign(),
7527                               Store->getMemOperand()->getFlags());
7528       }
7529     }
7530 
7531     break;
7532   }
7533   }
7534 
7535   return SDValue();
7536 }
7537 
7538 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
7539     const SDNode *N, CombineLevel Level) const {
7540   // The following folds are only desirable if `(OP _, c1 << c2)` can be
7541   // materialised in fewer instructions than `(OP _, c1)`:
7542   //
7543   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7544   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
7545   SDValue N0 = N->getOperand(0);
7546   EVT Ty = N0.getValueType();
7547   if (Ty.isScalarInteger() &&
7548       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
7549     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7550     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
7551     if (C1 && C2) {
7552       const APInt &C1Int = C1->getAPIntValue();
7553       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
7554 
7555       // We can materialise `c1 << c2` into an add immediate, so it's "free",
7556       // and the combine should happen, to potentially allow further combines
7557       // later.
7558       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
7559           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
7560         return true;
7561 
7562       // We can materialise `c1` in an add immediate, so it's "free", and the
7563       // combine should be prevented.
7564       if (C1Int.getMinSignedBits() <= 64 &&
7565           isLegalAddImmediate(C1Int.getSExtValue()))
7566         return false;
7567 
7568       // Neither constant will fit into an immediate, so find materialisation
7569       // costs.
7570       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
7571                                               Subtarget.getFeatureBits(),
7572                                               /*CompressionCost*/true);
7573       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
7574           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
7575           /*CompressionCost*/true);
7576 
7577       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
7578       // combine should be prevented.
7579       if (C1Cost < ShiftedC1Cost)
7580         return false;
7581     }
7582   }
7583   return true;
7584 }
7585 
7586 bool RISCVTargetLowering::targetShrinkDemandedConstant(
7587     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7588     TargetLoweringOpt &TLO) const {
7589   // Delay this optimization as late as possible.
7590   if (!TLO.LegalOps)
7591     return false;
7592 
7593   EVT VT = Op.getValueType();
7594   if (VT.isVector())
7595     return false;
7596 
7597   // Only handle AND for now.
7598   if (Op.getOpcode() != ISD::AND)
7599     return false;
7600 
7601   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
7602   if (!C)
7603     return false;
7604 
7605   const APInt &Mask = C->getAPIntValue();
7606 
7607   // Clear all non-demanded bits initially.
7608   APInt ShrunkMask = Mask & DemandedBits;
7609 
7610   // Try to make a smaller immediate by setting undemanded bits.
7611 
7612   APInt ExpandedMask = Mask | ~DemandedBits;
7613 
7614   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
7615     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
7616   };
7617   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
7618     if (NewMask == Mask)
7619       return true;
7620     SDLoc DL(Op);
7621     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
7622     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
7623     return TLO.CombineTo(Op, NewOp);
7624   };
7625 
7626   // If the shrunk mask fits in sign extended 12 bits, let the target
7627   // independent code apply it.
7628   if (ShrunkMask.isSignedIntN(12))
7629     return false;
7630 
7631   // Preserve (and X, 0xffff) when zext.h is supported.
7632   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
7633     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
7634     if (IsLegalMask(NewMask))
7635       return UseMask(NewMask);
7636   }
7637 
7638   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
7639   if (VT == MVT::i64) {
7640     APInt NewMask = APInt(64, 0xffffffff);
7641     if (IsLegalMask(NewMask))
7642       return UseMask(NewMask);
7643   }
7644 
7645   // For the remaining optimizations, we need to be able to make a negative
7646   // number through a combination of mask and undemanded bits.
7647   if (!ExpandedMask.isNegative())
7648     return false;
7649 
7650   // What is the fewest number of bits we need to represent the negative number.
7651   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
7652 
7653   // Try to make a 12 bit negative immediate. If that fails try to make a 32
7654   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
7655   APInt NewMask = ShrunkMask;
7656   if (MinSignedBits <= 12)
7657     NewMask.setBitsFrom(11);
7658   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
7659     NewMask.setBitsFrom(31);
7660   else
7661     return false;
7662 
7663   // Check that our new mask is a subset of the demanded mask.
7664   assert(IsLegalMask(NewMask));
7665   return UseMask(NewMask);
7666 }
7667 
7668 static void computeGREV(APInt &Src, unsigned ShAmt) {
7669   ShAmt &= Src.getBitWidth() - 1;
7670   uint64_t x = Src.getZExtValue();
7671   if (ShAmt & 1)
7672     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
7673   if (ShAmt & 2)
7674     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
7675   if (ShAmt & 4)
7676     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
7677   if (ShAmt & 8)
7678     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
7679   if (ShAmt & 16)
7680     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
7681   if (ShAmt & 32)
7682     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
7683   Src = x;
7684 }
7685 
7686 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
7687                                                         KnownBits &Known,
7688                                                         const APInt &DemandedElts,
7689                                                         const SelectionDAG &DAG,
7690                                                         unsigned Depth) const {
7691   unsigned BitWidth = Known.getBitWidth();
7692   unsigned Opc = Op.getOpcode();
7693   assert((Opc >= ISD::BUILTIN_OP_END ||
7694           Opc == ISD::INTRINSIC_WO_CHAIN ||
7695           Opc == ISD::INTRINSIC_W_CHAIN ||
7696           Opc == ISD::INTRINSIC_VOID) &&
7697          "Should use MaskedValueIsZero if you don't know whether Op"
7698          " is a target node!");
7699 
7700   Known.resetAll();
7701   switch (Opc) {
7702   default: break;
7703   case RISCVISD::SELECT_CC: {
7704     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
7705     // If we don't know any bits, early out.
7706     if (Known.isUnknown())
7707       break;
7708     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
7709 
7710     // Only known if known in both the LHS and RHS.
7711     Known = KnownBits::commonBits(Known, Known2);
7712     break;
7713   }
7714   case RISCVISD::REMUW: {
7715     KnownBits Known2;
7716     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7717     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7718     // We only care about the lower 32 bits.
7719     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
7720     // Restore the original width by sign extending.
7721     Known = Known.sext(BitWidth);
7722     break;
7723   }
7724   case RISCVISD::DIVUW: {
7725     KnownBits Known2;
7726     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7727     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7728     // We only care about the lower 32 bits.
7729     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
7730     // Restore the original width by sign extending.
7731     Known = Known.sext(BitWidth);
7732     break;
7733   }
7734   case RISCVISD::CTZW: {
7735     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7736     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
7737     unsigned LowBits = Log2_32(PossibleTZ) + 1;
7738     Known.Zero.setBitsFrom(LowBits);
7739     break;
7740   }
7741   case RISCVISD::CLZW: {
7742     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7743     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
7744     unsigned LowBits = Log2_32(PossibleLZ) + 1;
7745     Known.Zero.setBitsFrom(LowBits);
7746     break;
7747   }
7748   case RISCVISD::GREV:
7749   case RISCVISD::GREVW: {
7750     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7751       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7752       if (Opc == RISCVISD::GREVW)
7753         Known = Known.trunc(32);
7754       unsigned ShAmt = C->getZExtValue();
7755       computeGREV(Known.Zero, ShAmt);
7756       computeGREV(Known.One, ShAmt);
7757       if (Opc == RISCVISD::GREVW)
7758         Known = Known.sext(BitWidth);
7759     }
7760     break;
7761   }
7762   case RISCVISD::READ_VLENB:
7763     // We assume VLENB is at least 16 bytes.
7764     Known.Zero.setLowBits(4);
7765     // We assume VLENB is no more than 65536 / 8 bytes.
7766     Known.Zero.setBitsFrom(14);
7767     break;
7768   case ISD::INTRINSIC_W_CHAIN: {
7769     unsigned IntNo = Op.getConstantOperandVal(1);
7770     switch (IntNo) {
7771     default:
7772       // We can't do anything for most intrinsics.
7773       break;
7774     case Intrinsic::riscv_vsetvli:
7775     case Intrinsic::riscv_vsetvlimax:
7776       // Assume that VL output is positive and would fit in an int32_t.
7777       // TODO: VLEN might be capped at 16 bits in a future V spec update.
7778       if (BitWidth >= 32)
7779         Known.Zero.setBitsFrom(31);
7780       break;
7781     }
7782     break;
7783   }
7784   }
7785 }
7786 
7787 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
7788     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
7789     unsigned Depth) const {
7790   switch (Op.getOpcode()) {
7791   default:
7792     break;
7793   case RISCVISD::SELECT_CC: {
7794     unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
7795     if (Tmp == 1) return 1;  // Early out.
7796     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
7797     return std::min(Tmp, Tmp2);
7798   }
7799   case RISCVISD::SLLW:
7800   case RISCVISD::SRAW:
7801   case RISCVISD::SRLW:
7802   case RISCVISD::DIVW:
7803   case RISCVISD::DIVUW:
7804   case RISCVISD::REMUW:
7805   case RISCVISD::ROLW:
7806   case RISCVISD::RORW:
7807   case RISCVISD::GREVW:
7808   case RISCVISD::GORCW:
7809   case RISCVISD::FSLW:
7810   case RISCVISD::FSRW:
7811   case RISCVISD::SHFLW:
7812   case RISCVISD::UNSHFLW:
7813   case RISCVISD::BCOMPRESSW:
7814   case RISCVISD::BDECOMPRESSW:
7815   case RISCVISD::FCVT_W_RV64:
7816   case RISCVISD::FCVT_WU_RV64:
7817   case RISCVISD::STRICT_FCVT_W_RV64:
7818   case RISCVISD::STRICT_FCVT_WU_RV64:
7819     // TODO: As the result is sign-extended, this is conservatively correct. A
7820     // more precise answer could be calculated for SRAW depending on known
7821     // bits in the shift amount.
7822     return 33;
7823   case RISCVISD::SHFL:
7824   case RISCVISD::UNSHFL: {
7825     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
7826     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
7827     // will stay within the upper 32 bits. If there were more than 32 sign bits
7828     // before there will be at least 33 sign bits after.
7829     if (Op.getValueType() == MVT::i64 &&
7830         isa<ConstantSDNode>(Op.getOperand(1)) &&
7831         (Op.getConstantOperandVal(1) & 0x10) == 0) {
7832       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
7833       if (Tmp > 32)
7834         return 33;
7835     }
7836     break;
7837   }
7838   case RISCVISD::VMV_X_S:
7839     // The number of sign bits of the scalar result is computed by obtaining the
7840     // element type of the input vector operand, subtracting its width from the
7841     // XLEN, and then adding one (sign bit within the element type). If the
7842     // element type is wider than XLen, the least-significant XLEN bits are
7843     // taken.
7844     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
7845       return 1;
7846     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
7847   }
7848 
7849   return 1;
7850 }
7851 
7852 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
7853                                                   MachineBasicBlock *BB) {
7854   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
7855 
7856   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
7857   // Should the count have wrapped while it was being read, we need to try
7858   // again.
7859   // ...
7860   // read:
7861   // rdcycleh x3 # load high word of cycle
7862   // rdcycle  x2 # load low word of cycle
7863   // rdcycleh x4 # load high word of cycle
7864   // bne x3, x4, read # check if high word reads match, otherwise try again
7865   // ...
7866 
7867   MachineFunction &MF = *BB->getParent();
7868   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7869   MachineFunction::iterator It = ++BB->getIterator();
7870 
7871   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7872   MF.insert(It, LoopMBB);
7873 
7874   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7875   MF.insert(It, DoneMBB);
7876 
7877   // Transfer the remainder of BB and its successor edges to DoneMBB.
7878   DoneMBB->splice(DoneMBB->begin(), BB,
7879                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
7880   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
7881 
7882   BB->addSuccessor(LoopMBB);
7883 
7884   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7885   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7886   Register LoReg = MI.getOperand(0).getReg();
7887   Register HiReg = MI.getOperand(1).getReg();
7888   DebugLoc DL = MI.getDebugLoc();
7889 
7890   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
7891   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
7892       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7893       .addReg(RISCV::X0);
7894   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
7895       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
7896       .addReg(RISCV::X0);
7897   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
7898       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7899       .addReg(RISCV::X0);
7900 
7901   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
7902       .addReg(HiReg)
7903       .addReg(ReadAgainReg)
7904       .addMBB(LoopMBB);
7905 
7906   LoopMBB->addSuccessor(LoopMBB);
7907   LoopMBB->addSuccessor(DoneMBB);
7908 
7909   MI.eraseFromParent();
7910 
7911   return DoneMBB;
7912 }
7913 
7914 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
7915                                              MachineBasicBlock *BB) {
7916   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
7917 
7918   MachineFunction &MF = *BB->getParent();
7919   DebugLoc DL = MI.getDebugLoc();
7920   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7921   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7922   Register LoReg = MI.getOperand(0).getReg();
7923   Register HiReg = MI.getOperand(1).getReg();
7924   Register SrcReg = MI.getOperand(2).getReg();
7925   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
7926   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7927 
7928   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
7929                           RI);
7930   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7931   MachineMemOperand *MMOLo =
7932       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
7933   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7934       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
7935   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
7936       .addFrameIndex(FI)
7937       .addImm(0)
7938       .addMemOperand(MMOLo);
7939   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
7940       .addFrameIndex(FI)
7941       .addImm(4)
7942       .addMemOperand(MMOHi);
7943   MI.eraseFromParent(); // The pseudo instruction is gone now.
7944   return BB;
7945 }
7946 
7947 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
7948                                                  MachineBasicBlock *BB) {
7949   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
7950          "Unexpected instruction");
7951 
7952   MachineFunction &MF = *BB->getParent();
7953   DebugLoc DL = MI.getDebugLoc();
7954   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7955   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7956   Register DstReg = MI.getOperand(0).getReg();
7957   Register LoReg = MI.getOperand(1).getReg();
7958   Register HiReg = MI.getOperand(2).getReg();
7959   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
7960   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7961 
7962   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7963   MachineMemOperand *MMOLo =
7964       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
7965   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7966       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
7967   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7968       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
7969       .addFrameIndex(FI)
7970       .addImm(0)
7971       .addMemOperand(MMOLo);
7972   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7973       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
7974       .addFrameIndex(FI)
7975       .addImm(4)
7976       .addMemOperand(MMOHi);
7977   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
7978   MI.eraseFromParent(); // The pseudo instruction is gone now.
7979   return BB;
7980 }
7981 
7982 static bool isSelectPseudo(MachineInstr &MI) {
7983   switch (MI.getOpcode()) {
7984   default:
7985     return false;
7986   case RISCV::Select_GPR_Using_CC_GPR:
7987   case RISCV::Select_FPR16_Using_CC_GPR:
7988   case RISCV::Select_FPR32_Using_CC_GPR:
7989   case RISCV::Select_FPR64_Using_CC_GPR:
7990     return true;
7991   }
7992 }
7993 
7994 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
7995                                            MachineBasicBlock *BB,
7996                                            const RISCVSubtarget &Subtarget) {
7997   // To "insert" Select_* instructions, we actually have to insert the triangle
7998   // control-flow pattern.  The incoming instructions know the destination vreg
7999   // to set, the condition code register to branch on, the true/false values to
8000   // select between, and the condcode to use to select the appropriate branch.
8001   //
8002   // We produce the following control flow:
8003   //     HeadMBB
8004   //     |  \
8005   //     |  IfFalseMBB
8006   //     | /
8007   //    TailMBB
8008   //
8009   // When we find a sequence of selects we attempt to optimize their emission
8010   // by sharing the control flow. Currently we only handle cases where we have
8011   // multiple selects with the exact same condition (same LHS, RHS and CC).
8012   // The selects may be interleaved with other instructions if the other
8013   // instructions meet some requirements we deem safe:
8014   // - They are debug instructions. Otherwise,
8015   // - They do not have side-effects, do not access memory and their inputs do
8016   //   not depend on the results of the select pseudo-instructions.
8017   // The TrueV/FalseV operands of the selects cannot depend on the result of
8018   // previous selects in the sequence.
8019   // These conditions could be further relaxed. See the X86 target for a
8020   // related approach and more information.
8021   Register LHS = MI.getOperand(1).getReg();
8022   Register RHS = MI.getOperand(2).getReg();
8023   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
8024 
8025   SmallVector<MachineInstr *, 4> SelectDebugValues;
8026   SmallSet<Register, 4> SelectDests;
8027   SelectDests.insert(MI.getOperand(0).getReg());
8028 
8029   MachineInstr *LastSelectPseudo = &MI;
8030 
8031   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
8032        SequenceMBBI != E; ++SequenceMBBI) {
8033     if (SequenceMBBI->isDebugInstr())
8034       continue;
8035     else if (isSelectPseudo(*SequenceMBBI)) {
8036       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8037           SequenceMBBI->getOperand(2).getReg() != RHS ||
8038           SequenceMBBI->getOperand(3).getImm() != CC ||
8039           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8040           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8041         break;
8042       LastSelectPseudo = &*SequenceMBBI;
8043       SequenceMBBI->collectDebugValues(SelectDebugValues);
8044       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8045     } else {
8046       if (SequenceMBBI->hasUnmodeledSideEffects() ||
8047           SequenceMBBI->mayLoadOrStore())
8048         break;
8049       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
8050             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8051           }))
8052         break;
8053     }
8054   }
8055 
8056   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
8057   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8058   DebugLoc DL = MI.getDebugLoc();
8059   MachineFunction::iterator I = ++BB->getIterator();
8060 
8061   MachineBasicBlock *HeadMBB = BB;
8062   MachineFunction *F = BB->getParent();
8063   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
8064   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
8065 
8066   F->insert(I, IfFalseMBB);
8067   F->insert(I, TailMBB);
8068 
8069   // Transfer debug instructions associated with the selects to TailMBB.
8070   for (MachineInstr *DebugInstr : SelectDebugValues) {
8071     TailMBB->push_back(DebugInstr->removeFromParent());
8072   }
8073 
8074   // Move all instructions after the sequence to TailMBB.
8075   TailMBB->splice(TailMBB->end(), HeadMBB,
8076                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
8077   // Update machine-CFG edges by transferring all successors of the current
8078   // block to the new block which will contain the Phi nodes for the selects.
8079   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
8080   // Set the successors for HeadMBB.
8081   HeadMBB->addSuccessor(IfFalseMBB);
8082   HeadMBB->addSuccessor(TailMBB);
8083 
8084   // Insert appropriate branch.
8085   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
8086     .addReg(LHS)
8087     .addReg(RHS)
8088     .addMBB(TailMBB);
8089 
8090   // IfFalseMBB just falls through to TailMBB.
8091   IfFalseMBB->addSuccessor(TailMBB);
8092 
8093   // Create PHIs for all of the select pseudo-instructions.
8094   auto SelectMBBI = MI.getIterator();
8095   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8096   auto InsertionPoint = TailMBB->begin();
8097   while (SelectMBBI != SelectEnd) {
8098     auto Next = std::next(SelectMBBI);
8099     if (isSelectPseudo(*SelectMBBI)) {
8100       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
8101       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
8102               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
8103           .addReg(SelectMBBI->getOperand(4).getReg())
8104           .addMBB(HeadMBB)
8105           .addReg(SelectMBBI->getOperand(5).getReg())
8106           .addMBB(IfFalseMBB);
8107       SelectMBBI->eraseFromParent();
8108     }
8109     SelectMBBI = Next;
8110   }
8111 
8112   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
8113   return TailMBB;
8114 }
8115 
8116 MachineBasicBlock *
8117 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8118                                                  MachineBasicBlock *BB) const {
8119   switch (MI.getOpcode()) {
8120   default:
8121     llvm_unreachable("Unexpected instr type to insert");
8122   case RISCV::ReadCycleWide:
8123     assert(!Subtarget.is64Bit() &&
8124            "ReadCycleWrite is only to be used on riscv32");
8125     return emitReadCycleWidePseudo(MI, BB);
8126   case RISCV::Select_GPR_Using_CC_GPR:
8127   case RISCV::Select_FPR16_Using_CC_GPR:
8128   case RISCV::Select_FPR32_Using_CC_GPR:
8129   case RISCV::Select_FPR64_Using_CC_GPR:
8130     return emitSelectPseudo(MI, BB, Subtarget);
8131   case RISCV::BuildPairF64Pseudo:
8132     return emitBuildPairF64Pseudo(MI, BB);
8133   case RISCV::SplitF64Pseudo:
8134     return emitSplitF64Pseudo(MI, BB);
8135   }
8136 }
8137 
8138 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8139                                                         SDNode *Node) const {
8140   // Add FRM dependency to any instructions with dynamic rounding mode.
8141   unsigned Opc = MI.getOpcode();
8142   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
8143   if (Idx < 0)
8144     return;
8145   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
8146     return;
8147   // If the instruction already reads FRM, don't add another read.
8148   if (MI.readsRegister(RISCV::FRM))
8149     return;
8150   MI.addOperand(
8151       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
8152 }
8153 
8154 // Calling Convention Implementation.
8155 // The expectations for frontend ABI lowering vary from target to target.
8156 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
8157 // details, but this is a longer term goal. For now, we simply try to keep the
8158 // role of the frontend as simple and well-defined as possible. The rules can
8159 // be summarised as:
8160 // * Never split up large scalar arguments. We handle them here.
8161 // * If a hardfloat calling convention is being used, and the struct may be
8162 // passed in a pair of registers (fp+fp, int+fp), and both registers are
8163 // available, then pass as two separate arguments. If either the GPRs or FPRs
8164 // are exhausted, then pass according to the rule below.
8165 // * If a struct could never be passed in registers or directly in a stack
8166 // slot (as it is larger than 2*XLEN and the floating point rules don't
8167 // apply), then pass it using a pointer with the byval attribute.
8168 // * If a struct is less than 2*XLEN, then coerce to either a two-element
8169 // word-sized array or a 2*XLEN scalar (depending on alignment).
8170 // * The frontend can determine whether a struct is returned by reference or
8171 // not based on its size and fields. If it will be returned by reference, the
8172 // frontend must modify the prototype so a pointer with the sret annotation is
8173 // passed as the first argument. This is not necessary for large scalar
8174 // returns.
8175 // * Struct return values and varargs should be coerced to structs containing
8176 // register-size fields in the same situations they would be for fixed
8177 // arguments.
8178 
8179 static const MCPhysReg ArgGPRs[] = {
8180   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
8181   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
8182 };
8183 static const MCPhysReg ArgFPR16s[] = {
8184   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
8185   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
8186 };
8187 static const MCPhysReg ArgFPR32s[] = {
8188   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
8189   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
8190 };
8191 static const MCPhysReg ArgFPR64s[] = {
8192   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
8193   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
8194 };
8195 // This is an interim calling convention and it may be changed in the future.
8196 static const MCPhysReg ArgVRs[] = {
8197     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
8198     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
8199     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
8200 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
8201                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
8202                                      RISCV::V20M2, RISCV::V22M2};
8203 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
8204                                      RISCV::V20M4};
8205 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
8206 
8207 // Pass a 2*XLEN argument that has been split into two XLEN values through
8208 // registers or the stack as necessary.
8209 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
8210                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
8211                                 MVT ValVT2, MVT LocVT2,
8212                                 ISD::ArgFlagsTy ArgFlags2) {
8213   unsigned XLenInBytes = XLen / 8;
8214   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8215     // At least one half can be passed via register.
8216     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
8217                                      VA1.getLocVT(), CCValAssign::Full));
8218   } else {
8219     // Both halves must be passed on the stack, with proper alignment.
8220     Align StackAlign =
8221         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
8222     State.addLoc(
8223         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
8224                             State.AllocateStack(XLenInBytes, StackAlign),
8225                             VA1.getLocVT(), CCValAssign::Full));
8226     State.addLoc(CCValAssign::getMem(
8227         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8228         LocVT2, CCValAssign::Full));
8229     return false;
8230   }
8231 
8232   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8233     // The second half can also be passed via register.
8234     State.addLoc(
8235         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
8236   } else {
8237     // The second half is passed via the stack, without additional alignment.
8238     State.addLoc(CCValAssign::getMem(
8239         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8240         LocVT2, CCValAssign::Full));
8241   }
8242 
8243   return false;
8244 }
8245 
8246 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
8247                                Optional<unsigned> FirstMaskArgument,
8248                                CCState &State, const RISCVTargetLowering &TLI) {
8249   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
8250   if (RC == &RISCV::VRRegClass) {
8251     // Assign the first mask argument to V0.
8252     // This is an interim calling convention and it may be changed in the
8253     // future.
8254     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
8255       return State.AllocateReg(RISCV::V0);
8256     return State.AllocateReg(ArgVRs);
8257   }
8258   if (RC == &RISCV::VRM2RegClass)
8259     return State.AllocateReg(ArgVRM2s);
8260   if (RC == &RISCV::VRM4RegClass)
8261     return State.AllocateReg(ArgVRM4s);
8262   if (RC == &RISCV::VRM8RegClass)
8263     return State.AllocateReg(ArgVRM8s);
8264   llvm_unreachable("Unhandled register class for ValueType");
8265 }
8266 
8267 // Implements the RISC-V calling convention. Returns true upon failure.
8268 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
8269                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
8270                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
8271                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
8272                      Optional<unsigned> FirstMaskArgument) {
8273   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
8274   assert(XLen == 32 || XLen == 64);
8275   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
8276 
8277   // Any return value split in to more than two values can't be returned
8278   // directly. Vectors are returned via the available vector registers.
8279   if (!LocVT.isVector() && IsRet && ValNo > 1)
8280     return true;
8281 
8282   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
8283   // variadic argument, or if no F16/F32 argument registers are available.
8284   bool UseGPRForF16_F32 = true;
8285   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
8286   // variadic argument, or if no F64 argument registers are available.
8287   bool UseGPRForF64 = true;
8288 
8289   switch (ABI) {
8290   default:
8291     llvm_unreachable("Unexpected ABI");
8292   case RISCVABI::ABI_ILP32:
8293   case RISCVABI::ABI_LP64:
8294     break;
8295   case RISCVABI::ABI_ILP32F:
8296   case RISCVABI::ABI_LP64F:
8297     UseGPRForF16_F32 = !IsFixed;
8298     break;
8299   case RISCVABI::ABI_ILP32D:
8300   case RISCVABI::ABI_LP64D:
8301     UseGPRForF16_F32 = !IsFixed;
8302     UseGPRForF64 = !IsFixed;
8303     break;
8304   }
8305 
8306   // FPR16, FPR32, and FPR64 alias each other.
8307   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
8308     UseGPRForF16_F32 = true;
8309     UseGPRForF64 = true;
8310   }
8311 
8312   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
8313   // similar local variables rather than directly checking against the target
8314   // ABI.
8315 
8316   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
8317     LocVT = XLenVT;
8318     LocInfo = CCValAssign::BCvt;
8319   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
8320     LocVT = MVT::i64;
8321     LocInfo = CCValAssign::BCvt;
8322   }
8323 
8324   // If this is a variadic argument, the RISC-V calling convention requires
8325   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
8326   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
8327   // be used regardless of whether the original argument was split during
8328   // legalisation or not. The argument will not be passed by registers if the
8329   // original type is larger than 2*XLEN, so the register alignment rule does
8330   // not apply.
8331   unsigned TwoXLenInBytes = (2 * XLen) / 8;
8332   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
8333       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
8334     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
8335     // Skip 'odd' register if necessary.
8336     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
8337       State.AllocateReg(ArgGPRs);
8338   }
8339 
8340   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
8341   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
8342       State.getPendingArgFlags();
8343 
8344   assert(PendingLocs.size() == PendingArgFlags.size() &&
8345          "PendingLocs and PendingArgFlags out of sync");
8346 
8347   // Handle passing f64 on RV32D with a soft float ABI or when floating point
8348   // registers are exhausted.
8349   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
8350     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
8351            "Can't lower f64 if it is split");
8352     // Depending on available argument GPRS, f64 may be passed in a pair of
8353     // GPRs, split between a GPR and the stack, or passed completely on the
8354     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
8355     // cases.
8356     Register Reg = State.AllocateReg(ArgGPRs);
8357     LocVT = MVT::i32;
8358     if (!Reg) {
8359       unsigned StackOffset = State.AllocateStack(8, Align(8));
8360       State.addLoc(
8361           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8362       return false;
8363     }
8364     if (!State.AllocateReg(ArgGPRs))
8365       State.AllocateStack(4, Align(4));
8366     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8367     return false;
8368   }
8369 
8370   // Fixed-length vectors are located in the corresponding scalable-vector
8371   // container types.
8372   if (ValVT.isFixedLengthVector())
8373     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8374 
8375   // Split arguments might be passed indirectly, so keep track of the pending
8376   // values. Split vectors are passed via a mix of registers and indirectly, so
8377   // treat them as we would any other argument.
8378   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
8379     LocVT = XLenVT;
8380     LocInfo = CCValAssign::Indirect;
8381     PendingLocs.push_back(
8382         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
8383     PendingArgFlags.push_back(ArgFlags);
8384     if (!ArgFlags.isSplitEnd()) {
8385       return false;
8386     }
8387   }
8388 
8389   // If the split argument only had two elements, it should be passed directly
8390   // in registers or on the stack.
8391   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
8392       PendingLocs.size() <= 2) {
8393     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
8394     // Apply the normal calling convention rules to the first half of the
8395     // split argument.
8396     CCValAssign VA = PendingLocs[0];
8397     ISD::ArgFlagsTy AF = PendingArgFlags[0];
8398     PendingLocs.clear();
8399     PendingArgFlags.clear();
8400     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
8401                                ArgFlags);
8402   }
8403 
8404   // Allocate to a register if possible, or else a stack slot.
8405   Register Reg;
8406   unsigned StoreSizeBytes = XLen / 8;
8407   Align StackAlign = Align(XLen / 8);
8408 
8409   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8410     Reg = State.AllocateReg(ArgFPR16s);
8411   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8412     Reg = State.AllocateReg(ArgFPR32s);
8413   else if (ValVT == MVT::f64 && !UseGPRForF64)
8414     Reg = State.AllocateReg(ArgFPR64s);
8415   else if (ValVT.isVector()) {
8416     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8417     if (!Reg) {
8418       // For return values, the vector must be passed fully via registers or
8419       // via the stack.
8420       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8421       // but we're using all of them.
8422       if (IsRet)
8423         return true;
8424       // Try using a GPR to pass the address
8425       if ((Reg = State.AllocateReg(ArgGPRs))) {
8426         LocVT = XLenVT;
8427         LocInfo = CCValAssign::Indirect;
8428       } else if (ValVT.isScalableVector()) {
8429         LocVT = XLenVT;
8430         LocInfo = CCValAssign::Indirect;
8431       } else {
8432         // Pass fixed-length vectors on the stack.
8433         LocVT = ValVT;
8434         StoreSizeBytes = ValVT.getStoreSize();
8435         // Align vectors to their element sizes, being careful for vXi1
8436         // vectors.
8437         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8438       }
8439     }
8440   } else {
8441     Reg = State.AllocateReg(ArgGPRs);
8442   }
8443 
8444   unsigned StackOffset =
8445       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8446 
8447   // If we reach this point and PendingLocs is non-empty, we must be at the
8448   // end of a split argument that must be passed indirectly.
8449   if (!PendingLocs.empty()) {
8450     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
8451     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
8452 
8453     for (auto &It : PendingLocs) {
8454       if (Reg)
8455         It.convertToReg(Reg);
8456       else
8457         It.convertToMem(StackOffset);
8458       State.addLoc(It);
8459     }
8460     PendingLocs.clear();
8461     PendingArgFlags.clear();
8462     return false;
8463   }
8464 
8465   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
8466           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
8467          "Expected an XLenVT or vector types at this stage");
8468 
8469   if (Reg) {
8470     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8471     return false;
8472   }
8473 
8474   // When a floating-point value is passed on the stack, no bit-conversion is
8475   // needed.
8476   if (ValVT.isFloatingPoint()) {
8477     LocVT = ValVT;
8478     LocInfo = CCValAssign::Full;
8479   }
8480   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8481   return false;
8482 }
8483 
8484 template <typename ArgTy>
8485 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
8486   for (const auto &ArgIdx : enumerate(Args)) {
8487     MVT ArgVT = ArgIdx.value().VT;
8488     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
8489       return ArgIdx.index();
8490   }
8491   return None;
8492 }
8493 
8494 void RISCVTargetLowering::analyzeInputArgs(
8495     MachineFunction &MF, CCState &CCInfo,
8496     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
8497     RISCVCCAssignFn Fn) const {
8498   unsigned NumArgs = Ins.size();
8499   FunctionType *FType = MF.getFunction().getFunctionType();
8500 
8501   Optional<unsigned> FirstMaskArgument;
8502   if (Subtarget.hasVInstructions())
8503     FirstMaskArgument = preAssignMask(Ins);
8504 
8505   for (unsigned i = 0; i != NumArgs; ++i) {
8506     MVT ArgVT = Ins[i].VT;
8507     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
8508 
8509     Type *ArgTy = nullptr;
8510     if (IsRet)
8511       ArgTy = FType->getReturnType();
8512     else if (Ins[i].isOrigArg())
8513       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8514 
8515     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8516     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8517            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
8518            FirstMaskArgument)) {
8519       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
8520                         << EVT(ArgVT).getEVTString() << '\n');
8521       llvm_unreachable(nullptr);
8522     }
8523   }
8524 }
8525 
8526 void RISCVTargetLowering::analyzeOutputArgs(
8527     MachineFunction &MF, CCState &CCInfo,
8528     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
8529     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
8530   unsigned NumArgs = Outs.size();
8531 
8532   Optional<unsigned> FirstMaskArgument;
8533   if (Subtarget.hasVInstructions())
8534     FirstMaskArgument = preAssignMask(Outs);
8535 
8536   for (unsigned i = 0; i != NumArgs; i++) {
8537     MVT ArgVT = Outs[i].VT;
8538     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8539     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8540 
8541     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8542     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8543            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
8544            FirstMaskArgument)) {
8545       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
8546                         << EVT(ArgVT).getEVTString() << "\n");
8547       llvm_unreachable(nullptr);
8548     }
8549   }
8550 }
8551 
8552 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
8553 // values.
8554 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
8555                                    const CCValAssign &VA, const SDLoc &DL,
8556                                    const RISCVSubtarget &Subtarget) {
8557   switch (VA.getLocInfo()) {
8558   default:
8559     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8560   case CCValAssign::Full:
8561     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
8562       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
8563     break;
8564   case CCValAssign::BCvt:
8565     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8566       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
8567     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8568       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
8569     else
8570       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
8571     break;
8572   }
8573   return Val;
8574 }
8575 
8576 // The caller is responsible for loading the full value if the argument is
8577 // passed with CCValAssign::Indirect.
8578 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
8579                                 const CCValAssign &VA, const SDLoc &DL,
8580                                 const RISCVTargetLowering &TLI) {
8581   MachineFunction &MF = DAG.getMachineFunction();
8582   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8583   EVT LocVT = VA.getLocVT();
8584   SDValue Val;
8585   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
8586   Register VReg = RegInfo.createVirtualRegister(RC);
8587   RegInfo.addLiveIn(VA.getLocReg(), VReg);
8588   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
8589 
8590   if (VA.getLocInfo() == CCValAssign::Indirect)
8591     return Val;
8592 
8593   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
8594 }
8595 
8596 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
8597                                    const CCValAssign &VA, const SDLoc &DL,
8598                                    const RISCVSubtarget &Subtarget) {
8599   EVT LocVT = VA.getLocVT();
8600 
8601   switch (VA.getLocInfo()) {
8602   default:
8603     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8604   case CCValAssign::Full:
8605     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
8606       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
8607     break;
8608   case CCValAssign::BCvt:
8609     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8610       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
8611     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8612       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
8613     else
8614       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
8615     break;
8616   }
8617   return Val;
8618 }
8619 
8620 // The caller is responsible for loading the full value if the argument is
8621 // passed with CCValAssign::Indirect.
8622 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
8623                                 const CCValAssign &VA, const SDLoc &DL) {
8624   MachineFunction &MF = DAG.getMachineFunction();
8625   MachineFrameInfo &MFI = MF.getFrameInfo();
8626   EVT LocVT = VA.getLocVT();
8627   EVT ValVT = VA.getValVT();
8628   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
8629   if (ValVT.isScalableVector()) {
8630     // When the value is a scalable vector, we save the pointer which points to
8631     // the scalable vector value in the stack. The ValVT will be the pointer
8632     // type, instead of the scalable vector type.
8633     ValVT = LocVT;
8634   }
8635   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
8636                                  /*IsImmutable=*/true);
8637   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
8638   SDValue Val;
8639 
8640   ISD::LoadExtType ExtType;
8641   switch (VA.getLocInfo()) {
8642   default:
8643     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8644   case CCValAssign::Full:
8645   case CCValAssign::Indirect:
8646   case CCValAssign::BCvt:
8647     ExtType = ISD::NON_EXTLOAD;
8648     break;
8649   }
8650   Val = DAG.getExtLoad(
8651       ExtType, DL, LocVT, Chain, FIN,
8652       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
8653   return Val;
8654 }
8655 
8656 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
8657                                        const CCValAssign &VA, const SDLoc &DL) {
8658   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
8659          "Unexpected VA");
8660   MachineFunction &MF = DAG.getMachineFunction();
8661   MachineFrameInfo &MFI = MF.getFrameInfo();
8662   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8663 
8664   if (VA.isMemLoc()) {
8665     // f64 is passed on the stack.
8666     int FI =
8667         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
8668     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8669     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
8670                        MachinePointerInfo::getFixedStack(MF, FI));
8671   }
8672 
8673   assert(VA.isRegLoc() && "Expected register VA assignment");
8674 
8675   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8676   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
8677   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
8678   SDValue Hi;
8679   if (VA.getLocReg() == RISCV::X17) {
8680     // Second half of f64 is passed on the stack.
8681     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
8682     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8683     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
8684                      MachinePointerInfo::getFixedStack(MF, FI));
8685   } else {
8686     // Second half of f64 is passed in another GPR.
8687     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8688     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
8689     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
8690   }
8691   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
8692 }
8693 
8694 // FastCC has less than 1% performance improvement for some particular
8695 // benchmark. But theoretically, it may has benenfit for some cases.
8696 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
8697                             unsigned ValNo, MVT ValVT, MVT LocVT,
8698                             CCValAssign::LocInfo LocInfo,
8699                             ISD::ArgFlagsTy ArgFlags, CCState &State,
8700                             bool IsFixed, bool IsRet, Type *OrigTy,
8701                             const RISCVTargetLowering &TLI,
8702                             Optional<unsigned> FirstMaskArgument) {
8703 
8704   // X5 and X6 might be used for save-restore libcall.
8705   static const MCPhysReg GPRList[] = {
8706       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
8707       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
8708       RISCV::X29, RISCV::X30, RISCV::X31};
8709 
8710   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8711     if (unsigned Reg = State.AllocateReg(GPRList)) {
8712       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8713       return false;
8714     }
8715   }
8716 
8717   if (LocVT == MVT::f16) {
8718     static const MCPhysReg FPR16List[] = {
8719         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
8720         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
8721         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
8722         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
8723     if (unsigned Reg = State.AllocateReg(FPR16List)) {
8724       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8725       return false;
8726     }
8727   }
8728 
8729   if (LocVT == MVT::f32) {
8730     static const MCPhysReg FPR32List[] = {
8731         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
8732         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
8733         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
8734         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
8735     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8736       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8737       return false;
8738     }
8739   }
8740 
8741   if (LocVT == MVT::f64) {
8742     static const MCPhysReg FPR64List[] = {
8743         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
8744         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
8745         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
8746         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
8747     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8748       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8749       return false;
8750     }
8751   }
8752 
8753   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
8754     unsigned Offset4 = State.AllocateStack(4, Align(4));
8755     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
8756     return false;
8757   }
8758 
8759   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
8760     unsigned Offset5 = State.AllocateStack(8, Align(8));
8761     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
8762     return false;
8763   }
8764 
8765   if (LocVT.isVector()) {
8766     if (unsigned Reg =
8767             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
8768       // Fixed-length vectors are located in the corresponding scalable-vector
8769       // container types.
8770       if (ValVT.isFixedLengthVector())
8771         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8772       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8773     } else {
8774       // Try and pass the address via a "fast" GPR.
8775       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
8776         LocInfo = CCValAssign::Indirect;
8777         LocVT = TLI.getSubtarget().getXLenVT();
8778         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
8779       } else if (ValVT.isFixedLengthVector()) {
8780         auto StackAlign =
8781             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8782         unsigned StackOffset =
8783             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
8784         State.addLoc(
8785             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8786       } else {
8787         // Can't pass scalable vectors on the stack.
8788         return true;
8789       }
8790     }
8791 
8792     return false;
8793   }
8794 
8795   return true; // CC didn't match.
8796 }
8797 
8798 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
8799                          CCValAssign::LocInfo LocInfo,
8800                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
8801 
8802   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8803     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
8804     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
8805     static const MCPhysReg GPRList[] = {
8806         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
8807         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
8808     if (unsigned Reg = State.AllocateReg(GPRList)) {
8809       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8810       return false;
8811     }
8812   }
8813 
8814   if (LocVT == MVT::f32) {
8815     // Pass in STG registers: F1, ..., F6
8816     //                        fs0 ... fs5
8817     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
8818                                           RISCV::F18_F, RISCV::F19_F,
8819                                           RISCV::F20_F, RISCV::F21_F};
8820     if (unsigned Reg = State.AllocateReg(FPR32List)) {
8821       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8822       return false;
8823     }
8824   }
8825 
8826   if (LocVT == MVT::f64) {
8827     // Pass in STG registers: D1, ..., D6
8828     //                        fs6 ... fs11
8829     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
8830                                           RISCV::F24_D, RISCV::F25_D,
8831                                           RISCV::F26_D, RISCV::F27_D};
8832     if (unsigned Reg = State.AllocateReg(FPR64List)) {
8833       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8834       return false;
8835     }
8836   }
8837 
8838   report_fatal_error("No registers left in GHC calling convention");
8839   return true;
8840 }
8841 
8842 // Transform physical registers into virtual registers.
8843 SDValue RISCVTargetLowering::LowerFormalArguments(
8844     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
8845     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
8846     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
8847 
8848   MachineFunction &MF = DAG.getMachineFunction();
8849 
8850   switch (CallConv) {
8851   default:
8852     report_fatal_error("Unsupported calling convention");
8853   case CallingConv::C:
8854   case CallingConv::Fast:
8855     break;
8856   case CallingConv::GHC:
8857     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
8858         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
8859       report_fatal_error(
8860         "GHC calling convention requires the F and D instruction set extensions");
8861   }
8862 
8863   const Function &Func = MF.getFunction();
8864   if (Func.hasFnAttribute("interrupt")) {
8865     if (!Func.arg_empty())
8866       report_fatal_error(
8867         "Functions with the interrupt attribute cannot have arguments!");
8868 
8869     StringRef Kind =
8870       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8871 
8872     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
8873       report_fatal_error(
8874         "Function interrupt attribute argument not supported!");
8875   }
8876 
8877   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8878   MVT XLenVT = Subtarget.getXLenVT();
8879   unsigned XLenInBytes = Subtarget.getXLen() / 8;
8880   // Used with vargs to acumulate store chains.
8881   std::vector<SDValue> OutChains;
8882 
8883   // Assign locations to all of the incoming arguments.
8884   SmallVector<CCValAssign, 16> ArgLocs;
8885   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8886 
8887   if (CallConv == CallingConv::GHC)
8888     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
8889   else
8890     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
8891                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8892                                                    : CC_RISCV);
8893 
8894   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
8895     CCValAssign &VA = ArgLocs[i];
8896     SDValue ArgValue;
8897     // Passing f64 on RV32D with a soft float ABI must be handled as a special
8898     // case.
8899     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
8900       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
8901     else if (VA.isRegLoc())
8902       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
8903     else
8904       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
8905 
8906     if (VA.getLocInfo() == CCValAssign::Indirect) {
8907       // If the original argument was split and passed by reference (e.g. i128
8908       // on RV32), we need to load all parts of it here (using the same
8909       // address). Vectors may be partly split to registers and partly to the
8910       // stack, in which case the base address is partly offset and subsequent
8911       // stores are relative to that.
8912       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
8913                                    MachinePointerInfo()));
8914       unsigned ArgIndex = Ins[i].OrigArgIndex;
8915       unsigned ArgPartOffset = Ins[i].PartOffset;
8916       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8917       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
8918         CCValAssign &PartVA = ArgLocs[i + 1];
8919         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
8920         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8921         if (PartVA.getValVT().isScalableVector())
8922           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8923         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
8924         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
8925                                      MachinePointerInfo()));
8926         ++i;
8927       }
8928       continue;
8929     }
8930     InVals.push_back(ArgValue);
8931   }
8932 
8933   if (IsVarArg) {
8934     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
8935     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
8936     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
8937     MachineFrameInfo &MFI = MF.getFrameInfo();
8938     MachineRegisterInfo &RegInfo = MF.getRegInfo();
8939     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
8940 
8941     // Offset of the first variable argument from stack pointer, and size of
8942     // the vararg save area. For now, the varargs save area is either zero or
8943     // large enough to hold a0-a7.
8944     int VaArgOffset, VarArgsSaveSize;
8945 
8946     // If all registers are allocated, then all varargs must be passed on the
8947     // stack and we don't need to save any argregs.
8948     if (ArgRegs.size() == Idx) {
8949       VaArgOffset = CCInfo.getNextStackOffset();
8950       VarArgsSaveSize = 0;
8951     } else {
8952       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
8953       VaArgOffset = -VarArgsSaveSize;
8954     }
8955 
8956     // Record the frame index of the first variable argument
8957     // which is a value necessary to VASTART.
8958     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8959     RVFI->setVarArgsFrameIndex(FI);
8960 
8961     // If saving an odd number of registers then create an extra stack slot to
8962     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
8963     // offsets to even-numbered registered remain 2*XLEN-aligned.
8964     if (Idx % 2) {
8965       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
8966       VarArgsSaveSize += XLenInBytes;
8967     }
8968 
8969     // Copy the integer registers that may have been used for passing varargs
8970     // to the vararg save area.
8971     for (unsigned I = Idx; I < ArgRegs.size();
8972          ++I, VaArgOffset += XLenInBytes) {
8973       const Register Reg = RegInfo.createVirtualRegister(RC);
8974       RegInfo.addLiveIn(ArgRegs[I], Reg);
8975       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
8976       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8977       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8978       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
8979                                    MachinePointerInfo::getFixedStack(MF, FI));
8980       cast<StoreSDNode>(Store.getNode())
8981           ->getMemOperand()
8982           ->setValue((Value *)nullptr);
8983       OutChains.push_back(Store);
8984     }
8985     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
8986   }
8987 
8988   // All stores are grouped in one node to allow the matching between
8989   // the size of Ins and InVals. This only happens for vararg functions.
8990   if (!OutChains.empty()) {
8991     OutChains.push_back(Chain);
8992     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
8993   }
8994 
8995   return Chain;
8996 }
8997 
8998 /// isEligibleForTailCallOptimization - Check whether the call is eligible
8999 /// for tail call optimization.
9000 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9001 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9002     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9003     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9004 
9005   auto &Callee = CLI.Callee;
9006   auto CalleeCC = CLI.CallConv;
9007   auto &Outs = CLI.Outs;
9008   auto &Caller = MF.getFunction();
9009   auto CallerCC = Caller.getCallingConv();
9010 
9011   // Exception-handling functions need a special set of instructions to
9012   // indicate a return to the hardware. Tail-calling another function would
9013   // probably break this.
9014   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
9015   // should be expanded as new function attributes are introduced.
9016   if (Caller.hasFnAttribute("interrupt"))
9017     return false;
9018 
9019   // Do not tail call opt if the stack is used to pass parameters.
9020   if (CCInfo.getNextStackOffset() != 0)
9021     return false;
9022 
9023   // Do not tail call opt if any parameters need to be passed indirectly.
9024   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
9025   // passed indirectly. So the address of the value will be passed in a
9026   // register, or if not available, then the address is put on the stack. In
9027   // order to pass indirectly, space on the stack often needs to be allocated
9028   // in order to store the value. In this case the CCInfo.getNextStackOffset()
9029   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
9030   // are passed CCValAssign::Indirect.
9031   for (auto &VA : ArgLocs)
9032     if (VA.getLocInfo() == CCValAssign::Indirect)
9033       return false;
9034 
9035   // Do not tail call opt if either caller or callee uses struct return
9036   // semantics.
9037   auto IsCallerStructRet = Caller.hasStructRetAttr();
9038   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9039   if (IsCallerStructRet || IsCalleeStructRet)
9040     return false;
9041 
9042   // Externally-defined functions with weak linkage should not be
9043   // tail-called. The behaviour of branch instructions in this situation (as
9044   // used for tail calls) is implementation-defined, so we cannot rely on the
9045   // linker replacing the tail call with a return.
9046   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
9047     const GlobalValue *GV = G->getGlobal();
9048     if (GV->hasExternalWeakLinkage())
9049       return false;
9050   }
9051 
9052   // The callee has to preserve all registers the caller needs to preserve.
9053   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9054   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9055   if (CalleeCC != CallerCC) {
9056     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9057     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9058       return false;
9059   }
9060 
9061   // Byval parameters hand the function a pointer directly into the stack area
9062   // we want to reuse during a tail call. Working around this *is* possible
9063   // but less efficient and uglier in LowerCall.
9064   for (auto &Arg : Outs)
9065     if (Arg.Flags.isByVal())
9066       return false;
9067 
9068   return true;
9069 }
9070 
9071 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
9072   return DAG.getDataLayout().getPrefTypeAlign(
9073       VT.getTypeForEVT(*DAG.getContext()));
9074 }
9075 
9076 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
9077 // and output parameter nodes.
9078 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
9079                                        SmallVectorImpl<SDValue> &InVals) const {
9080   SelectionDAG &DAG = CLI.DAG;
9081   SDLoc &DL = CLI.DL;
9082   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
9083   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
9084   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
9085   SDValue Chain = CLI.Chain;
9086   SDValue Callee = CLI.Callee;
9087   bool &IsTailCall = CLI.IsTailCall;
9088   CallingConv::ID CallConv = CLI.CallConv;
9089   bool IsVarArg = CLI.IsVarArg;
9090   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9091   MVT XLenVT = Subtarget.getXLenVT();
9092 
9093   MachineFunction &MF = DAG.getMachineFunction();
9094 
9095   // Analyze the operands of the call, assigning locations to each operand.
9096   SmallVector<CCValAssign, 16> ArgLocs;
9097   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9098 
9099   if (CallConv == CallingConv::GHC)
9100     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
9101   else
9102     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
9103                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9104                                                     : CC_RISCV);
9105 
9106   // Check if it's really possible to do a tail call.
9107   if (IsTailCall)
9108     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9109 
9110   if (IsTailCall)
9111     ++NumTailCalls;
9112   else if (CLI.CB && CLI.CB->isMustTailCall())
9113     report_fatal_error("failed to perform tail call elimination on a call "
9114                        "site marked musttail");
9115 
9116   // Get a count of how many bytes are to be pushed on the stack.
9117   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
9118 
9119   // Create local copies for byval args
9120   SmallVector<SDValue, 8> ByValArgs;
9121   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9122     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9123     if (!Flags.isByVal())
9124       continue;
9125 
9126     SDValue Arg = OutVals[i];
9127     unsigned Size = Flags.getByValSize();
9128     Align Alignment = Flags.getNonZeroByValAlign();
9129 
9130     int FI =
9131         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
9132     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9133     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
9134 
9135     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9136                           /*IsVolatile=*/false,
9137                           /*AlwaysInline=*/false, IsTailCall,
9138                           MachinePointerInfo(), MachinePointerInfo());
9139     ByValArgs.push_back(FIPtr);
9140   }
9141 
9142   if (!IsTailCall)
9143     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
9144 
9145   // Copy argument values to their designated locations.
9146   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
9147   SmallVector<SDValue, 8> MemOpChains;
9148   SDValue StackPtr;
9149   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
9150     CCValAssign &VA = ArgLocs[i];
9151     SDValue ArgValue = OutVals[i];
9152     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9153 
9154     // Handle passing f64 on RV32D with a soft float ABI as a special case.
9155     bool IsF64OnRV32DSoftABI =
9156         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
9157     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
9158       SDValue SplitF64 = DAG.getNode(
9159           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
9160       SDValue Lo = SplitF64.getValue(0);
9161       SDValue Hi = SplitF64.getValue(1);
9162 
9163       Register RegLo = VA.getLocReg();
9164       RegsToPass.push_back(std::make_pair(RegLo, Lo));
9165 
9166       if (RegLo == RISCV::X17) {
9167         // Second half of f64 is passed on the stack.
9168         // Work out the address of the stack slot.
9169         if (!StackPtr.getNode())
9170           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9171         // Emit the store.
9172         MemOpChains.push_back(
9173             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
9174       } else {
9175         // Second half of f64 is passed in another GPR.
9176         assert(RegLo < RISCV::X31 && "Invalid register pair");
9177         Register RegHigh = RegLo + 1;
9178         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
9179       }
9180       continue;
9181     }
9182 
9183     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
9184     // as any other MemLoc.
9185 
9186     // Promote the value if needed.
9187     // For now, only handle fully promoted and indirect arguments.
9188     if (VA.getLocInfo() == CCValAssign::Indirect) {
9189       // Store the argument in a stack slot and pass its address.
9190       Align StackAlign =
9191           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
9192                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
9193       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
9194       // If the original argument was split (e.g. i128), we need
9195       // to store the required parts of it here (and pass just one address).
9196       // Vectors may be partly split to registers and partly to the stack, in
9197       // which case the base address is partly offset and subsequent stores are
9198       // relative to that.
9199       unsigned ArgIndex = Outs[i].OrigArgIndex;
9200       unsigned ArgPartOffset = Outs[i].PartOffset;
9201       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9202       // Calculate the total size to store. We don't have access to what we're
9203       // actually storing other than performing the loop and collecting the
9204       // info.
9205       SmallVector<std::pair<SDValue, SDValue>> Parts;
9206       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
9207         SDValue PartValue = OutVals[i + 1];
9208         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
9209         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9210         EVT PartVT = PartValue.getValueType();
9211         if (PartVT.isScalableVector())
9212           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9213         StoredSize += PartVT.getStoreSize();
9214         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
9215         Parts.push_back(std::make_pair(PartValue, Offset));
9216         ++i;
9217       }
9218       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
9219       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
9220       MemOpChains.push_back(
9221           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
9222                        MachinePointerInfo::getFixedStack(MF, FI)));
9223       for (const auto &Part : Parts) {
9224         SDValue PartValue = Part.first;
9225         SDValue PartOffset = Part.second;
9226         SDValue Address =
9227             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
9228         MemOpChains.push_back(
9229             DAG.getStore(Chain, DL, PartValue, Address,
9230                          MachinePointerInfo::getFixedStack(MF, FI)));
9231       }
9232       ArgValue = SpillSlot;
9233     } else {
9234       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
9235     }
9236 
9237     // Use local copy if it is a byval arg.
9238     if (Flags.isByVal())
9239       ArgValue = ByValArgs[j++];
9240 
9241     if (VA.isRegLoc()) {
9242       // Queue up the argument copies and emit them at the end.
9243       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
9244     } else {
9245       assert(VA.isMemLoc() && "Argument not register or memory");
9246       assert(!IsTailCall && "Tail call not allowed if stack is used "
9247                             "for passing parameters");
9248 
9249       // Work out the address of the stack slot.
9250       if (!StackPtr.getNode())
9251         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9252       SDValue Address =
9253           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
9254                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
9255 
9256       // Emit the store.
9257       MemOpChains.push_back(
9258           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
9259     }
9260   }
9261 
9262   // Join the stores, which are independent of one another.
9263   if (!MemOpChains.empty())
9264     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
9265 
9266   SDValue Glue;
9267 
9268   // Build a sequence of copy-to-reg nodes, chained and glued together.
9269   for (auto &Reg : RegsToPass) {
9270     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
9271     Glue = Chain.getValue(1);
9272   }
9273 
9274   // Validate that none of the argument registers have been marked as
9275   // reserved, if so report an error. Do the same for the return address if this
9276   // is not a tailcall.
9277   validateCCReservedRegs(RegsToPass, MF);
9278   if (!IsTailCall &&
9279       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
9280     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9281         MF.getFunction(),
9282         "Return address register required, but has been reserved."});
9283 
9284   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
9285   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
9286   // split it and then direct call can be matched by PseudoCALL.
9287   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
9288     const GlobalValue *GV = S->getGlobal();
9289 
9290     unsigned OpFlags = RISCVII::MO_CALL;
9291     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
9292       OpFlags = RISCVII::MO_PLT;
9293 
9294     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
9295   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
9296     unsigned OpFlags = RISCVII::MO_CALL;
9297 
9298     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
9299                                                  nullptr))
9300       OpFlags = RISCVII::MO_PLT;
9301 
9302     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
9303   }
9304 
9305   // The first call operand is the chain and the second is the target address.
9306   SmallVector<SDValue, 8> Ops;
9307   Ops.push_back(Chain);
9308   Ops.push_back(Callee);
9309 
9310   // Add argument registers to the end of the list so that they are
9311   // known live into the call.
9312   for (auto &Reg : RegsToPass)
9313     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
9314 
9315   if (!IsTailCall) {
9316     // Add a register mask operand representing the call-preserved registers.
9317     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
9318     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
9319     assert(Mask && "Missing call preserved mask for calling convention");
9320     Ops.push_back(DAG.getRegisterMask(Mask));
9321   }
9322 
9323   // Glue the call to the argument copies, if any.
9324   if (Glue.getNode())
9325     Ops.push_back(Glue);
9326 
9327   // Emit the call.
9328   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9329 
9330   if (IsTailCall) {
9331     MF.getFrameInfo().setHasTailCall();
9332     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
9333   }
9334 
9335   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
9336   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
9337   Glue = Chain.getValue(1);
9338 
9339   // Mark the end of the call, which is glued to the call itself.
9340   Chain = DAG.getCALLSEQ_END(Chain,
9341                              DAG.getConstant(NumBytes, DL, PtrVT, true),
9342                              DAG.getConstant(0, DL, PtrVT, true),
9343                              Glue, DL);
9344   Glue = Chain.getValue(1);
9345 
9346   // Assign locations to each value returned by this call.
9347   SmallVector<CCValAssign, 16> RVLocs;
9348   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
9349   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
9350 
9351   // Copy all of the result registers out of their specified physreg.
9352   for (auto &VA : RVLocs) {
9353     // Copy the value out
9354     SDValue RetValue =
9355         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
9356     // Glue the RetValue to the end of the call sequence
9357     Chain = RetValue.getValue(1);
9358     Glue = RetValue.getValue(2);
9359 
9360     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9361       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
9362       SDValue RetValue2 =
9363           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
9364       Chain = RetValue2.getValue(1);
9365       Glue = RetValue2.getValue(2);
9366       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
9367                              RetValue2);
9368     }
9369 
9370     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
9371 
9372     InVals.push_back(RetValue);
9373   }
9374 
9375   return Chain;
9376 }
9377 
9378 bool RISCVTargetLowering::CanLowerReturn(
9379     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
9380     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
9381   SmallVector<CCValAssign, 16> RVLocs;
9382   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9383 
9384   Optional<unsigned> FirstMaskArgument;
9385   if (Subtarget.hasVInstructions())
9386     FirstMaskArgument = preAssignMask(Outs);
9387 
9388   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9389     MVT VT = Outs[i].VT;
9390     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9391     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9392     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
9393                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
9394                  *this, FirstMaskArgument))
9395       return false;
9396   }
9397   return true;
9398 }
9399 
9400 SDValue
9401 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9402                                  bool IsVarArg,
9403                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
9404                                  const SmallVectorImpl<SDValue> &OutVals,
9405                                  const SDLoc &DL, SelectionDAG &DAG) const {
9406   const MachineFunction &MF = DAG.getMachineFunction();
9407   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9408 
9409   // Stores the assignment of the return value to a location.
9410   SmallVector<CCValAssign, 16> RVLocs;
9411 
9412   // Info about the registers and stack slot.
9413   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9414                  *DAG.getContext());
9415 
9416   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9417                     nullptr, CC_RISCV);
9418 
9419   if (CallConv == CallingConv::GHC && !RVLocs.empty())
9420     report_fatal_error("GHC functions return void only");
9421 
9422   SDValue Glue;
9423   SmallVector<SDValue, 4> RetOps(1, Chain);
9424 
9425   // Copy the result values into the output registers.
9426   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9427     SDValue Val = OutVals[i];
9428     CCValAssign &VA = RVLocs[i];
9429     assert(VA.isRegLoc() && "Can only return in registers!");
9430 
9431     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9432       // Handle returning f64 on RV32D with a soft float ABI.
9433       assert(VA.isRegLoc() && "Expected return via registers");
9434       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9435                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
9436       SDValue Lo = SplitF64.getValue(0);
9437       SDValue Hi = SplitF64.getValue(1);
9438       Register RegLo = VA.getLocReg();
9439       assert(RegLo < RISCV::X31 && "Invalid register pair");
9440       Register RegHi = RegLo + 1;
9441 
9442       if (STI.isRegisterReservedByUser(RegLo) ||
9443           STI.isRegisterReservedByUser(RegHi))
9444         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9445             MF.getFunction(),
9446             "Return value register required, but has been reserved."});
9447 
9448       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
9449       Glue = Chain.getValue(1);
9450       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
9451       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
9452       Glue = Chain.getValue(1);
9453       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
9454     } else {
9455       // Handle a 'normal' return.
9456       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
9457       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
9458 
9459       if (STI.isRegisterReservedByUser(VA.getLocReg()))
9460         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9461             MF.getFunction(),
9462             "Return value register required, but has been reserved."});
9463 
9464       // Guarantee that all emitted copies are stuck together.
9465       Glue = Chain.getValue(1);
9466       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
9467     }
9468   }
9469 
9470   RetOps[0] = Chain; // Update chain.
9471 
9472   // Add the glue node if we have it.
9473   if (Glue.getNode()) {
9474     RetOps.push_back(Glue);
9475   }
9476 
9477   unsigned RetOpc = RISCVISD::RET_FLAG;
9478   // Interrupt service routines use different return instructions.
9479   const Function &Func = DAG.getMachineFunction().getFunction();
9480   if (Func.hasFnAttribute("interrupt")) {
9481     if (!Func.getReturnType()->isVoidTy())
9482       report_fatal_error(
9483           "Functions with the interrupt attribute must have void return type!");
9484 
9485     MachineFunction &MF = DAG.getMachineFunction();
9486     StringRef Kind =
9487       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9488 
9489     if (Kind == "user")
9490       RetOpc = RISCVISD::URET_FLAG;
9491     else if (Kind == "supervisor")
9492       RetOpc = RISCVISD::SRET_FLAG;
9493     else
9494       RetOpc = RISCVISD::MRET_FLAG;
9495   }
9496 
9497   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
9498 }
9499 
9500 void RISCVTargetLowering::validateCCReservedRegs(
9501     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
9502     MachineFunction &MF) const {
9503   const Function &F = MF.getFunction();
9504   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9505 
9506   if (llvm::any_of(Regs, [&STI](auto Reg) {
9507         return STI.isRegisterReservedByUser(Reg.first);
9508       }))
9509     F.getContext().diagnose(DiagnosticInfoUnsupported{
9510         F, "Argument register required, but has been reserved."});
9511 }
9512 
9513 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
9514   return CI->isTailCall();
9515 }
9516 
9517 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
9518 #define NODE_NAME_CASE(NODE)                                                   \
9519   case RISCVISD::NODE:                                                         \
9520     return "RISCVISD::" #NODE;
9521   // clang-format off
9522   switch ((RISCVISD::NodeType)Opcode) {
9523   case RISCVISD::FIRST_NUMBER:
9524     break;
9525   NODE_NAME_CASE(RET_FLAG)
9526   NODE_NAME_CASE(URET_FLAG)
9527   NODE_NAME_CASE(SRET_FLAG)
9528   NODE_NAME_CASE(MRET_FLAG)
9529   NODE_NAME_CASE(CALL)
9530   NODE_NAME_CASE(SELECT_CC)
9531   NODE_NAME_CASE(BR_CC)
9532   NODE_NAME_CASE(BuildPairF64)
9533   NODE_NAME_CASE(SplitF64)
9534   NODE_NAME_CASE(TAIL)
9535   NODE_NAME_CASE(MULHSU)
9536   NODE_NAME_CASE(SLLW)
9537   NODE_NAME_CASE(SRAW)
9538   NODE_NAME_CASE(SRLW)
9539   NODE_NAME_CASE(DIVW)
9540   NODE_NAME_CASE(DIVUW)
9541   NODE_NAME_CASE(REMUW)
9542   NODE_NAME_CASE(ROLW)
9543   NODE_NAME_CASE(RORW)
9544   NODE_NAME_CASE(CLZW)
9545   NODE_NAME_CASE(CTZW)
9546   NODE_NAME_CASE(FSLW)
9547   NODE_NAME_CASE(FSRW)
9548   NODE_NAME_CASE(FSL)
9549   NODE_NAME_CASE(FSR)
9550   NODE_NAME_CASE(FMV_H_X)
9551   NODE_NAME_CASE(FMV_X_ANYEXTH)
9552   NODE_NAME_CASE(FMV_W_X_RV64)
9553   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
9554   NODE_NAME_CASE(FCVT_X)
9555   NODE_NAME_CASE(FCVT_XU)
9556   NODE_NAME_CASE(FCVT_W_RV64)
9557   NODE_NAME_CASE(FCVT_WU_RV64)
9558   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
9559   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
9560   NODE_NAME_CASE(READ_CYCLE_WIDE)
9561   NODE_NAME_CASE(GREV)
9562   NODE_NAME_CASE(GREVW)
9563   NODE_NAME_CASE(GORC)
9564   NODE_NAME_CASE(GORCW)
9565   NODE_NAME_CASE(SHFL)
9566   NODE_NAME_CASE(SHFLW)
9567   NODE_NAME_CASE(UNSHFL)
9568   NODE_NAME_CASE(UNSHFLW)
9569   NODE_NAME_CASE(BCOMPRESS)
9570   NODE_NAME_CASE(BCOMPRESSW)
9571   NODE_NAME_CASE(BDECOMPRESS)
9572   NODE_NAME_CASE(BDECOMPRESSW)
9573   NODE_NAME_CASE(VMV_V_X_VL)
9574   NODE_NAME_CASE(VFMV_V_F_VL)
9575   NODE_NAME_CASE(VMV_X_S)
9576   NODE_NAME_CASE(VMV_S_X_VL)
9577   NODE_NAME_CASE(VFMV_S_F_VL)
9578   NODE_NAME_CASE(SPLAT_VECTOR_I64)
9579   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
9580   NODE_NAME_CASE(READ_VLENB)
9581   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
9582   NODE_NAME_CASE(VSLIDEUP_VL)
9583   NODE_NAME_CASE(VSLIDE1UP_VL)
9584   NODE_NAME_CASE(VSLIDEDOWN_VL)
9585   NODE_NAME_CASE(VSLIDE1DOWN_VL)
9586   NODE_NAME_CASE(VID_VL)
9587   NODE_NAME_CASE(VFNCVT_ROD_VL)
9588   NODE_NAME_CASE(VECREDUCE_ADD_VL)
9589   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
9590   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
9591   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
9592   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
9593   NODE_NAME_CASE(VECREDUCE_AND_VL)
9594   NODE_NAME_CASE(VECREDUCE_OR_VL)
9595   NODE_NAME_CASE(VECREDUCE_XOR_VL)
9596   NODE_NAME_CASE(VECREDUCE_FADD_VL)
9597   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
9598   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
9599   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
9600   NODE_NAME_CASE(ADD_VL)
9601   NODE_NAME_CASE(AND_VL)
9602   NODE_NAME_CASE(MUL_VL)
9603   NODE_NAME_CASE(OR_VL)
9604   NODE_NAME_CASE(SDIV_VL)
9605   NODE_NAME_CASE(SHL_VL)
9606   NODE_NAME_CASE(SREM_VL)
9607   NODE_NAME_CASE(SRA_VL)
9608   NODE_NAME_CASE(SRL_VL)
9609   NODE_NAME_CASE(SUB_VL)
9610   NODE_NAME_CASE(UDIV_VL)
9611   NODE_NAME_CASE(UREM_VL)
9612   NODE_NAME_CASE(XOR_VL)
9613   NODE_NAME_CASE(SADDSAT_VL)
9614   NODE_NAME_CASE(UADDSAT_VL)
9615   NODE_NAME_CASE(SSUBSAT_VL)
9616   NODE_NAME_CASE(USUBSAT_VL)
9617   NODE_NAME_CASE(FADD_VL)
9618   NODE_NAME_CASE(FSUB_VL)
9619   NODE_NAME_CASE(FMUL_VL)
9620   NODE_NAME_CASE(FDIV_VL)
9621   NODE_NAME_CASE(FNEG_VL)
9622   NODE_NAME_CASE(FABS_VL)
9623   NODE_NAME_CASE(FSQRT_VL)
9624   NODE_NAME_CASE(FMA_VL)
9625   NODE_NAME_CASE(FCOPYSIGN_VL)
9626   NODE_NAME_CASE(SMIN_VL)
9627   NODE_NAME_CASE(SMAX_VL)
9628   NODE_NAME_CASE(UMIN_VL)
9629   NODE_NAME_CASE(UMAX_VL)
9630   NODE_NAME_CASE(FMINNUM_VL)
9631   NODE_NAME_CASE(FMAXNUM_VL)
9632   NODE_NAME_CASE(MULHS_VL)
9633   NODE_NAME_CASE(MULHU_VL)
9634   NODE_NAME_CASE(FP_TO_SINT_VL)
9635   NODE_NAME_CASE(FP_TO_UINT_VL)
9636   NODE_NAME_CASE(SINT_TO_FP_VL)
9637   NODE_NAME_CASE(UINT_TO_FP_VL)
9638   NODE_NAME_CASE(FP_EXTEND_VL)
9639   NODE_NAME_CASE(FP_ROUND_VL)
9640   NODE_NAME_CASE(VWMUL_VL)
9641   NODE_NAME_CASE(VWMULU_VL)
9642   NODE_NAME_CASE(SETCC_VL)
9643   NODE_NAME_CASE(VSELECT_VL)
9644   NODE_NAME_CASE(VMAND_VL)
9645   NODE_NAME_CASE(VMOR_VL)
9646   NODE_NAME_CASE(VMXOR_VL)
9647   NODE_NAME_CASE(VMCLR_VL)
9648   NODE_NAME_CASE(VMSET_VL)
9649   NODE_NAME_CASE(VRGATHER_VX_VL)
9650   NODE_NAME_CASE(VRGATHER_VV_VL)
9651   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
9652   NODE_NAME_CASE(VSEXT_VL)
9653   NODE_NAME_CASE(VZEXT_VL)
9654   NODE_NAME_CASE(VCPOP_VL)
9655   NODE_NAME_CASE(VLE_VL)
9656   NODE_NAME_CASE(VSE_VL)
9657   NODE_NAME_CASE(READ_CSR)
9658   NODE_NAME_CASE(WRITE_CSR)
9659   NODE_NAME_CASE(SWAP_CSR)
9660   }
9661   // clang-format on
9662   return nullptr;
9663 #undef NODE_NAME_CASE
9664 }
9665 
9666 /// getConstraintType - Given a constraint letter, return the type of
9667 /// constraint it is for this target.
9668 RISCVTargetLowering::ConstraintType
9669 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
9670   if (Constraint.size() == 1) {
9671     switch (Constraint[0]) {
9672     default:
9673       break;
9674     case 'f':
9675       return C_RegisterClass;
9676     case 'I':
9677     case 'J':
9678     case 'K':
9679       return C_Immediate;
9680     case 'A':
9681       return C_Memory;
9682     case 'S': // A symbolic address
9683       return C_Other;
9684     }
9685   } else {
9686     if (Constraint == "vr" || Constraint == "vm")
9687       return C_RegisterClass;
9688   }
9689   return TargetLowering::getConstraintType(Constraint);
9690 }
9691 
9692 std::pair<unsigned, const TargetRegisterClass *>
9693 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9694                                                   StringRef Constraint,
9695                                                   MVT VT) const {
9696   // First, see if this is a constraint that directly corresponds to a
9697   // RISCV register class.
9698   if (Constraint.size() == 1) {
9699     switch (Constraint[0]) {
9700     case 'r':
9701       // TODO: Support fixed vectors up to XLen for P extension?
9702       if (VT.isVector())
9703         break;
9704       return std::make_pair(0U, &RISCV::GPRRegClass);
9705     case 'f':
9706       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
9707         return std::make_pair(0U, &RISCV::FPR16RegClass);
9708       if (Subtarget.hasStdExtF() && VT == MVT::f32)
9709         return std::make_pair(0U, &RISCV::FPR32RegClass);
9710       if (Subtarget.hasStdExtD() && VT == MVT::f64)
9711         return std::make_pair(0U, &RISCV::FPR64RegClass);
9712       break;
9713     default:
9714       break;
9715     }
9716   } else if (Constraint == "vr") {
9717     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
9718                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9719       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
9720         return std::make_pair(0U, RC);
9721     }
9722   } else if (Constraint == "vm") {
9723     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
9724       return std::make_pair(0U, &RISCV::VMV0RegClass);
9725   }
9726 
9727   // Clang will correctly decode the usage of register name aliases into their
9728   // official names. However, other frontends like `rustc` do not. This allows
9729   // users of these frontends to use the ABI names for registers in LLVM-style
9730   // register constraints.
9731   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
9732                                .Case("{zero}", RISCV::X0)
9733                                .Case("{ra}", RISCV::X1)
9734                                .Case("{sp}", RISCV::X2)
9735                                .Case("{gp}", RISCV::X3)
9736                                .Case("{tp}", RISCV::X4)
9737                                .Case("{t0}", RISCV::X5)
9738                                .Case("{t1}", RISCV::X6)
9739                                .Case("{t2}", RISCV::X7)
9740                                .Cases("{s0}", "{fp}", RISCV::X8)
9741                                .Case("{s1}", RISCV::X9)
9742                                .Case("{a0}", RISCV::X10)
9743                                .Case("{a1}", RISCV::X11)
9744                                .Case("{a2}", RISCV::X12)
9745                                .Case("{a3}", RISCV::X13)
9746                                .Case("{a4}", RISCV::X14)
9747                                .Case("{a5}", RISCV::X15)
9748                                .Case("{a6}", RISCV::X16)
9749                                .Case("{a7}", RISCV::X17)
9750                                .Case("{s2}", RISCV::X18)
9751                                .Case("{s3}", RISCV::X19)
9752                                .Case("{s4}", RISCV::X20)
9753                                .Case("{s5}", RISCV::X21)
9754                                .Case("{s6}", RISCV::X22)
9755                                .Case("{s7}", RISCV::X23)
9756                                .Case("{s8}", RISCV::X24)
9757                                .Case("{s9}", RISCV::X25)
9758                                .Case("{s10}", RISCV::X26)
9759                                .Case("{s11}", RISCV::X27)
9760                                .Case("{t3}", RISCV::X28)
9761                                .Case("{t4}", RISCV::X29)
9762                                .Case("{t5}", RISCV::X30)
9763                                .Case("{t6}", RISCV::X31)
9764                                .Default(RISCV::NoRegister);
9765   if (XRegFromAlias != RISCV::NoRegister)
9766     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
9767 
9768   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
9769   // TableGen record rather than the AsmName to choose registers for InlineAsm
9770   // constraints, plus we want to match those names to the widest floating point
9771   // register type available, manually select floating point registers here.
9772   //
9773   // The second case is the ABI name of the register, so that frontends can also
9774   // use the ABI names in register constraint lists.
9775   if (Subtarget.hasStdExtF()) {
9776     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
9777                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
9778                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
9779                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
9780                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
9781                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
9782                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
9783                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
9784                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
9785                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
9786                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
9787                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
9788                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
9789                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
9790                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
9791                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
9792                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
9793                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
9794                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
9795                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
9796                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
9797                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
9798                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
9799                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
9800                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
9801                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
9802                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
9803                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
9804                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
9805                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
9806                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
9807                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
9808                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
9809                         .Default(RISCV::NoRegister);
9810     if (FReg != RISCV::NoRegister) {
9811       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
9812       if (Subtarget.hasStdExtD()) {
9813         unsigned RegNo = FReg - RISCV::F0_F;
9814         unsigned DReg = RISCV::F0_D + RegNo;
9815         return std::make_pair(DReg, &RISCV::FPR64RegClass);
9816       }
9817       return std::make_pair(FReg, &RISCV::FPR32RegClass);
9818     }
9819   }
9820 
9821   if (Subtarget.hasVInstructions()) {
9822     Register VReg = StringSwitch<Register>(Constraint.lower())
9823                         .Case("{v0}", RISCV::V0)
9824                         .Case("{v1}", RISCV::V1)
9825                         .Case("{v2}", RISCV::V2)
9826                         .Case("{v3}", RISCV::V3)
9827                         .Case("{v4}", RISCV::V4)
9828                         .Case("{v5}", RISCV::V5)
9829                         .Case("{v6}", RISCV::V6)
9830                         .Case("{v7}", RISCV::V7)
9831                         .Case("{v8}", RISCV::V8)
9832                         .Case("{v9}", RISCV::V9)
9833                         .Case("{v10}", RISCV::V10)
9834                         .Case("{v11}", RISCV::V11)
9835                         .Case("{v12}", RISCV::V12)
9836                         .Case("{v13}", RISCV::V13)
9837                         .Case("{v14}", RISCV::V14)
9838                         .Case("{v15}", RISCV::V15)
9839                         .Case("{v16}", RISCV::V16)
9840                         .Case("{v17}", RISCV::V17)
9841                         .Case("{v18}", RISCV::V18)
9842                         .Case("{v19}", RISCV::V19)
9843                         .Case("{v20}", RISCV::V20)
9844                         .Case("{v21}", RISCV::V21)
9845                         .Case("{v22}", RISCV::V22)
9846                         .Case("{v23}", RISCV::V23)
9847                         .Case("{v24}", RISCV::V24)
9848                         .Case("{v25}", RISCV::V25)
9849                         .Case("{v26}", RISCV::V26)
9850                         .Case("{v27}", RISCV::V27)
9851                         .Case("{v28}", RISCV::V28)
9852                         .Case("{v29}", RISCV::V29)
9853                         .Case("{v30}", RISCV::V30)
9854                         .Case("{v31}", RISCV::V31)
9855                         .Default(RISCV::NoRegister);
9856     if (VReg != RISCV::NoRegister) {
9857       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9858         return std::make_pair(VReg, &RISCV::VMRegClass);
9859       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
9860         return std::make_pair(VReg, &RISCV::VRRegClass);
9861       for (const auto *RC :
9862            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9863         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
9864           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
9865           return std::make_pair(VReg, RC);
9866         }
9867       }
9868     }
9869   }
9870 
9871   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9872 }
9873 
9874 unsigned
9875 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
9876   // Currently only support length 1 constraints.
9877   if (ConstraintCode.size() == 1) {
9878     switch (ConstraintCode[0]) {
9879     case 'A':
9880       return InlineAsm::Constraint_A;
9881     default:
9882       break;
9883     }
9884   }
9885 
9886   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
9887 }
9888 
9889 void RISCVTargetLowering::LowerAsmOperandForConstraint(
9890     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9891     SelectionDAG &DAG) const {
9892   // Currently only support length 1 constraints.
9893   if (Constraint.length() == 1) {
9894     switch (Constraint[0]) {
9895     case 'I':
9896       // Validate & create a 12-bit signed immediate operand.
9897       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9898         uint64_t CVal = C->getSExtValue();
9899         if (isInt<12>(CVal))
9900           Ops.push_back(
9901               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9902       }
9903       return;
9904     case 'J':
9905       // Validate & create an integer zero operand.
9906       if (auto *C = dyn_cast<ConstantSDNode>(Op))
9907         if (C->getZExtValue() == 0)
9908           Ops.push_back(
9909               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
9910       return;
9911     case 'K':
9912       // Validate & create a 5-bit unsigned immediate operand.
9913       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9914         uint64_t CVal = C->getZExtValue();
9915         if (isUInt<5>(CVal))
9916           Ops.push_back(
9917               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9918       }
9919       return;
9920     case 'S':
9921       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9922         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9923                                                  GA->getValueType(0)));
9924       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
9925         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
9926                                                 BA->getValueType(0)));
9927       }
9928       return;
9929     default:
9930       break;
9931     }
9932   }
9933   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9934 }
9935 
9936 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
9937                                                    Instruction *Inst,
9938                                                    AtomicOrdering Ord) const {
9939   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
9940     return Builder.CreateFence(Ord);
9941   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
9942     return Builder.CreateFence(AtomicOrdering::Release);
9943   return nullptr;
9944 }
9945 
9946 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
9947                                                     Instruction *Inst,
9948                                                     AtomicOrdering Ord) const {
9949   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
9950     return Builder.CreateFence(AtomicOrdering::Acquire);
9951   return nullptr;
9952 }
9953 
9954 TargetLowering::AtomicExpansionKind
9955 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
9956   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
9957   // point operations can't be used in an lr/sc sequence without breaking the
9958   // forward-progress guarantee.
9959   if (AI->isFloatingPointOperation())
9960     return AtomicExpansionKind::CmpXChg;
9961 
9962   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
9963   if (Size == 8 || Size == 16)
9964     return AtomicExpansionKind::MaskedIntrinsic;
9965   return AtomicExpansionKind::None;
9966 }
9967 
9968 static Intrinsic::ID
9969 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
9970   if (XLen == 32) {
9971     switch (BinOp) {
9972     default:
9973       llvm_unreachable("Unexpected AtomicRMW BinOp");
9974     case AtomicRMWInst::Xchg:
9975       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
9976     case AtomicRMWInst::Add:
9977       return Intrinsic::riscv_masked_atomicrmw_add_i32;
9978     case AtomicRMWInst::Sub:
9979       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
9980     case AtomicRMWInst::Nand:
9981       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
9982     case AtomicRMWInst::Max:
9983       return Intrinsic::riscv_masked_atomicrmw_max_i32;
9984     case AtomicRMWInst::Min:
9985       return Intrinsic::riscv_masked_atomicrmw_min_i32;
9986     case AtomicRMWInst::UMax:
9987       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
9988     case AtomicRMWInst::UMin:
9989       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
9990     }
9991   }
9992 
9993   if (XLen == 64) {
9994     switch (BinOp) {
9995     default:
9996       llvm_unreachable("Unexpected AtomicRMW BinOp");
9997     case AtomicRMWInst::Xchg:
9998       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
9999     case AtomicRMWInst::Add:
10000       return Intrinsic::riscv_masked_atomicrmw_add_i64;
10001     case AtomicRMWInst::Sub:
10002       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
10003     case AtomicRMWInst::Nand:
10004       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
10005     case AtomicRMWInst::Max:
10006       return Intrinsic::riscv_masked_atomicrmw_max_i64;
10007     case AtomicRMWInst::Min:
10008       return Intrinsic::riscv_masked_atomicrmw_min_i64;
10009     case AtomicRMWInst::UMax:
10010       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
10011     case AtomicRMWInst::UMin:
10012       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
10013     }
10014   }
10015 
10016   llvm_unreachable("Unexpected XLen\n");
10017 }
10018 
10019 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
10020     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
10021     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
10022   unsigned XLen = Subtarget.getXLen();
10023   Value *Ordering =
10024       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
10025   Type *Tys[] = {AlignedAddr->getType()};
10026   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
10027       AI->getModule(),
10028       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
10029 
10030   if (XLen == 64) {
10031     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10032     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10033     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10034   }
10035 
10036   Value *Result;
10037 
10038   // Must pass the shift amount needed to sign extend the loaded value prior
10039   // to performing a signed comparison for min/max. ShiftAmt is the number of
10040   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
10041   // is the number of bits to left+right shift the value in order to
10042   // sign-extend.
10043   if (AI->getOperation() == AtomicRMWInst::Min ||
10044       AI->getOperation() == AtomicRMWInst::Max) {
10045     const DataLayout &DL = AI->getModule()->getDataLayout();
10046     unsigned ValWidth =
10047         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
10048     Value *SextShamt =
10049         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
10050     Result = Builder.CreateCall(LrwOpScwLoop,
10051                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10052   } else {
10053     Result =
10054         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10055   }
10056 
10057   if (XLen == 64)
10058     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10059   return Result;
10060 }
10061 
10062 TargetLowering::AtomicExpansionKind
10063 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
10064     AtomicCmpXchgInst *CI) const {
10065   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
10066   if (Size == 8 || Size == 16)
10067     return AtomicExpansionKind::MaskedIntrinsic;
10068   return AtomicExpansionKind::None;
10069 }
10070 
10071 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
10072     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
10073     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
10074   unsigned XLen = Subtarget.getXLen();
10075   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
10076   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
10077   if (XLen == 64) {
10078     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10079     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10080     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10081     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
10082   }
10083   Type *Tys[] = {AlignedAddr->getType()};
10084   Function *MaskedCmpXchg =
10085       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
10086   Value *Result = Builder.CreateCall(
10087       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
10088   if (XLen == 64)
10089     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10090   return Result;
10091 }
10092 
10093 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
10094   return false;
10095 }
10096 
10097 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
10098                                                EVT VT) const {
10099   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
10100     return false;
10101 
10102   switch (FPVT.getSimpleVT().SimpleTy) {
10103   case MVT::f16:
10104     return Subtarget.hasStdExtZfh();
10105   case MVT::f32:
10106     return Subtarget.hasStdExtF();
10107   case MVT::f64:
10108     return Subtarget.hasStdExtD();
10109   default:
10110     return false;
10111   }
10112 }
10113 
10114 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
10115                                                      EVT VT) const {
10116   VT = VT.getScalarType();
10117 
10118   if (!VT.isSimple())
10119     return false;
10120 
10121   switch (VT.getSimpleVT().SimpleTy) {
10122   case MVT::f16:
10123     return Subtarget.hasStdExtZfh();
10124   case MVT::f32:
10125     return Subtarget.hasStdExtF();
10126   case MVT::f64:
10127     return Subtarget.hasStdExtD();
10128   default:
10129     break;
10130   }
10131 
10132   return false;
10133 }
10134 
10135 Register RISCVTargetLowering::getExceptionPointerRegister(
10136     const Constant *PersonalityFn) const {
10137   return RISCV::X10;
10138 }
10139 
10140 Register RISCVTargetLowering::getExceptionSelectorRegister(
10141     const Constant *PersonalityFn) const {
10142   return RISCV::X11;
10143 }
10144 
10145 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
10146   // Return false to suppress the unnecessary extensions if the LibCall
10147   // arguments or return value is f32 type for LP64 ABI.
10148   RISCVABI::ABI ABI = Subtarget.getTargetABI();
10149   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
10150     return false;
10151 
10152   return true;
10153 }
10154 
10155 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
10156   if (Subtarget.is64Bit() && Type == MVT::i32)
10157     return true;
10158 
10159   return IsSigned;
10160 }
10161 
10162 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
10163                                                  SDValue C) const {
10164   // Check integral scalar types.
10165   if (VT.isScalarInteger()) {
10166     // Omit the optimization if the sub target has the M extension and the data
10167     // size exceeds XLen.
10168     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
10169       return false;
10170     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
10171       // Break the MUL to a SLLI and an ADD/SUB.
10172       const APInt &Imm = ConstNode->getAPIntValue();
10173       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
10174           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
10175         return true;
10176       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
10177       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
10178           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
10179            (Imm - 8).isPowerOf2()))
10180         return true;
10181       // Omit the following optimization if the sub target has the M extension
10182       // and the data size >= XLen.
10183       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
10184         return false;
10185       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
10186       // a pair of LUI/ADDI.
10187       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
10188         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
10189         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
10190             (1 - ImmS).isPowerOf2())
10191         return true;
10192       }
10193     }
10194   }
10195 
10196   return false;
10197 }
10198 
10199 bool RISCVTargetLowering::isMulAddWithConstProfitable(
10200     const SDValue &AddNode, const SDValue &ConstNode) const {
10201   // Let the DAGCombiner decide for vectors.
10202   EVT VT = AddNode.getValueType();
10203   if (VT.isVector())
10204     return true;
10205 
10206   // Let the DAGCombiner decide for larger types.
10207   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
10208     return true;
10209 
10210   // It is worse if c1 is simm12 while c1*c2 is not.
10211   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
10212   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
10213   const APInt &C1 = C1Node->getAPIntValue();
10214   const APInt &C2 = C2Node->getAPIntValue();
10215   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
10216     return false;
10217 
10218   // Default to true and let the DAGCombiner decide.
10219   return true;
10220 }
10221 
10222 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
10223     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
10224     bool *Fast) const {
10225   if (!VT.isVector())
10226     return false;
10227 
10228   EVT ElemVT = VT.getVectorElementType();
10229   if (Alignment >= ElemVT.getStoreSize()) {
10230     if (Fast)
10231       *Fast = true;
10232     return true;
10233   }
10234 
10235   return false;
10236 }
10237 
10238 bool RISCVTargetLowering::splitValueIntoRegisterParts(
10239     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
10240     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
10241   bool IsABIRegCopy = CC.hasValue();
10242   EVT ValueVT = Val.getValueType();
10243   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10244     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
10245     // and cast to f32.
10246     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
10247     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
10248     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
10249                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
10250     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
10251     Parts[0] = Val;
10252     return true;
10253   }
10254 
10255   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10256     LLVMContext &Context = *DAG.getContext();
10257     EVT ValueEltVT = ValueVT.getVectorElementType();
10258     EVT PartEltVT = PartVT.getVectorElementType();
10259     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10260     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10261     if (PartVTBitSize % ValueVTBitSize == 0) {
10262       assert(PartVTBitSize >= ValueVTBitSize);
10263       // If the element types are different, bitcast to the same element type of
10264       // PartVT first.
10265       // Give an example here, we want copy a <vscale x 1 x i8> value to
10266       // <vscale x 4 x i16>.
10267       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
10268       // subvector, then we can bitcast to <vscale x 4 x i16>.
10269       if (ValueEltVT != PartEltVT) {
10270         if (PartVTBitSize > ValueVTBitSize) {
10271           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10272           assert(Count != 0 && "The number of element should not be zero.");
10273           EVT SameEltTypeVT =
10274               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10275           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
10276                             DAG.getUNDEF(SameEltTypeVT), Val,
10277                             DAG.getVectorIdxConstant(0, DL));
10278         }
10279         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
10280       } else {
10281         Val =
10282             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
10283                         Val, DAG.getVectorIdxConstant(0, DL));
10284       }
10285       Parts[0] = Val;
10286       return true;
10287     }
10288   }
10289   return false;
10290 }
10291 
10292 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
10293     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
10294     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
10295   bool IsABIRegCopy = CC.hasValue();
10296   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10297     SDValue Val = Parts[0];
10298 
10299     // Cast the f32 to i32, truncate to i16, and cast back to f16.
10300     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
10301     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
10302     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
10303     return Val;
10304   }
10305 
10306   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10307     LLVMContext &Context = *DAG.getContext();
10308     SDValue Val = Parts[0];
10309     EVT ValueEltVT = ValueVT.getVectorElementType();
10310     EVT PartEltVT = PartVT.getVectorElementType();
10311     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10312     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10313     if (PartVTBitSize % ValueVTBitSize == 0) {
10314       assert(PartVTBitSize >= ValueVTBitSize);
10315       EVT SameEltTypeVT = ValueVT;
10316       // If the element types are different, convert it to the same element type
10317       // of PartVT.
10318       // Give an example here, we want copy a <vscale x 1 x i8> value from
10319       // <vscale x 4 x i16>.
10320       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
10321       // then we can extract <vscale x 1 x i8>.
10322       if (ValueEltVT != PartEltVT) {
10323         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10324         assert(Count != 0 && "The number of element should not be zero.");
10325         SameEltTypeVT =
10326             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10327         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
10328       }
10329       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
10330                         DAG.getVectorIdxConstant(0, DL));
10331       return Val;
10332     }
10333   }
10334   return SDValue();
10335 }
10336 
10337 #define GET_REGISTER_MATCHER
10338 #include "RISCVGenAsmMatcher.inc"
10339 
10340 Register
10341 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
10342                                        const MachineFunction &MF) const {
10343   Register Reg = MatchRegisterAltName(RegName);
10344   if (Reg == RISCV::NoRegister)
10345     Reg = MatchRegisterName(RegName);
10346   if (Reg == RISCV::NoRegister)
10347     report_fatal_error(
10348         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
10349   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
10350   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
10351     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
10352                              StringRef(RegName) + "\"."));
10353   return Reg;
10354 }
10355 
10356 namespace llvm {
10357 namespace RISCVVIntrinsicsTable {
10358 
10359 #define GET_RISCVVIntrinsicsTable_IMPL
10360 #include "RISCVGenSearchableTables.inc"
10361 
10362 } // namespace RISCVVIntrinsicsTable
10363 
10364 } // namespace llvm
10365