1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
147         const TargetRegisterClass *RC;
148         if (LMul == 1)
149           RC = &RISCV::VRRegClass;
150         else if (LMul == 2)
151           RC = &RISCV::VRM2RegClass;
152         else if (LMul == 4)
153           RC = &RISCV::VRM4RegClass;
154         else if (LMul == 8)
155           RC = &RISCV::VRM8RegClass;
156         else
157           llvm_unreachable("Unexpected LMul!");
158 
159         addRegisterClass(VT, RC);
160       };
161       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164 
165       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
166         if (useRVVForFixedLengthVectorVT(VT))
167           addRegClassForFixedVectors(VT);
168     }
169   }
170 
171   // Compute derived properties from the register classes.
172   computeRegisterProperties(STI.getRegisterInfo());
173 
174   setStackPointerRegisterToSaveRestore(RISCV::X2);
175 
176   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
177     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
178 
179   // TODO: add all necessary setOperationAction calls.
180   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
181 
182   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
183   setOperationAction(ISD::BR_CC, XLenVT, Expand);
184   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
185 
186   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
187   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
188 
189   setOperationAction(ISD::VASTART, MVT::Other, Custom);
190   setOperationAction(ISD::VAARG, MVT::Other, Expand);
191   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
192   setOperationAction(ISD::VAEND, MVT::Other, Expand);
193 
194   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
195   if (!Subtarget.hasStdExtZbb()) {
196     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
197     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
198   }
199 
200   if (Subtarget.is64Bit()) {
201     setOperationAction(ISD::ADD, MVT::i32, Custom);
202     setOperationAction(ISD::SUB, MVT::i32, Custom);
203     setOperationAction(ISD::SHL, MVT::i32, Custom);
204     setOperationAction(ISD::SRA, MVT::i32, Custom);
205     setOperationAction(ISD::SRL, MVT::i32, Custom);
206   }
207 
208   if (!Subtarget.hasStdExtM()) {
209     setOperationAction(ISD::MUL, XLenVT, Expand);
210     setOperationAction(ISD::MULHS, XLenVT, Expand);
211     setOperationAction(ISD::MULHU, XLenVT, Expand);
212     setOperationAction(ISD::SDIV, XLenVT, Expand);
213     setOperationAction(ISD::UDIV, XLenVT, Expand);
214     setOperationAction(ISD::SREM, XLenVT, Expand);
215     setOperationAction(ISD::UREM, XLenVT, Expand);
216   }
217 
218   if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
219     setOperationAction(ISD::MUL, MVT::i32, Custom);
220 
221     setOperationAction(ISD::SDIV, MVT::i8, Custom);
222     setOperationAction(ISD::UDIV, MVT::i8, Custom);
223     setOperationAction(ISD::UREM, MVT::i8, Custom);
224     setOperationAction(ISD::SDIV, MVT::i16, Custom);
225     setOperationAction(ISD::UDIV, MVT::i16, Custom);
226     setOperationAction(ISD::UREM, MVT::i16, Custom);
227     setOperationAction(ISD::SDIV, MVT::i32, Custom);
228     setOperationAction(ISD::UDIV, MVT::i32, Custom);
229     setOperationAction(ISD::UREM, MVT::i32, Custom);
230   }
231 
232   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
234   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
235   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
236 
237   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
239   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
240 
241   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
242     if (Subtarget.is64Bit()) {
243       setOperationAction(ISD::ROTL, MVT::i32, Custom);
244       setOperationAction(ISD::ROTR, MVT::i32, Custom);
245     }
246   } else {
247     setOperationAction(ISD::ROTL, XLenVT, Expand);
248     setOperationAction(ISD::ROTR, XLenVT, Expand);
249   }
250 
251   if (Subtarget.hasStdExtZbp()) {
252     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
253     // more combining.
254     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
255     setOperationAction(ISD::BSWAP, XLenVT, Custom);
256 
257     if (Subtarget.is64Bit()) {
258       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
259       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
260     }
261   } else {
262     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
263     // pattern match it directly in isel.
264     setOperationAction(ISD::BSWAP, XLenVT,
265                        Subtarget.hasStdExtZbb() ? Legal : Expand);
266   }
267 
268   if (Subtarget.hasStdExtZbb()) {
269     setOperationAction(ISD::SMIN, XLenVT, Legal);
270     setOperationAction(ISD::SMAX, XLenVT, Legal);
271     setOperationAction(ISD::UMIN, XLenVT, Legal);
272     setOperationAction(ISD::UMAX, XLenVT, Legal);
273   } else {
274     setOperationAction(ISD::CTTZ, XLenVT, Expand);
275     setOperationAction(ISD::CTLZ, XLenVT, Expand);
276     setOperationAction(ISD::CTPOP, XLenVT, Expand);
277   }
278 
279   if (Subtarget.hasStdExtZbt()) {
280     setOperationAction(ISD::FSHL, XLenVT, Custom);
281     setOperationAction(ISD::FSHR, XLenVT, Custom);
282     setOperationAction(ISD::SELECT, XLenVT, Legal);
283 
284     if (Subtarget.is64Bit()) {
285       setOperationAction(ISD::FSHL, MVT::i32, Custom);
286       setOperationAction(ISD::FSHR, MVT::i32, Custom);
287     }
288   } else {
289     setOperationAction(ISD::SELECT, XLenVT, Custom);
290   }
291 
292   ISD::CondCode FPCCToExpand[] = {
293       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
294       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
295       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
296 
297   ISD::NodeType FPOpToExpand[] = {
298       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
299       ISD::FP_TO_FP16};
300 
301   if (Subtarget.hasStdExtZfh())
302     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
303 
304   if (Subtarget.hasStdExtZfh()) {
305     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
306     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
307     for (auto CC : FPCCToExpand)
308       setCondCodeAction(CC, MVT::f16, Expand);
309     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
310     setOperationAction(ISD::SELECT, MVT::f16, Custom);
311     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
312     for (auto Op : FPOpToExpand)
313       setOperationAction(Op, MVT::f16, Expand);
314   }
315 
316   if (Subtarget.hasStdExtF()) {
317     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
318     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
319     for (auto CC : FPCCToExpand)
320       setCondCodeAction(CC, MVT::f32, Expand);
321     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
322     setOperationAction(ISD::SELECT, MVT::f32, Custom);
323     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
324     for (auto Op : FPOpToExpand)
325       setOperationAction(Op, MVT::f32, Expand);
326     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
327     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
328   }
329 
330   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
331     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
332 
333   if (Subtarget.hasStdExtD()) {
334     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
336     for (auto CC : FPCCToExpand)
337       setCondCodeAction(CC, MVT::f64, Expand);
338     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
339     setOperationAction(ISD::SELECT, MVT::f64, Custom);
340     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
341     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
342     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
343     for (auto Op : FPOpToExpand)
344       setOperationAction(Op, MVT::f64, Expand);
345     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
346     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
347   }
348 
349   if (Subtarget.is64Bit()) {
350     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
351     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
352     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
353     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
354   }
355 
356   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
357   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
358   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
359   setOperationAction(ISD::JumpTable, XLenVT, Custom);
360 
361   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
362 
363   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
364   // Unfortunately this can't be determined just from the ISA naming string.
365   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
366                      Subtarget.is64Bit() ? Legal : Custom);
367 
368   setOperationAction(ISD::TRAP, MVT::Other, Legal);
369   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
370   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
371 
372   if (Subtarget.hasStdExtA()) {
373     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
374     setMinCmpXchgSizeInBits(32);
375   } else {
376     setMaxAtomicSizeInBitsSupported(0);
377   }
378 
379   setBooleanContents(ZeroOrOneBooleanContent);
380 
381   if (Subtarget.hasStdExtV()) {
382     setBooleanVectorContents(ZeroOrOneBooleanContent);
383 
384     setOperationAction(ISD::VSCALE, XLenVT, Custom);
385 
386     // RVV intrinsics may have illegal operands.
387     // We also need to custom legalize vmv.x.s.
388     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
389     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
390     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
391     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
392     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
393     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
394 
395     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
396 
397     if (Subtarget.is64Bit()) {
398       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
399       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
400     } else {
401       // We must custom-lower certain vXi64 operations on RV32 due to the vector
402       // element type being illegal.
403       setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
404       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
405       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
406 
407       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
408       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
409       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
410       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
411       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
412       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
413       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
414       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
415     }
416 
417     for (MVT VT : BoolVecVTs) {
418       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
419 
420       // Mask VTs are custom-expanded into a series of standard nodes
421       setOperationAction(ISD::TRUNCATE, VT, Custom);
422       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
423       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
424     }
425 
426     for (MVT VT : IntVecVTs) {
427       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
428 
429       setOperationAction(ISD::SMIN, VT, Legal);
430       setOperationAction(ISD::SMAX, VT, Legal);
431       setOperationAction(ISD::UMIN, VT, Legal);
432       setOperationAction(ISD::UMAX, VT, Legal);
433 
434       setOperationAction(ISD::ROTL, VT, Expand);
435       setOperationAction(ISD::ROTR, VT, Expand);
436 
437       // Custom-lower extensions and truncations from/to mask types.
438       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
439       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
440       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
441 
442       // RVV has native int->float & float->int conversions where the
443       // element type sizes are within one power-of-two of each other. Any
444       // wider distances between type sizes have to be lowered as sequences
445       // which progressively narrow the gap in stages.
446       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
447       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
448       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
449       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
450 
451       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
452       // nodes which truncate by one power of two at a time.
453       setOperationAction(ISD::TRUNCATE, VT, Custom);
454 
455       // Custom-lower insert/extract operations to simplify patterns.
456       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
457       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
458 
459       // Custom-lower reduction operations to set up the corresponding custom
460       // nodes' operands.
461       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
462       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
463       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
464       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
465       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
466       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
467       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
468       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
469 
470       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
471       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
472     }
473 
474     // Expand various CCs to best match the RVV ISA, which natively supports UNE
475     // but no other unordered comparisons, and supports all ordered comparisons
476     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
477     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
478     // and we pattern-match those back to the "original", swapping operands once
479     // more. This way we catch both operations and both "vf" and "fv" forms with
480     // fewer patterns.
481     ISD::CondCode VFPCCToExpand[] = {
482         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
483         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
484         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
485     };
486 
487     // Sets common operation actions on RVV floating-point vector types.
488     const auto SetCommonVFPActions = [&](MVT VT) {
489       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
490       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
491       // sizes are within one power-of-two of each other. Therefore conversions
492       // between vXf16 and vXf64 must be lowered as sequences which convert via
493       // vXf32.
494       setOperationAction(ISD::FP_ROUND, VT, Custom);
495       setOperationAction(ISD::FP_EXTEND, VT, Custom);
496       // Custom-lower insert/extract operations to simplify patterns.
497       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
498       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
499       // Expand various condition codes (explained above).
500       for (auto CC : VFPCCToExpand)
501         setCondCodeAction(CC, VT, Expand);
502 
503       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
504       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
505       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
506 
507       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
508       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
509     };
510 
511     if (Subtarget.hasStdExtZfh())
512       for (MVT VT : F16VecVTs)
513         SetCommonVFPActions(VT);
514 
515     if (Subtarget.hasStdExtF())
516       for (MVT VT : F32VecVTs)
517         SetCommonVFPActions(VT);
518 
519     if (Subtarget.hasStdExtD())
520       for (MVT VT : F64VecVTs)
521         SetCommonVFPActions(VT);
522 
523     if (Subtarget.useRVVForFixedLengthVectors()) {
524       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
525         if (!useRVVForFixedLengthVectorVT(VT))
526           continue;
527 
528         // By default everything must be expanded.
529         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
530           setOperationAction(Op, VT, Expand);
531         for (MVT OtherVT : MVT::fixedlen_vector_valuetypes())
532           setTruncStoreAction(VT, OtherVT, Expand);
533 
534         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
535         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
536         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
537 
538         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
539 
540         setOperationAction(ISD::LOAD, VT, Custom);
541         setOperationAction(ISD::STORE, VT, Custom);
542 
543         setOperationAction(ISD::SETCC, VT, Custom);
544 
545         setOperationAction(ISD::TRUNCATE, VT, Custom);
546 
547         // Operations below are different for between masks and other vectors.
548         if (VT.getVectorElementType() == MVT::i1) {
549           setOperationAction(ISD::AND, VT, Custom);
550           setOperationAction(ISD::OR, VT, Custom);
551           setOperationAction(ISD::XOR, VT, Custom);
552           continue;
553         }
554 
555         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
556         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
557 
558         setOperationAction(ISD::ADD, VT, Custom);
559         setOperationAction(ISD::MUL, VT, Custom);
560         setOperationAction(ISD::SUB, VT, Custom);
561         setOperationAction(ISD::AND, VT, Custom);
562         setOperationAction(ISD::OR, VT, Custom);
563         setOperationAction(ISD::XOR, VT, Custom);
564         setOperationAction(ISD::SDIV, VT, Custom);
565         setOperationAction(ISD::SREM, VT, Custom);
566         setOperationAction(ISD::UDIV, VT, Custom);
567         setOperationAction(ISD::UREM, VT, Custom);
568         setOperationAction(ISD::SHL, VT, Custom);
569         setOperationAction(ISD::SRA, VT, Custom);
570         setOperationAction(ISD::SRL, VT, Custom);
571 
572         setOperationAction(ISD::SMIN, VT, Custom);
573         setOperationAction(ISD::SMAX, VT, Custom);
574         setOperationAction(ISD::UMIN, VT, Custom);
575         setOperationAction(ISD::UMAX, VT, Custom);
576 
577         setOperationAction(ISD::MULHS, VT, Custom);
578         setOperationAction(ISD::MULHU, VT, Custom);
579 
580         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
581         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
582         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
583         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
584 
585         setOperationAction(ISD::VSELECT, VT, Custom);
586 
587         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
588         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
589         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
590 
591         setOperationAction(ISD::BITCAST, VT, Custom);
592       }
593 
594       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
595         if (!useRVVForFixedLengthVectorVT(VT))
596           continue;
597 
598         // By default everything must be expanded.
599         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
600           setOperationAction(Op, VT, Expand);
601         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
602           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
603           setTruncStoreAction(VT, OtherVT, Expand);
604         }
605 
606         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
607         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
608         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
609 
610         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
611         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
612         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
613 
614         setOperationAction(ISD::LOAD, VT, Custom);
615         setOperationAction(ISD::STORE, VT, Custom);
616         setOperationAction(ISD::FADD, VT, Custom);
617         setOperationAction(ISD::FSUB, VT, Custom);
618         setOperationAction(ISD::FMUL, VT, Custom);
619         setOperationAction(ISD::FDIV, VT, Custom);
620         setOperationAction(ISD::FNEG, VT, Custom);
621         setOperationAction(ISD::FABS, VT, Custom);
622         setOperationAction(ISD::FSQRT, VT, Custom);
623         setOperationAction(ISD::FMA, VT, Custom);
624 
625         setOperationAction(ISD::FP_ROUND, VT, Custom);
626         setOperationAction(ISD::FP_EXTEND, VT, Custom);
627 
628         for (auto CC : VFPCCToExpand)
629           setCondCodeAction(CC, VT, Expand);
630 
631         setOperationAction(ISD::VSELECT, VT, Custom);
632 
633         setOperationAction(ISD::BITCAST, VT, Custom);
634       }
635     }
636   }
637 
638   // Function alignments.
639   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
640   setMinFunctionAlignment(FunctionAlignment);
641   setPrefFunctionAlignment(FunctionAlignment);
642 
643   setMinimumJumpTableEntries(5);
644 
645   // Jumps are expensive, compared to logic
646   setJumpIsExpensive();
647 
648   // We can use any register for comparisons
649   setHasMultipleConditionRegisters();
650 
651   setTargetDAGCombine(ISD::SETCC);
652   if (Subtarget.hasStdExtZbp()) {
653     setTargetDAGCombine(ISD::OR);
654   }
655   if (Subtarget.hasStdExtV())
656     setTargetDAGCombine(ISD::FCOPYSIGN);
657 }
658 
659 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
660                                             LLVMContext &Context,
661                                             EVT VT) const {
662   if (!VT.isVector())
663     return getPointerTy(DL);
664   if (Subtarget.hasStdExtV() &&
665       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
666     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
667   return VT.changeVectorElementTypeToInteger();
668 }
669 
670 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
671                                              const CallInst &I,
672                                              MachineFunction &MF,
673                                              unsigned Intrinsic) const {
674   switch (Intrinsic) {
675   default:
676     return false;
677   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
678   case Intrinsic::riscv_masked_atomicrmw_add_i32:
679   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
680   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
681   case Intrinsic::riscv_masked_atomicrmw_max_i32:
682   case Intrinsic::riscv_masked_atomicrmw_min_i32:
683   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
684   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
685   case Intrinsic::riscv_masked_cmpxchg_i32:
686     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
687     Info.opc = ISD::INTRINSIC_W_CHAIN;
688     Info.memVT = MVT::getVT(PtrTy->getElementType());
689     Info.ptrVal = I.getArgOperand(0);
690     Info.offset = 0;
691     Info.align = Align(4);
692     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
693                  MachineMemOperand::MOVolatile;
694     return true;
695   }
696 }
697 
698 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
699                                                 const AddrMode &AM, Type *Ty,
700                                                 unsigned AS,
701                                                 Instruction *I) const {
702   // No global is ever allowed as a base.
703   if (AM.BaseGV)
704     return false;
705 
706   // Require a 12-bit signed offset.
707   if (!isInt<12>(AM.BaseOffs))
708     return false;
709 
710   switch (AM.Scale) {
711   case 0: // "r+i" or just "i", depending on HasBaseReg.
712     break;
713   case 1:
714     if (!AM.HasBaseReg) // allow "r+i".
715       break;
716     return false; // disallow "r+r" or "r+r+i".
717   default:
718     return false;
719   }
720 
721   return true;
722 }
723 
724 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
725   return isInt<12>(Imm);
726 }
727 
728 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
729   return isInt<12>(Imm);
730 }
731 
732 // On RV32, 64-bit integers are split into their high and low parts and held
733 // in two different registers, so the trunc is free since the low register can
734 // just be used.
735 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
736   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
737     return false;
738   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
739   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
740   return (SrcBits == 64 && DestBits == 32);
741 }
742 
743 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
744   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
745       !SrcVT.isInteger() || !DstVT.isInteger())
746     return false;
747   unsigned SrcBits = SrcVT.getSizeInBits();
748   unsigned DestBits = DstVT.getSizeInBits();
749   return (SrcBits == 64 && DestBits == 32);
750 }
751 
752 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
753   // Zexts are free if they can be combined with a load.
754   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
755     EVT MemVT = LD->getMemoryVT();
756     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
757          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
758         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
759          LD->getExtensionType() == ISD::ZEXTLOAD))
760       return true;
761   }
762 
763   return TargetLowering::isZExtFree(Val, VT2);
764 }
765 
766 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
767   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
768 }
769 
770 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
771   return Subtarget.hasStdExtZbb();
772 }
773 
774 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
775   return Subtarget.hasStdExtZbb();
776 }
777 
778 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
779                                        bool ForCodeSize) const {
780   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
781     return false;
782   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
783     return false;
784   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
785     return false;
786   if (Imm.isNegZero())
787     return false;
788   return Imm.isZero();
789 }
790 
791 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
792   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
793          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
794          (VT == MVT::f64 && Subtarget.hasStdExtD());
795 }
796 
797 // Changes the condition code and swaps operands if necessary, so the SetCC
798 // operation matches one of the comparisons supported directly in the RISC-V
799 // ISA.
800 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
801   switch (CC) {
802   default:
803     break;
804   case ISD::SETGT:
805   case ISD::SETLE:
806   case ISD::SETUGT:
807   case ISD::SETULE:
808     CC = ISD::getSetCCSwappedOperands(CC);
809     std::swap(LHS, RHS);
810     break;
811   }
812 }
813 
814 // Return the RISC-V branch opcode that matches the given DAG integer
815 // condition code. The CondCode must be one of those supported by the RISC-V
816 // ISA (see normaliseSetCC).
817 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
818   switch (CC) {
819   default:
820     llvm_unreachable("Unsupported CondCode");
821   case ISD::SETEQ:
822     return RISCV::BEQ;
823   case ISD::SETNE:
824     return RISCV::BNE;
825   case ISD::SETLT:
826     return RISCV::BLT;
827   case ISD::SETGE:
828     return RISCV::BGE;
829   case ISD::SETULT:
830     return RISCV::BLTU;
831   case ISD::SETUGE:
832     return RISCV::BGEU;
833   }
834 }
835 
836 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
837   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
838   if (VT.getVectorElementType() == MVT::i1)
839     KnownSize *= 8;
840 
841   switch (KnownSize) {
842   default:
843     llvm_unreachable("Invalid LMUL.");
844   case 8:
845     return RISCVVLMUL::LMUL_F8;
846   case 16:
847     return RISCVVLMUL::LMUL_F4;
848   case 32:
849     return RISCVVLMUL::LMUL_F2;
850   case 64:
851     return RISCVVLMUL::LMUL_1;
852   case 128:
853     return RISCVVLMUL::LMUL_2;
854   case 256:
855     return RISCVVLMUL::LMUL_4;
856   case 512:
857     return RISCVVLMUL::LMUL_8;
858   }
859 }
860 
861 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
862   switch (LMul) {
863   default:
864     llvm_unreachable("Invalid LMUL.");
865   case RISCVVLMUL::LMUL_F8:
866   case RISCVVLMUL::LMUL_F4:
867   case RISCVVLMUL::LMUL_F2:
868   case RISCVVLMUL::LMUL_1:
869     return RISCV::VRRegClassID;
870   case RISCVVLMUL::LMUL_2:
871     return RISCV::VRM2RegClassID;
872   case RISCVVLMUL::LMUL_4:
873     return RISCV::VRM4RegClassID;
874   case RISCVVLMUL::LMUL_8:
875     return RISCV::VRM8RegClassID;
876   }
877 }
878 
879 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
880   RISCVVLMUL LMUL = getLMUL(VT);
881   if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
882       LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
883     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
884                   "Unexpected subreg numbering");
885     return RISCV::sub_vrm1_0 + Index;
886   }
887   if (LMUL == RISCVVLMUL::LMUL_2) {
888     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
889                   "Unexpected subreg numbering");
890     return RISCV::sub_vrm2_0 + Index;
891   }
892   if (LMUL == RISCVVLMUL::LMUL_4) {
893     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
894                   "Unexpected subreg numbering");
895     return RISCV::sub_vrm4_0 + Index;
896   }
897   llvm_unreachable("Invalid vector type.");
898 }
899 
900 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
901   if (VT.getVectorElementType() == MVT::i1)
902     return RISCV::VRRegClassID;
903   return getRegClassIDForLMUL(getLMUL(VT));
904 }
905 
906 // Attempt to decompose a subvector insert/extract between VecVT and
907 // SubVecVT via subregister indices. Returns the subregister index that
908 // can perform the subvector insert/extract with the given element index, as
909 // well as the index corresponding to any leftover subvectors that must be
910 // further inserted/extracted within the register class for SubVecVT.
911 std::pair<unsigned, unsigned>
912 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
913     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
914     const RISCVRegisterInfo *TRI) {
915   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
916                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
917                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
918                 "Register classes not ordered");
919   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
920   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
921   // Try to compose a subregister index that takes us from the incoming
922   // LMUL>1 register class down to the outgoing one. At each step we half
923   // the LMUL:
924   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
925   // Note that this is not guaranteed to find a subregister index, such as
926   // when we are extracting from one VR type to another.
927   unsigned SubRegIdx = RISCV::NoSubRegister;
928   for (const unsigned RCID :
929        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
930     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
931       VecVT = VecVT.getHalfNumVectorElementsVT();
932       bool IsHi =
933           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
934       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
935                                             getSubregIndexByMVT(VecVT, IsHi));
936       if (IsHi)
937         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
938     }
939   return {SubRegIdx, InsertExtractIdx};
940 }
941 
942 // Return the largest legal scalable vector type that matches VT's element type.
943 MVT RISCVTargetLowering::getContainerForFixedLengthVector(
944     SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
945   assert(VT.isFixedLengthVector() &&
946          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
947          "Expected legal fixed length vector!");
948 
949   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
950   assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");
951 
952   MVT EltVT = VT.getVectorElementType();
953   switch (EltVT.SimpleTy) {
954   default:
955     llvm_unreachable("unexpected element type for RVV container");
956   case MVT::i1: {
957     // Masks are calculated assuming 8-bit elements since that's when we need
958     // the most elements.
959     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
960     return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
961   }
962   case MVT::i8:
963   case MVT::i16:
964   case MVT::i32:
965   case MVT::i64:
966   case MVT::f16:
967   case MVT::f32:
968   case MVT::f64: {
969     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
970     return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
971   }
972   }
973 }
974 
975 // Grow V to consume an entire RVV register.
976 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
977                                        const RISCVSubtarget &Subtarget) {
978   assert(VT.isScalableVector() &&
979          "Expected to convert into a scalable vector!");
980   assert(V.getValueType().isFixedLengthVector() &&
981          "Expected a fixed length vector operand!");
982   SDLoc DL(V);
983   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
984   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
985 }
986 
987 // Shrink V so it's just big enough to maintain a VT's worth of data.
988 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
989                                          const RISCVSubtarget &Subtarget) {
990   assert(VT.isFixedLengthVector() &&
991          "Expected to convert into a fixed length vector!");
992   assert(V.getValueType().isScalableVector() &&
993          "Expected a scalable vector operand!");
994   SDLoc DL(V);
995   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
996   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
997 }
998 
999 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1000 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1001 // the vector type that it is contained in.
1002 static std::pair<SDValue, SDValue>
1003 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1004                 const RISCVSubtarget &Subtarget) {
1005   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1006   MVT XLenVT = Subtarget.getXLenVT();
1007   SDValue VL = VecVT.isFixedLengthVector()
1008                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1009                    : DAG.getRegister(RISCV::X0, XLenVT);
1010   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1011   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1012   return {Mask, VL};
1013 }
1014 
1015 // As above but assuming the given type is a scalable vector type.
1016 static std::pair<SDValue, SDValue>
1017 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1018                         const RISCVSubtarget &Subtarget) {
1019   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1020   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1021 }
1022 
1023 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1024                                  const RISCVSubtarget &Subtarget) {
1025   MVT VT = Op.getSimpleValueType();
1026   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1027 
1028   MVT ContainerVT =
1029       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
1030 
1031   SDLoc DL(Op);
1032   SDValue Mask, VL;
1033   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1034 
1035   if (VT.getVectorElementType() == MVT::i1) {
1036     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1037       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1038       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1039     }
1040 
1041     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1042       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1043       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1044     }
1045 
1046     return SDValue();
1047   }
1048 
1049   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1050     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1051                                         : RISCVISD::VMV_V_X_VL;
1052     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1053     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1054   }
1055 
1056   // Try and match an index sequence, which we can lower directly to the vid
1057   // instruction. An all-undef vector is matched by getSplatValue, above.
1058   if (VT.isInteger()) {
1059     bool IsVID = true;
1060     for (unsigned i = 0, e = Op.getNumOperands(); i < e && IsVID; i++)
1061       IsVID &= Op.getOperand(i).isUndef() ||
1062                (isa<ConstantSDNode>(Op.getOperand(i)) &&
1063                 Op.getConstantOperandVal(i) == i);
1064 
1065     if (IsVID) {
1066       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1067       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1068     }
1069   }
1070 
1071   return SDValue();
1072 }
1073 
1074 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1075                                    const RISCVSubtarget &Subtarget) {
1076   SDValue V1 = Op.getOperand(0);
1077   SDLoc DL(Op);
1078   MVT VT = Op.getSimpleValueType();
1079   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1080 
1081   if (SVN->isSplat()) {
1082     int Lane = SVN->getSplatIndex();
1083     if (Lane >= 0) {
1084       MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1085           DAG, VT, Subtarget);
1086 
1087       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1088       assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!");
1089 
1090       SDValue Mask, VL;
1091       std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1092       MVT XLenVT = Subtarget.getXLenVT();
1093       SDValue Gather =
1094           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1095                       DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
1096       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1097     }
1098   }
1099 
1100   return SDValue();
1101 }
1102 
1103 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1104                                      SDLoc DL, SelectionDAG &DAG,
1105                                      const RISCVSubtarget &Subtarget) {
1106   if (VT.isScalableVector())
1107     return DAG.getFPExtendOrRound(Op, DL, VT);
1108   assert(VT.isFixedLengthVector() &&
1109          "Unexpected value type for RVV FP extend/round lowering");
1110   SDValue Mask, VL;
1111   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1112   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1113                         ? RISCVISD::FP_EXTEND_VL
1114                         : RISCVISD::FP_ROUND_VL;
1115   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1116 }
1117 
1118 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1119                                             SelectionDAG &DAG) const {
1120   switch (Op.getOpcode()) {
1121   default:
1122     report_fatal_error("unimplemented operand");
1123   case ISD::GlobalAddress:
1124     return lowerGlobalAddress(Op, DAG);
1125   case ISD::BlockAddress:
1126     return lowerBlockAddress(Op, DAG);
1127   case ISD::ConstantPool:
1128     return lowerConstantPool(Op, DAG);
1129   case ISD::JumpTable:
1130     return lowerJumpTable(Op, DAG);
1131   case ISD::GlobalTLSAddress:
1132     return lowerGlobalTLSAddress(Op, DAG);
1133   case ISD::SELECT:
1134     return lowerSELECT(Op, DAG);
1135   case ISD::VASTART:
1136     return lowerVASTART(Op, DAG);
1137   case ISD::FRAMEADDR:
1138     return lowerFRAMEADDR(Op, DAG);
1139   case ISD::RETURNADDR:
1140     return lowerRETURNADDR(Op, DAG);
1141   case ISD::SHL_PARTS:
1142     return lowerShiftLeftParts(Op, DAG);
1143   case ISD::SRA_PARTS:
1144     return lowerShiftRightParts(Op, DAG, true);
1145   case ISD::SRL_PARTS:
1146     return lowerShiftRightParts(Op, DAG, false);
1147   case ISD::BITCAST: {
1148     SDValue Op0 = Op.getOperand(0);
1149     // We can handle fixed length vector bitcasts with a simple replacement
1150     // in isel.
1151     if (Op.getValueType().isFixedLengthVector()) {
1152       if (Op0.getValueType().isFixedLengthVector())
1153         return Op;
1154       return SDValue();
1155     }
1156     assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
1157             Subtarget.hasStdExtZfh()) &&
1158            "Unexpected custom legalisation");
1159     SDLoc DL(Op);
1160     if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
1161       if (Op0.getValueType() != MVT::i16)
1162         return SDValue();
1163       SDValue NewOp0 =
1164           DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
1165       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1166       return FPConv;
1167     } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
1168                Subtarget.hasStdExtF()) {
1169       if (Op0.getValueType() != MVT::i32)
1170         return SDValue();
1171       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1172       SDValue FPConv =
1173           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1174       return FPConv;
1175     }
1176     return SDValue();
1177   }
1178   case ISD::INTRINSIC_WO_CHAIN:
1179     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1180   case ISD::INTRINSIC_W_CHAIN:
1181     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1182   case ISD::BSWAP:
1183   case ISD::BITREVERSE: {
1184     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
1185     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1186     MVT VT = Op.getSimpleValueType();
1187     SDLoc DL(Op);
1188     // Start with the maximum immediate value which is the bitwidth - 1.
1189     unsigned Imm = VT.getSizeInBits() - 1;
1190     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1191     if (Op.getOpcode() == ISD::BSWAP)
1192       Imm &= ~0x7U;
1193     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
1194                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
1195   }
1196   case ISD::FSHL:
1197   case ISD::FSHR: {
1198     MVT VT = Op.getSimpleValueType();
1199     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1200     SDLoc DL(Op);
1201     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
1202     // use log(XLen) bits. Mask the shift amount accordingly.
1203     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
1204     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
1205                                 DAG.getConstant(ShAmtWidth, DL, VT));
1206     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
1207     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
1208   }
1209   case ISD::TRUNCATE: {
1210     SDLoc DL(Op);
1211     MVT VT = Op.getSimpleValueType();
1212     // Only custom-lower vector truncates
1213     if (!VT.isVector())
1214       return Op;
1215 
1216     // Truncates to mask types are handled differently
1217     if (VT.getVectorElementType() == MVT::i1)
1218       return lowerVectorMaskTrunc(Op, DAG);
1219 
1220     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
1221     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
1222     // truncate by one power of two at a time.
1223     MVT DstEltVT = VT.getVectorElementType();
1224 
1225     SDValue Src = Op.getOperand(0);
1226     MVT SrcVT = Src.getSimpleValueType();
1227     MVT SrcEltVT = SrcVT.getVectorElementType();
1228 
1229     assert(DstEltVT.bitsLT(SrcEltVT) &&
1230            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
1231            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
1232            "Unexpected vector truncate lowering");
1233 
1234     MVT ContainerVT = SrcVT;
1235     if (SrcVT.isFixedLengthVector()) {
1236       ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1237           DAG, SrcVT, Subtarget);
1238       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
1239     }
1240 
1241     SDValue Result = Src;
1242     SDValue Mask, VL;
1243     std::tie(Mask, VL) =
1244         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
1245     LLVMContext &Context = *DAG.getContext();
1246     const ElementCount Count = ContainerVT.getVectorElementCount();
1247     do {
1248       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
1249       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
1250       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
1251                            Mask, VL);
1252     } while (SrcEltVT != DstEltVT);
1253 
1254     if (SrcVT.isFixedLengthVector())
1255       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
1256 
1257     return Result;
1258   }
1259   case ISD::ANY_EXTEND:
1260   case ISD::ZERO_EXTEND:
1261     if (Op.getOperand(0).getValueType().isVector() &&
1262         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1263       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
1264     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
1265   case ISD::SIGN_EXTEND:
1266     if (Op.getOperand(0).getValueType().isVector() &&
1267         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1268       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
1269     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
1270   case ISD::SPLAT_VECTOR:
1271     return lowerSPLATVECTOR(Op, DAG);
1272   case ISD::INSERT_VECTOR_ELT:
1273     return lowerINSERT_VECTOR_ELT(Op, DAG);
1274   case ISD::EXTRACT_VECTOR_ELT:
1275     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
1276   case ISD::VSCALE: {
1277     MVT VT = Op.getSimpleValueType();
1278     SDLoc DL(Op);
1279     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
1280     // We define our scalable vector types for lmul=1 to use a 64 bit known
1281     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
1282     // vscale as VLENB / 8.
1283     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
1284                                  DAG.getConstant(3, DL, VT));
1285     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
1286   }
1287   case ISD::FP_EXTEND: {
1288     // RVV can only do fp_extend to types double the size as the source. We
1289     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
1290     // via f32.
1291     SDLoc DL(Op);
1292     MVT VT = Op.getSimpleValueType();
1293     SDValue Src = Op.getOperand(0);
1294     MVT SrcVT = Src.getSimpleValueType();
1295 
1296     // Prepare any fixed-length vector operands.
1297     MVT ContainerVT = VT;
1298     if (SrcVT.isFixedLengthVector()) {
1299       ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1300           DAG, VT, Subtarget);
1301       MVT SrcContainerVT =
1302           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
1303       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1304     }
1305 
1306     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
1307         SrcVT.getVectorElementType() != MVT::f16) {
1308       // For scalable vectors, we only need to close the gap between
1309       // vXf16->vXf64.
1310       if (!VT.isFixedLengthVector())
1311         return Op;
1312       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
1313       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1314       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1315     }
1316 
1317     MVT InterVT = VT.changeVectorElementType(MVT::f32);
1318     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
1319     SDValue IntermediateExtend = getRVVFPExtendOrRound(
1320         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
1321 
1322     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
1323                                            DL, DAG, Subtarget);
1324     if (VT.isFixedLengthVector())
1325       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
1326     return Extend;
1327   }
1328   case ISD::FP_ROUND: {
1329     // RVV can only do fp_round to types half the size as the source. We
1330     // custom-lower f64->f16 rounds via RVV's round-to-odd float
1331     // conversion instruction.
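    // For example, v4f64->v4f16 becomes a round-to-odd v4f64->v4f32 followed
    // by a v4f32->v4f16 round; rounding the first step to odd avoids double
    // rounding in the final result.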
1332     SDLoc DL(Op);
1333     MVT VT = Op.getSimpleValueType();
1334     SDValue Src = Op.getOperand(0);
1335     MVT SrcVT = Src.getSimpleValueType();
1336 
1337     // Prepare any fixed-length vector operands.
1338     MVT ContainerVT = VT;
1339     if (VT.isFixedLengthVector()) {
1340       MVT SrcContainerVT =
1341           RISCVTargetLowering::getContainerForFixedLengthVector(DAG, SrcVT,
1342                                                                 Subtarget);
1343       ContainerVT =
1344           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
1345       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1346     }
1347 
1348     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
1349         SrcVT.getVectorElementType() != MVT::f64) {
1350       // For scalable vectors, we only need to close the gap between
1351       // vXf64 and vXf16.
1352       if (!VT.isFixedLengthVector())
1353         return Op;
1354       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
1355       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1356       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1357     }
1358 
1359     SDValue Mask, VL;
1360     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1361 
1362     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
1363     SDValue IntermediateRound =
1364         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
1365     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
1366                                           DL, DAG, Subtarget);
1367 
1368     if (VT.isFixedLengthVector())
1369       return convertFromScalableVector(VT, Round, DAG, Subtarget);
1370     return Round;
1371   }
1372   case ISD::FP_TO_SINT:
1373   case ISD::FP_TO_UINT:
1374   case ISD::SINT_TO_FP:
1375   case ISD::UINT_TO_FP: {
1376     // RVV can only do fp<->int conversions to types half or double the size
1377     // of the source. We custom-lower any conversion that needs two hops into
1378     // a sequence of single-hop conversions.
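    // For example, v4f16->v4i64 becomes an f16->f32 extend followed by an
    // f32->i64 conversion, while v4i64->v4f16 becomes an i64->f32 conversion
    // followed by an f32->f16 round.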
1379     MVT VT = Op.getSimpleValueType();
1380     if (!VT.isVector())
1381       return Op;
1382     SDLoc DL(Op);
1383     SDValue Src = Op.getOperand(0);
1384     MVT EltVT = VT.getVectorElementType();
1385     MVT SrcVT = Src.getSimpleValueType();
1386     MVT SrcEltVT = SrcVT.getVectorElementType();
1387     unsigned EltSize = EltVT.getSizeInBits();
1388     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
1389     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
1390            "Unexpected vector element types");
1391 
1392     bool IsInt2FP = SrcEltVT.isInteger();
1393     // Widening conversions
1394     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
1395       if (IsInt2FP) {
1396         // Do a regular integer sign/zero extension then convert to float.
1397         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
1398                                       VT.getVectorElementCount());
1399         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
1400                                  ? ISD::ZERO_EXTEND
1401                                  : ISD::SIGN_EXTEND;
1402         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
1403         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
1404       }
1405       // FP2Int
1406       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
1407       // Do one doubling fp_extend then complete the operation by converting
1408       // to int.
1409       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1410       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
1411       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
1412     }
1413 
1414     // Narrowing conversions
1415     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
1416       if (IsInt2FP) {
1417         // One narrowing int_to_fp, then an fp_round.
1418         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
1419         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1420         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
1421         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
1422       }
1423       // FP2Int
1424       // One narrowing fp_to_int, then truncate the integer. If the float isn't
1425       // representable by the integer, the result is poison.
1426       MVT IVecVT =
1427           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
1428                            VT.getVectorElementCount());
1429       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
1430       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
1431     }
1432 
1433     // Scalable vectors can exit here; patterns will handle conversions to
1434     // equally-sized element types as well as halving/doubling ones.
1435     if (!VT.isFixedLengthVector())
1436       return Op;
1437 
1438     // For fixed-length vectors we lower to a custom "VL" node.
1439     unsigned RVVOpc = 0;
1440     switch (Op.getOpcode()) {
1441     default:
1442       llvm_unreachable("Impossible opcode");
1443     case ISD::FP_TO_SINT:
1444       RVVOpc = RISCVISD::FP_TO_SINT_VL;
1445       break;
1446     case ISD::FP_TO_UINT:
1447       RVVOpc = RISCVISD::FP_TO_UINT_VL;
1448       break;
1449     case ISD::SINT_TO_FP:
1450       RVVOpc = RISCVISD::SINT_TO_FP_VL;
1451       break;
1452     case ISD::UINT_TO_FP:
1453       RVVOpc = RISCVISD::UINT_TO_FP_VL;
1454       break;
1455     }
1456 
1457     MVT ContainerVT, SrcContainerVT;
1458     // Derive the reference container type from the larger vector type.
1459     if (SrcEltSize > EltSize) {
1460       SrcContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1461           DAG, SrcVT, Subtarget);
1462       ContainerVT =
1463           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
1464     } else {
1465       ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1466           DAG, VT, Subtarget);
1467       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
1468     }
1469 
1470     SDValue Mask, VL;
1471     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1472 
1473     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1474     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
1475     return convertFromScalableVector(VT, Src, DAG, Subtarget);
1476   }
1477   case ISD::VECREDUCE_ADD:
1478   case ISD::VECREDUCE_UMAX:
1479   case ISD::VECREDUCE_SMAX:
1480   case ISD::VECREDUCE_UMIN:
1481   case ISD::VECREDUCE_SMIN:
1482   case ISD::VECREDUCE_AND:
1483   case ISD::VECREDUCE_OR:
1484   case ISD::VECREDUCE_XOR:
1485     return lowerVECREDUCE(Op, DAG);
1486   case ISD::VECREDUCE_FADD:
1487   case ISD::VECREDUCE_SEQ_FADD:
1488     return lowerFPVECREDUCE(Op, DAG);
1489   case ISD::INSERT_SUBVECTOR:
1490     return lowerINSERT_SUBVECTOR(Op, DAG);
1491   case ISD::EXTRACT_SUBVECTOR:
1492     return lowerEXTRACT_SUBVECTOR(Op, DAG);
1493   case ISD::BUILD_VECTOR:
1494     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
1495   case ISD::VECTOR_SHUFFLE:
1496     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
1497   case ISD::LOAD:
1498     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
1499   case ISD::STORE:
1500     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
1501   case ISD::SETCC:
1502     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
1503   case ISD::ADD:
1504     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
1505   case ISD::SUB:
1506     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
1507   case ISD::MUL:
1508     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
1509   case ISD::MULHS:
1510     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
1511   case ISD::MULHU:
1512     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
1513   case ISD::AND:
1514     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
1515                                               RISCVISD::AND_VL);
1516   case ISD::OR:
1517     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
1518                                               RISCVISD::OR_VL);
1519   case ISD::XOR:
1520     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
1521                                               RISCVISD::XOR_VL);
1522   case ISD::SDIV:
1523     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
1524   case ISD::SREM:
1525     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
1526   case ISD::UDIV:
1527     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
1528   case ISD::UREM:
1529     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
1530   case ISD::SHL:
1531     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
1532   case ISD::SRA:
1533     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
1534   case ISD::SRL:
1535     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
1536   case ISD::FADD:
1537     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
1538   case ISD::FSUB:
1539     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
1540   case ISD::FMUL:
1541     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
1542   case ISD::FDIV:
1543     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
1544   case ISD::FNEG:
1545     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
1546   case ISD::FABS:
1547     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
1548   case ISD::FSQRT:
1549     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
1550   case ISD::FMA:
1551     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
1552   case ISD::SMIN:
1553     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
1554   case ISD::SMAX:
1555     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
1556   case ISD::UMIN:
1557     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
1558   case ISD::UMAX:
1559     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
1560   case ISD::VSELECT:
1561     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
1562   }
1563 }
1564 
1565 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
1566                              SelectionDAG &DAG, unsigned Flags) {
1567   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
1568 }
1569 
1570 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
1571                              SelectionDAG &DAG, unsigned Flags) {
1572   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
1573                                    Flags);
1574 }
1575 
1576 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
1577                              SelectionDAG &DAG, unsigned Flags) {
1578   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
1579                                    N->getOffset(), Flags);
1580 }
1581 
1582 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
1583                              SelectionDAG &DAG, unsigned Flags) {
1584   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
1585 }
1586 
1587 template <class NodeTy>
1588 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
1589                                      bool IsLocal) const {
1590   SDLoc DL(N);
1591   EVT Ty = getPointerTy(DAG.getDataLayout());
1592 
1593   if (isPositionIndependent()) {
1594     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
1595     if (IsLocal)
1596       // Use PC-relative addressing to access the symbol. This generates the
1597       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1598       // %pcrel_lo(auipc)).
1599       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
1600 
1601     // Use PC-relative addressing to access the GOT for this symbol, then load
1602     // the address from the GOT. This generates the pattern (PseudoLA sym),
1603     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
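    // A typical emitted sequence (RV64 shown) is roughly:
    //   .Lpcrel_hi0: auipc a0, %got_pcrel_hi(sym)
    //                ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)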
1604     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
1605   }
1606 
1607   switch (getTargetMachine().getCodeModel()) {
1608   default:
1609     report_fatal_error("Unsupported code model for lowering");
1610   case CodeModel::Small: {
1611     // Generate a sequence for accessing addresses within the first 2 GiB of
1612     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
1613     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
1614     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
1615     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
1616     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
1617   }
1618   case CodeModel::Medium: {
1619     // Generate a sequence for accessing addresses within any 2 GiB range
1620     // of the address space. This generates the pattern (PseudoLLA sym), which
1621     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1622     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
1623     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
1624   }
1625   }
1626 }
1627 
1628 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
1629                                                 SelectionDAG &DAG) const {
1630   SDLoc DL(Op);
1631   EVT Ty = Op.getValueType();
1632   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
1633   int64_t Offset = N->getOffset();
1634   MVT XLenVT = Subtarget.getXLenVT();
1635 
1636   const GlobalValue *GV = N->getGlobal();
1637   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
1638   SDValue Addr = getAddr(N, DAG, IsLocal);
1639 
1640   // In order to maximise the opportunity for common subexpression elimination,
1641   // emit a separate ADD node for the global address offset instead of folding
1642   // it into the global address node. Later peephole optimisations may choose
1643   // to fold it back in when profitable.
1644   if (Offset != 0)
1645     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
1646                        DAG.getConstant(Offset, DL, XLenVT));
1647   return Addr;
1648 }
1649 
1650 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
1651                                                SelectionDAG &DAG) const {
1652   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
1653 
1654   return getAddr(N, DAG);
1655 }
1656 
1657 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
1658                                                SelectionDAG &DAG) const {
1659   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
1660 
1661   return getAddr(N, DAG);
1662 }
1663 
1664 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
1665                                             SelectionDAG &DAG) const {
1666   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
1667 
1668   return getAddr(N, DAG);
1669 }
1670 
1671 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
1672                                               SelectionDAG &DAG,
1673                                               bool UseGOT) const {
1674   SDLoc DL(N);
1675   EVT Ty = getPointerTy(DAG.getDataLayout());
1676   const GlobalValue *GV = N->getGlobal();
1677   MVT XLenVT = Subtarget.getXLenVT();
1678 
1679   if (UseGOT) {
1680     // Use PC-relative addressing to access the GOT for this TLS symbol, then
1681     // load the address from the GOT and add the thread pointer. This generates
1682     // the pattern (PseudoLA_TLS_IE sym), which expands to
1683     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
1684     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
1685     SDValue Load =
1686         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
1687 
1688     // Add the thread pointer.
1689     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
1690     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
1691   }
1692 
1693   // Generate a sequence for accessing the address relative to the thread
1694   // pointer, with the appropriate adjustment for the thread pointer offset.
1695   // This generates the pattern
1696   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
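  // which corresponds roughly to the assembly sequence:
  //   lui   a0, %tprel_hi(sym)
  //   add   a0, a0, tp, %tprel_add(sym)
  //   addi  a0, a0, %tprel_lo(sym)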
1697   SDValue AddrHi =
1698       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
1699   SDValue AddrAdd =
1700       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
1701   SDValue AddrLo =
1702       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
1703 
1704   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
1705   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
1706   SDValue MNAdd = SDValue(
1707       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
1708       0);
1709   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
1710 }
1711 
1712 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
1713                                                SelectionDAG &DAG) const {
1714   SDLoc DL(N);
1715   EVT Ty = getPointerTy(DAG.getDataLayout());
1716   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
1717   const GlobalValue *GV = N->getGlobal();
1718 
1719   // Use a PC-relative addressing mode to access the global dynamic GOT address.
1720   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
1721   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
1722   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
1723   SDValue Load =
1724       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
1725 
1726   // Prepare argument list to generate call.
1727   ArgListTy Args;
1728   ArgListEntry Entry;
1729   Entry.Node = Load;
1730   Entry.Ty = CallTy;
1731   Args.push_back(Entry);
1732 
1733   // Setup call to __tls_get_addr.
1734   TargetLowering::CallLoweringInfo CLI(DAG);
1735   CLI.setDebugLoc(DL)
1736       .setChain(DAG.getEntryNode())
1737       .setLibCallee(CallingConv::C, CallTy,
1738                     DAG.getExternalSymbol("__tls_get_addr", Ty),
1739                     std::move(Args));
1740 
1741   return LowerCallTo(CLI).first;
1742 }
1743 
1744 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
1745                                                    SelectionDAG &DAG) const {
1746   SDLoc DL(Op);
1747   EVT Ty = Op.getValueType();
1748   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
1749   int64_t Offset = N->getOffset();
1750   MVT XLenVT = Subtarget.getXLenVT();
1751 
1752   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
1753 
1754   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
1755       CallingConv::GHC)
1756     report_fatal_error("In GHC calling convention TLS is not supported");
1757 
1758   SDValue Addr;
1759   switch (Model) {
1760   case TLSModel::LocalExec:
1761     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
1762     break;
1763   case TLSModel::InitialExec:
1764     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
1765     break;
1766   case TLSModel::LocalDynamic:
1767   case TLSModel::GeneralDynamic:
1768     Addr = getDynamicTLSAddr(N, DAG);
1769     break;
1770   }
1771 
1772   // In order to maximise the opportunity for common subexpression elimination,
1773   // emit a separate ADD node for the global address offset instead of folding
1774   // it into the global address node. Later peephole optimisations may choose
1775   // to fold it back in when profitable.
1776   if (Offset != 0)
1777     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
1778                        DAG.getConstant(Offset, DL, XLenVT));
1779   return Addr;
1780 }
1781 
1782 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
1783   SDValue CondV = Op.getOperand(0);
1784   SDValue TrueV = Op.getOperand(1);
1785   SDValue FalseV = Op.getOperand(2);
1786   SDLoc DL(Op);
1787   MVT XLenVT = Subtarget.getXLenVT();
1788 
1789   // If the result type is XLenVT and CondV is the output of a SETCC node
1790   // which also operated on XLenVT inputs, then merge the SETCC node into the
1791   // lowered RISCVISD::SELECT_CC to take advantage of the integer
1792   // compare+branch instructions. i.e.:
1793   // (select (setcc lhs, rhs, cc), truev, falsev)
1794   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
1795   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
1796       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
1797     SDValue LHS = CondV.getOperand(0);
1798     SDValue RHS = CondV.getOperand(1);
1799     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
1800     ISD::CondCode CCVal = CC->get();
1801 
1802     normaliseSetCC(LHS, RHS, CCVal);
1803 
1804     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
1805     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
1806     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
1807   }
1808 
1809   // Otherwise:
1810   // (select condv, truev, falsev)
1811   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
1812   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
1813   SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
1814 
1815   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
1816 
1817   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
1818 }
1819 
1820 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
1821   MachineFunction &MF = DAG.getMachineFunction();
1822   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
1823 
1824   SDLoc DL(Op);
1825   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
1826                                  getPointerTy(MF.getDataLayout()));
1827 
1828   // vastart just stores the address of the VarArgsFrameIndex slot into the
1829   // memory location argument.
1830   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1831   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
1832                       MachinePointerInfo(SV));
1833 }
1834 
1835 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
1836                                             SelectionDAG &DAG) const {
1837   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
1838   MachineFunction &MF = DAG.getMachineFunction();
1839   MachineFrameInfo &MFI = MF.getFrameInfo();
1840   MFI.setFrameAddressIsTaken(true);
1841   Register FrameReg = RI.getFrameRegister(MF);
1842   int XLenInBytes = Subtarget.getXLen() / 8;
1843 
1844   EVT VT = Op.getValueType();
1845   SDLoc DL(Op);
1846   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
1847   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1848   while (Depth--) {
1849     int Offset = -(XLenInBytes * 2);
1850     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
1851                               DAG.getIntPtrConstant(Offset, DL));
1852     FrameAddr =
1853         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
1854   }
1855   return FrameAddr;
1856 }
1857 
1858 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
1859                                              SelectionDAG &DAG) const {
1860   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
1861   MachineFunction &MF = DAG.getMachineFunction();
1862   MachineFrameInfo &MFI = MF.getFrameInfo();
1863   MFI.setReturnAddressIsTaken(true);
1864   MVT XLenVT = Subtarget.getXLenVT();
1865   int XLenInBytes = Subtarget.getXLen() / 8;
1866 
1867   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1868     return SDValue();
1869 
1870   EVT VT = Op.getValueType();
1871   SDLoc DL(Op);
1872   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1873   if (Depth) {
1874     int Off = -XLenInBytes;
1875     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
1876     SDValue Offset = DAG.getConstant(Off, DL, VT);
1877     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
1878                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
1879                        MachinePointerInfo());
1880   }
1881 
1882   // Return the value of the return address register, marking it an implicit
1883   // live-in.
1884   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
1885   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
1886 }
1887 
1888 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
1889                                                  SelectionDAG &DAG) const {
1890   SDLoc DL(Op);
1891   SDValue Lo = Op.getOperand(0);
1892   SDValue Hi = Op.getOperand(1);
1893   SDValue Shamt = Op.getOperand(2);
1894   EVT VT = Lo.getValueType();
1895 
1896   // if Shamt-XLEN < 0: // Shamt < XLEN
1897   //   Lo = Lo << Shamt
1898   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
1899   // else:
1900   //   Lo = 0
1901   //   Hi = Lo << (Shamt-XLEN)
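  //
  // For example, on RV32 with Shamt=8: Lo = Lo<<8 and Hi = (Hi<<8)|(Lo>>24);
  // with Shamt=40: Lo = 0 and Hi = Lo<<8.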
1902 
1903   SDValue Zero = DAG.getConstant(0, DL, VT);
1904   SDValue One = DAG.getConstant(1, DL, VT);
1905   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1906   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1907   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1908   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1909 
1910   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
1911   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
1912   SDValue ShiftRightLo =
1913       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
1914   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
1915   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
1916   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
1917 
1918   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1919 
1920   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
1921   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1922 
1923   SDValue Parts[2] = {Lo, Hi};
1924   return DAG.getMergeValues(Parts, DL);
1925 }
1926 
1927 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
1928                                                   bool IsSRA) const {
1929   SDLoc DL(Op);
1930   SDValue Lo = Op.getOperand(0);
1931   SDValue Hi = Op.getOperand(1);
1932   SDValue Shamt = Op.getOperand(2);
1933   EVT VT = Lo.getValueType();
1934 
1935   // SRA expansion:
1936   //   if Shamt-XLEN < 0: // Shamt < XLEN
1937   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1938   //     Hi = Hi >>s Shamt
1939   //   else:
1940   //     Lo = Hi >>s (Shamt-XLEN);
1941   //     Hi = Hi >>s (XLEN-1)
1942   //
1943   // SRL expansion:
1944   //   if Shamt-XLEN < 0: // Shamt < XLEN
1945   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1946   //     Hi = Hi >>u Shamt
1947   //   else:
1948   //     Lo = Hi >>u (Shamt-XLEN);
1949   //     Hi = 0;
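  //
  // For example, on RV32 with Shamt=8 (SRL): Lo = (Lo>>8)|(Hi<<24) and
  // Hi = Hi>>8; with Shamt=40: Lo = Hi>>8 and Hi = 0.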
1950 
1951   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
1952 
1953   SDValue Zero = DAG.getConstant(0, DL, VT);
1954   SDValue One = DAG.getConstant(1, DL, VT);
1955   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1956   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1957   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1958   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1959 
1960   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
1961   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
1962   SDValue ShiftLeftHi =
1963       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
1964   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
1965   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
1966   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
1967   SDValue HiFalse =
1968       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
1969 
1970   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1971 
1972   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
1973   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1974 
1975   SDValue Parts[2] = {Lo, Hi};
1976   return DAG.getMergeValues(Parts, DL);
1977 }
1978 
1979 // Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
1980 // illegal (currently only vXi64 RV32).
1981 // FIXME: We could also catch non-constant sign-extended i32 values and lower
1982 // them to SPLAT_VECTOR_I64.
1983 SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
1984                                               SelectionDAG &DAG) const {
1985   SDLoc DL(Op);
1986   EVT VecVT = Op.getValueType();
1987   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
1988          "Unexpected SPLAT_VECTOR lowering");
1989   SDValue SplatVal = Op.getOperand(0);
1990 
1991   // If we can prove that the value is a sign-extended 32-bit value, lower this
1992   // as a custom node in order to try and match RVV vector/scalar instructions.
1993   if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
1994     if (isInt<32>(CVal->getSExtValue()))
1995       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
1996                          DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
1997   }
1998 
1999   if (SplatVal.getOpcode() == ISD::SIGN_EXTEND &&
2000       SplatVal.getOperand(0).getValueType() == MVT::i32) {
2001     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
2002                        SplatVal.getOperand(0));
2003   }
2004 
2005   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
2006   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
2007   // vmv.v.x vX, hi
2008   // vsll.vx vX, vX, /*32*/
2009   // vmv.v.x vY, lo
2010   // vsll.vx vY, vY, /*32*/
2011   // vsrl.vx vY, vY, /*32*/
2012   // vor.vv vX, vX, vY
2013   SDValue One = DAG.getConstant(1, DL, MVT::i32);
2014   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2015   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
2016   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
2017   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);
2018 
2019   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2020   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
2021   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
2022 
2023   if (isNullConstant(Hi))
2024     return Lo;
2025 
2026   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
2027   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
2028 
2029   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
2030 }
2031 
2032 // Custom-lower extensions from mask vectors by using a vselect either with 1
2033 // for zero/any-extension or -1 for sign-extension:
2034 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2035 // Note that any-extension is lowered identically to zero-extension.
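// For example, (v4i32 (sext v4i1 m)) becomes
// (v4i32 (vselect m, splat(-1), splat(0))).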
2036 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2037                                                 int64_t ExtTrueVal) const {
2038   SDLoc DL(Op);
2039   MVT VecVT = Op.getSimpleValueType();
2040   SDValue Src = Op.getOperand(0);
2041   // Only custom-lower extensions from mask types
2042   assert(Src.getValueType().isVector() &&
2043          Src.getValueType().getVectorElementType() == MVT::i1);
2044 
2045   MVT XLenVT = Subtarget.getXLenVT();
2046   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2047   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2048 
2049   if (VecVT.isScalableVector()) {
2050     // Be careful not to introduce illegal scalar types at this stage, and be
2051     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2052     // illegal and must be expanded. Since we know that the constants are
2053     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2054     bool IsRV32E64 =
2055         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2056 
2057     if (!IsRV32E64) {
2058       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2059       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2060     } else {
2061       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2062       SplatTrueVal =
2063           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2064     }
2065 
2066     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2067   }
2068 
2069   MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
2070       DAG, VecVT, Subtarget);
2071   MVT I1ContainerVT =
2072       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2073 
2074   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2075 
2076   SDValue Mask, VL;
2077   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2078 
2079   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
2080   SplatTrueVal =
2081       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
2082   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
2083                                SplatTrueVal, SplatZero, VL);
2084 
2085   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
2086 }
2087 
2088 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
2089     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
2090   MVT ExtVT = Op.getSimpleValueType();
2091   // Only custom-lower extensions from fixed-length vector types.
2092   if (!ExtVT.isFixedLengthVector())
2093     return Op;
2094   MVT VT = Op.getOperand(0).getSimpleValueType();
2095   // Grab the canonical container type for the extended type. Infer the smaller
2096   // type from that to ensure the same number of vector elements, as we know
2097   // the LMUL will be sufficient to hold the smaller type.
2098   MVT ContainerExtVT = RISCVTargetLowering::getContainerForFixedLengthVector(
2099       DAG, ExtVT, Subtarget);
2100   // Build the smaller container type manually to ensure the same number of
2101   // vector elements between source and dest.
2102   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
2103                                      ContainerExtVT.getVectorElementCount());
2104 
2105   SDValue Op1 =
2106       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
2107 
2108   SDLoc DL(Op);
2109   SDValue Mask, VL;
2110   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2111 
2112   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
2113 
2114   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
2115 }
2116 
2117 // Custom-lower truncations from vectors to mask vectors by using a mask and a
2118 // setcc operation:
2119 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
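// For example, (v4i1 (trunc v4i32 x)) becomes
// (setcc (and x, splat(1)), splat(0), ne), so only the low bit of each
// element is observed.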
2120 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
2121                                                   SelectionDAG &DAG) const {
2122   SDLoc DL(Op);
2123   EVT MaskVT = Op.getValueType();
2124   // Only expect to custom-lower truncations to mask types
2125   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
2126          "Unexpected type for vector mask lowering");
2127   SDValue Src = Op.getOperand(0);
2128   MVT VecVT = Src.getSimpleValueType();
2129 
2130   // If this is a fixed vector, we need to convert it to a scalable vector.
2131   MVT ContainerVT = VecVT;
2132   if (VecVT.isFixedLengthVector()) {
2133     ContainerVT = getContainerForFixedLengthVector(DAG, VecVT, Subtarget);
2134     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2135   }
2136 
2137   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
2138   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2139 
2140   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
2141   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
2142 
2143   if (VecVT.isScalableVector()) {
2144     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
2145     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
2146   }
2147 
2148   SDValue Mask, VL;
2149   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2150 
2151   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2152   SDValue Trunc =
2153       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
2154   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
2155                       DAG.getCondCode(ISD::SETNE), Mask, VL);
2156   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
2157 }
2158 
2159 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
2160                                                     SelectionDAG &DAG) const {
2161   SDLoc DL(Op);
2162   MVT VecVT = Op.getSimpleValueType();
2163   SDValue Vec = Op.getOperand(0);
2164   SDValue Val = Op.getOperand(1);
2165   SDValue Idx = Op.getOperand(2);
2166 
2167   // Custom-legalize INSERT_VECTOR_ELT where XLEN>=SEW, so that the vector is
2168   // first slid down into position, the value is inserted into the first
2169   // position, and the vector is slid back up. We do this to simplify patterns.
2170   //   (slideup vec, (insertelt (slidedown impdef, vec, idx), val, 0), idx),
2171   if (Subtarget.is64Bit() || Val.getValueType() != MVT::i64) {
2172     if (isNullConstant(Idx))
2173       return Op;
2174     SDValue Mask, VL;
2175     std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
2176     SDValue Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT,
2177                                     DAG.getUNDEF(VecVT), Vec, Idx, Mask, VL);
2178     SDValue InsertElt0 =
2179         DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecVT, Slidedown, Val,
2180                     DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2181 
2182     return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, Vec, InsertElt0, Idx,
2183                        Mask, VL);
2184   }
2185 
2186   if (!VecVT.isScalableVector())
2187     return SDValue();
2188 
2189   // Custom-legalize INSERT_VECTOR_ELT where XLEN<SEW, as the SEW element type
2190   // is illegal (currently only vXi64 RV32).
2191   // Since there is no easy way of getting a single element into a vector when
2192   // XLEN<SEW, we lower the operation to the following sequence:
2193   //   splat      vVal, rVal
2194   //   vid.v      vVid
2195   //   vmseq.vx   mMask, vVid, rIdx
2196   //   vmerge.vvm vDest, vSrc, vVal, mMask
2197   // This essentially merges the original vector with the inserted element by
2198   // using a mask whose only set bit is that corresponding to the insert
2199   // index.
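  // For example, inserting at index 2 of a 4-element vector gives
  // vVid = {0,1,2,3} and mMask = {0,0,1,0}, so the merge takes vVal only in
  // lane 2.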
2200   SDValue SplattedVal = DAG.getSplatVector(VecVT, DL, Val);
2201   SDValue SplattedIdx = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Idx);
2202 
2203   SDValue Mask, VL;
2204   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
2205   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VecVT, Mask, VL);
2206   auto SetCCVT =
2207       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VecVT);
2208   SDValue SelectCond = DAG.getSetCC(DL, SetCCVT, VID, SplattedIdx, ISD::SETEQ);
2209 
2210   return DAG.getNode(ISD::VSELECT, DL, VecVT, SelectCond, SplattedVal, Vec);
2211 }
2212 
2213 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
2214 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
2215 // types this is done using VMV_X_S to allow us to glean information about the
2216 // sign bits of the result.
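// For example, (extractelt vec, 3) becomes a vslidedown by 3 (with VL=1)
// followed by a read of lane 0.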
2217 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
2218                                                      SelectionDAG &DAG) const {
2219   SDLoc DL(Op);
2220   SDValue Idx = Op.getOperand(1);
2221   SDValue Vec = Op.getOperand(0);
2222   EVT EltVT = Op.getValueType();
2223   MVT VecVT = Vec.getSimpleValueType();
2224   MVT XLenVT = Subtarget.getXLenVT();
2225 
2226   // If this is a fixed vector, we need to convert it to a scalable vector.
2227   MVT ContainerVT = VecVT;
2228   if (VecVT.isFixedLengthVector()) {
2229     ContainerVT = getContainerForFixedLengthVector(DAG, VecVT, Subtarget);
2230     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2231   }
2232 
2233   // If the index is 0, the vector is already in the right position.
2234   if (!isNullConstant(Idx)) {
2235     // Use a VL of 1 to avoid processing more elements than we need.
2236     SDValue VL = DAG.getConstant(1, DL, XLenVT);
2237     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2238     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2239     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2240                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
2241   }
2242 
2243   if (!EltVT.isInteger()) {
2244     // Floating-point extracts are handled in TableGen.
2245     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
2246                        DAG.getConstant(0, DL, XLenVT));
2247   }
2248 
2249   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
2250   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
2251 }
2252 
2253 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
2254                                                      SelectionDAG &DAG) const {
2255   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2256   SDLoc DL(Op);
2257 
2258   if (Subtarget.hasStdExtV()) {
2259     // Some RVV intrinsics may claim that they want an integer operand to be
2260     // extended.
2261     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
2262             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
2263       if (II->ExtendedOperand) {
2264         assert(II->ExtendedOperand < Op.getNumOperands());
2265         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
2266         SDValue &ScalarOp = Operands[II->ExtendedOperand];
2267         EVT OpVT = ScalarOp.getValueType();
2268         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
2269             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
2270           // If the operand is a constant, sign extend to increase our chances
2271           // of being able to use a .vi instruction. ANY_EXTEND would become
2272           // a zero extend and the simm5 check in isel would fail.
2273           // FIXME: Should we ignore the upper bits in isel instead?
2274           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
2275                                                           : ISD::ANY_EXTEND;
2276           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
2277           return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
2278                              Operands);
2279         }
2280       }
2281     }
2282   }
2283 
2284   switch (IntNo) {
2285   default:
2286     return SDValue();    // Don't custom lower most intrinsics.
2287   case Intrinsic::thread_pointer: {
2288     EVT PtrVT = getPointerTy(DAG.getDataLayout());
2289     return DAG.getRegister(RISCV::X4, PtrVT);
2290   }
2291   case Intrinsic::riscv_vmv_x_s:
2292     assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
2293     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
2294                        Op.getOperand(1));
2295   case Intrinsic::riscv_vmv_v_x: {
2296     SDValue Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(),
2297                                  Op.getOperand(1));
2298     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(),
2299                        Scalar, Op.getOperand(2));
2300   }
2301   case Intrinsic::riscv_vfmv_v_f:
2302     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
2303                        Op.getOperand(1), Op.getOperand(2));
2304   }
2305 }
2306 
2307 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
2308                                                     SelectionDAG &DAG) const {
2309   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2310   SDLoc DL(Op);
2311 
2312   if (Subtarget.hasStdExtV()) {
2313     // Some RVV intrinsics may claim that they want an integer operand to be
2314     // extended.
2315     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
2316             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
2317       if (II->ExtendedOperand) {
2318         // The operands start from the second argument in INTRINSIC_W_CHAIN.
2319         unsigned ExtendOp = II->ExtendedOperand + 1;
2320         assert(ExtendOp < Op.getNumOperands());
2321         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
2322         SDValue &ScalarOp = Operands[ExtendOp];
2323         EVT OpVT = ScalarOp.getValueType();
2324         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
2325             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
2326           // If the operand is a constant, sign extend to increase our chances
2327           // of being able to use a .vi instruction. ANY_EXTEND would become
2328           // a zero extend and the simm5 check in isel would fail.
2329           // FIXME: Should we ignore the upper bits in isel instead?
2330           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
2331                                                           : ISD::ANY_EXTEND;
2332           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
2333           return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
2334                              Operands);
2335         }
2336       }
2337     }
2338   }
2339 
2340   return SDValue(); // Don't custom lower most intrinsics.
2341 }
2342 
2343 static MVT getLMUL1VT(MVT VT) {
2344   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
2345          "Unexpected vector MVT");
2346   return MVT::getScalableVectorVT(
2347       VT.getVectorElementType(),
2348       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
2349 }
2350 
2351 static std::pair<unsigned, uint64_t>
2352 getRVVReductionOpAndIdentityVal(unsigned ISDOpcode, unsigned EltSizeBits) {
2353   switch (ISDOpcode) {
2354   default:
2355     llvm_unreachable("Unhandled reduction");
2356   case ISD::VECREDUCE_ADD:
2357     return {RISCVISD::VECREDUCE_ADD, 0};
2358   case ISD::VECREDUCE_UMAX:
2359     return {RISCVISD::VECREDUCE_UMAX, 0};
2360   case ISD::VECREDUCE_SMAX:
2361     return {RISCVISD::VECREDUCE_SMAX, minIntN(EltSizeBits)};
2362   case ISD::VECREDUCE_UMIN:
2363     return {RISCVISD::VECREDUCE_UMIN, maxUIntN(EltSizeBits)};
2364   case ISD::VECREDUCE_SMIN:
2365     return {RISCVISD::VECREDUCE_SMIN, maxIntN(EltSizeBits)};
2366   case ISD::VECREDUCE_AND:
2367     return {RISCVISD::VECREDUCE_AND, -1};
2368   case ISD::VECREDUCE_OR:
2369     return {RISCVISD::VECREDUCE_OR, 0};
2370   case ISD::VECREDUCE_XOR:
2371     return {RISCVISD::VECREDUCE_XOR, 0};
2372   }
2373 }
2374 
2375 // Take a (supported) standard ISD reduction opcode and transform it to a RISCV
2376 // reduction opcode. Note that this returns a vector type, which must be
2377 // further processed to access the scalar result in element 0.
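// For example, a VECREDUCE_SMAX of a vXi32 vector splats INT32_MIN into the
// LMUL=1 start-value operand so that the identity cannot affect the result.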
2378 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
2379                                             SelectionDAG &DAG) const {
2380   SDLoc DL(Op);
2381   assert(Op.getValueType().isSimple() &&
2382          Op.getOperand(0).getValueType().isSimple() &&
2383          "Unexpected vector-reduce lowering");
2384   MVT VecVT = Op.getOperand(0).getSimpleValueType();
2385   MVT VecEltVT = VecVT.getVectorElementType();
2386   unsigned RVVOpcode;
2387   uint64_t IdentityVal;
2388   std::tie(RVVOpcode, IdentityVal) =
2389       getRVVReductionOpAndIdentityVal(Op.getOpcode(), VecEltVT.getSizeInBits());
2390   MVT M1VT = getLMUL1VT(VecVT);
2391   SDValue IdentitySplat =
2392       DAG.getSplatVector(M1VT, DL, DAG.getConstant(IdentityVal, DL, VecEltVT));
2393   SDValue Reduction =
2394       DAG.getNode(RVVOpcode, DL, M1VT, Op.getOperand(0), IdentitySplat);
2395   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
2396                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2397   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
2398 }
2399 
2400 // Given a reduction op, this function returns the matching reduction opcode,
2401 // the vector SDValue and the scalar SDValue required to lower this to a
2402 // RISCVISD node.
2403 static std::tuple<unsigned, SDValue, SDValue>
2404 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
2405   SDLoc DL(Op);
2406   switch (Op.getOpcode()) {
2407   default:
2408     llvm_unreachable("Unhandled reduction");
2409   case ISD::VECREDUCE_FADD:
2410     return std::make_tuple(RISCVISD::VECREDUCE_FADD, Op.getOperand(0),
2411                            DAG.getConstantFP(0.0, DL, EltVT));
2412   case ISD::VECREDUCE_SEQ_FADD:
2413     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD, Op.getOperand(1),
2414                            Op.getOperand(0));
2415   }
2416 }
2417 
2418 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
2419                                               SelectionDAG &DAG) const {
2420   SDLoc DL(Op);
2421   MVT VecEltVT = Op.getSimpleValueType();
2422 
2423   unsigned RVVOpcode;
2424   SDValue VectorVal, ScalarVal;
2425   std::tie(RVVOpcode, VectorVal, ScalarVal) =
2426       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
2427 
2428   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
2429   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
2430   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat);
2431   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
2432                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2433 }
2434 
2435 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
2436                                                    SelectionDAG &DAG) const {
2437   SDValue Vec = Op.getOperand(0);
2438   SDValue SubVec = Op.getOperand(1);
2439   MVT VecVT = Vec.getSimpleValueType();
2440   MVT SubVecVT = SubVec.getSimpleValueType();
2441 
2442   SDLoc DL(Op);
2443   MVT XLenVT = Subtarget.getXLenVT();
2444   unsigned OrigIdx = Op.getConstantOperandVal(2);
2445   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2446 
2447   // We don't have the ability to slide mask vectors up indexed by their i1
2448   // elements; the smallest we can do is i8. Often we are able to bitcast to
2449   // equivalent i8 vectors. Note that when inserting a fixed-length vector
2450   // into a scalable one, we might not necessarily have enough scalable
2451   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
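  // For example, nxv16i1 = insert nxv16i1, nxv8i1 at index 8 can be recast as
  // nxv2i8 = insert nxv2i8, nxv1i8 at index 1.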
2452   if (SubVecVT.getVectorElementType() == MVT::i1 &&
2453       (OrigIdx != 0 || !Vec.isUndef())) {
2454     if (VecVT.getVectorMinNumElements() >= 8 &&
2455         SubVecVT.getVectorMinNumElements() >= 8) {
2456       assert(OrigIdx % 8 == 0 && "Invalid index");
2457       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
2458              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
2459              "Unexpected mask vector lowering");
2460       OrigIdx /= 8;
2461       SubVecVT =
2462           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
2463                            SubVecVT.isScalableVector());
2464       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
2465                                VecVT.isScalableVector());
2466       Vec = DAG.getBitcast(VecVT, Vec);
2467       SubVec = DAG.getBitcast(SubVecVT, SubVec);
2468     } else {
2469       // We can't slide this mask vector up indexed by its i1 elements.
2470       // This poses a problem when we wish to insert a scalable vector which
2471       // can't be re-expressed as a larger type. Just choose the slow path and
2472       // extend to a larger type, then truncate back down.
2473       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
2474       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
2475       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
2476       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
2477       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
2478                         Op.getOperand(2));
2479       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
2480       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
2481     }
2482   }
2483 
2484   // If the subvector is a fixed-length type, we cannot use subregister
2485   // manipulation to simplify the codegen; we don't know which register of a
2486   // LMUL group contains the specific subvector as we only know the minimum
2487   // register size. Therefore we must slide the vector group up the full
2488   // amount.
2489   if (SubVecVT.isFixedLengthVector()) {
2490     if (OrigIdx == 0 && Vec.isUndef())
2491       return Op;
2492     MVT ContainerVT = VecVT;
2493     if (VecVT.isFixedLengthVector()) {
2494       ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
2495           DAG, VecVT, Subtarget);
2496       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2497     }
2498     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
2499                          DAG.getUNDEF(ContainerVT), SubVec,
2500                          DAG.getConstant(0, DL, XLenVT));
2501     SDValue Mask =
2502         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
2503     // Set the vector length to only the number of elements we care about. Note
2504     // that for slideup this includes the offset.
2505     SDValue VL =
2506         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
2507     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
2508     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
2509                                   SubVec, SlideupAmt, Mask, VL);
2510     if (!VecVT.isFixedLengthVector())
2511       return Slideup;
2512     return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
2513   }
2514 
2515   unsigned SubRegIdx, RemIdx;
2516   std::tie(SubRegIdx, RemIdx) =
2517       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
2518           VecVT, SubVecVT, OrigIdx, TRI);
2519 
2520   RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
2521   bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
2522                          SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
2523                          SubVecLMUL == RISCVVLMUL::LMUL_F8;
2524 
2525   // 1. If the Idx has been completely eliminated and this subvector's size is
2526   // a vector register or a multiple thereof, or the surrounding elements are
2527   // undef, then this is a subvector insert which naturally aligns to a vector
2528   // register. These can easily be handled using subregister manipulation.
2529   // 2. If the subvector is smaller than a vector register, then the insertion
2530   // must preserve the undisturbed elements of the register. We do this by
2531   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
2532   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
2533   // subvector within the vector register, and an INSERT_SUBVECTOR of that
2534   // LMUL=1 type back into the larger vector (resolving to another subregister
2535   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
2536   // to avoid allocating a large register group to hold our subvector.
2537   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
2538     return Op;
2539 
2540   // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
2541   // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
2542   // (in our case undisturbed). This means we can set up a subvector insertion
2543   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
2544   // size of the subvector.
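  // For example (illustrative only): inserting a 2-element subvector at
  // element offset 3 uses OFFSET=3 and VL=5, so destination elements 0..2 are
  // left untouched, elements 3..4 receive the subvector, and elements from 5
  // onwards follow the (undisturbed) tail policy.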
2545   MVT InterSubVT = VecVT;
2546   SDValue AlignedExtract = Vec;
2547   unsigned AlignedIdx = OrigIdx - RemIdx;
2548   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
2549     InterSubVT = getLMUL1VT(VecVT);
2550     // Extract a subvector equal to the nearest full vector register type. This
2551     // should resolve to an EXTRACT_SUBREG instruction.
2552     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
2553                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
2554   }
2555 
2556   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
2557   // For scalable vectors this must be further multiplied by vscale.
2558   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
2559 
2560   SDValue Mask, VL;
2561   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
2562 
2563   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
2564   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
2565   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
2566   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
2567 
2568   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
2569                        DAG.getUNDEF(InterSubVT), SubVec,
2570                        DAG.getConstant(0, DL, XLenVT));
2571 
2572   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
2573                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
2574 
2575   // If required, insert this subvector back into the correct vector register.
2576   // This should resolve to an INSERT_SUBREG instruction.
2577   if (VecVT.bitsGT(InterSubVT))
2578     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
2579                           DAG.getConstant(AlignedIdx, DL, XLenVT));
2580 
2581   // We might have bitcast from a mask type: cast back to the original type if
2582   // required.
2583   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
2584 }
2585 
2586 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
2587                                                     SelectionDAG &DAG) const {
2588   SDValue Vec = Op.getOperand(0);
2589   MVT SubVecVT = Op.getSimpleValueType();
2590   MVT VecVT = Vec.getSimpleValueType();
2591 
2592   SDLoc DL(Op);
2593   MVT XLenVT = Subtarget.getXLenVT();
2594   unsigned OrigIdx = Op.getConstantOperandVal(1);
2595   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2596 
2597   // We don't have the ability to slide mask vectors down indexed by their i1
2598   // elements; the smallest we can do is i8. Often we are able to bitcast to
2599   // equivalent i8 vectors. Note that when extracting a fixed-length vector
2600   // from a scalable one, we might not necessarily have enough scalable
2601   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
2602   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
2603     if (VecVT.getVectorMinNumElements() >= 8 &&
2604         SubVecVT.getVectorMinNumElements() >= 8) {
2605       assert(OrigIdx % 8 == 0 && "Invalid index");
2606       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
2607              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
2608              "Unexpected mask vector lowering");
2609       OrigIdx /= 8;
2610       SubVecVT =
2611           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
2612                            SubVecVT.isScalableVector());
2613       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
2614                                VecVT.isScalableVector());
2615       Vec = DAG.getBitcast(VecVT, Vec);
2616     } else {
2617       // We can't slide this mask vector down, indexed by its i1 elements.
2618       // This poses a problem when we wish to extract a scalable vector which
2619       // can't be re-expressed as a larger type. Just choose the slow path and
2620       // extend to a larger type, then truncate back down.
2621       // TODO: We could probably improve this when extracting certain fixed
2622       // from fixed, where we can extract as i8 and shift the correct element
2623       // right to reach the desired subvector?
2624       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
2625       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
2626       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
2627       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
2628                         Op.getOperand(1));
2629       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
2630       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
2631     }
2632   }
2633 
2634   // If the subvector is a fixed-length type, we cannot use subregister
2635   // manipulation to simplify the codegen; we don't know which register of an
2636   // LMUL group contains the specific subvector, as we only know the minimum
2637   // register size. Therefore we must slide the vector group down the full
2638   // amount.
2639   if (SubVecVT.isFixedLengthVector()) {
2640     // With an index of 0 this is a cast-like subvector, which can be performed
2641     // with subregister operations.
2642     if (OrigIdx == 0)
2643       return Op;
2644     MVT ContainerVT = VecVT;
2645     if (VecVT.isFixedLengthVector()) {
2646       ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
2647           DAG, VecVT, Subtarget);
2648       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2649     }
2650     SDValue Mask =
2651         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
2652     // Set the vector length to only the number of elements we care about. This
2653     // avoids sliding down elements we're going to discard straight away.
2654     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
2655     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
2656     SDValue Slidedown =
2657         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2658                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
2659     // Now we can use a cast-like subvector extract to get the result.
2660     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
2661                        DAG.getConstant(0, DL, XLenVT));
2662   }
2663 
2664   unsigned SubRegIdx, RemIdx;
2665   std::tie(SubRegIdx, RemIdx) =
2666       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
2667           VecVT, SubVecVT, OrigIdx, TRI);
2668 
2669   // If the Idx has been completely eliminated then this is a subvector extract
2670   // which naturally aligns to a vector register. These can easily be handled
2671   // using subregister manipulation.
2672   if (RemIdx == 0)
2673     return Op;
2674 
2675   // Else we must shift our vector register directly to extract the subvector.
2676   // Do this using VSLIDEDOWN.
2677 
2678   // If the vector type is an LMUL-group type, extract a subvector equal to the
2679   // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
2680   // instruction.
2681   MVT InterSubVT = VecVT;
2682   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
2683     InterSubVT = getLMUL1VT(VecVT);
2684     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
2685                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
2686   }
2687 
2688   // Slide this vector register down by the desired number of elements in order
2689   // to place the desired subvector starting at element 0.
2690   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
2691   // For scalable vectors this must be further multiplied by vscale.
2692   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
2693 
2694   SDValue Mask, VL;
2695   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
2696   SDValue Slidedown =
2697       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
2698                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
2699 
2700   // Now the vector is in the right position, extract our final subvector. This
2701   // should resolve to a COPY.
2702   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
2703                           DAG.getConstant(0, DL, XLenVT));
2704 
2705   // We might have bitcast from a mask type: cast back to the original type if
2706   // required.
2707   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
2708 }
2709 
2710 SDValue
2711 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
2712                                                      SelectionDAG &DAG) const {
2713   auto *Load = cast<LoadSDNode>(Op);
2714 
2715   SDLoc DL(Op);
2716   MVT VT = Op.getSimpleValueType();
2717   MVT ContainerVT =
2718       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
2719 
2720   SDValue VL =
2721       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
2722 
2723   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2724   SDValue NewLoad = DAG.getMemIntrinsicNode(
2725       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
2726       Load->getMemoryVT(), Load->getMemOperand());
2727 
2728   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2729   return DAG.getMergeValues({Result, Load->getChain()}, DL);
2730 }
2731 
2732 SDValue
2733 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
2734                                                       SelectionDAG &DAG) const {
2735   auto *Store = cast<StoreSDNode>(Op);
2736 
2737   SDLoc DL(Op);
2738   MVT VT = Store->getValue().getSimpleValueType();
2739 
2740   // FIXME: We probably need to zero any extra bits in a byte for mask stores.
2741   // This is tricky to do.
2742 
2743   MVT ContainerVT =
2744       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
2745 
2746   SDValue VL =
2747       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
2748 
2749   SDValue NewValue =
2750       convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget);
2751   return DAG.getMemIntrinsicNode(
2752       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
2753       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
2754       Store->getMemoryVT(), Store->getMemOperand());
2755 }
2756 
2757 SDValue
2758 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
2759                                                       SelectionDAG &DAG) const {
2760   MVT InVT = Op.getOperand(0).getSimpleValueType();
2761   MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
2762       DAG, InVT, Subtarget);
2763 
2764   MVT VT = Op.getSimpleValueType();
2765 
2766   SDValue Op1 =
2767       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
2768   SDValue Op2 =
2769       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
2770 
2771   SDLoc DL(Op);
2772   SDValue VL =
2773       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
2774 
2775   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2776 
2777   bool Invert = false;
2778   Optional<unsigned> LogicOpc;
2779   if (ContainerVT.isFloatingPoint()) {
2780     bool Swap = false;
2781     switch (CC) {
2782     default:
2783       break;
2784     case ISD::SETULE:
2785     case ISD::SETULT:
2786       Swap = true;
2787       LLVM_FALLTHROUGH;
2788     case ISD::SETUGE:
2789     case ISD::SETUGT:
2790       CC = getSetCCInverse(CC, ContainerVT);
2791       Invert = true;
2792       break;
2793     case ISD::SETOGE:
2794     case ISD::SETOGT:
2795     case ISD::SETGE:
2796     case ISD::SETGT:
2797       Swap = true;
2798       break;
2799     case ISD::SETUEQ:
2800       // Use !((OLT Op1, Op2) || (OLT Op2, Op1))
2801       Invert = true;
2802       LogicOpc = RISCVISD::VMOR_VL;
2803       CC = ISD::SETOLT;
2804       break;
2805     case ISD::SETONE:
2806       // Use ((OLT Op1, Op2) || (OLT Op2, Op1))
2807       LogicOpc = RISCVISD::VMOR_VL;
2808       CC = ISD::SETOLT;
2809       break;
2810     case ISD::SETO:
2811       // Use (OEQ Op1, Op1) && (OEQ Op2, Op2)
2812       LogicOpc = RISCVISD::VMAND_VL;
2813       CC = ISD::SETOEQ;
2814       break;
2815     case ISD::SETUO:
2816       // Use (UNE Op1, Op1) || (UNE Op2, Op2)
2817       LogicOpc = RISCVISD::VMOR_VL;
2818       CC = ISD::SETUNE;
2819       break;
2820     }
2821 
2822     if (Swap) {
2823       CC = getSetCCSwappedOperands(CC);
2824       std::swap(Op1, Op2);
2825     }
2826   }
2827 
2828   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2829   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2830 
2831   // There are 3 cases we need to emit.
2832   // 1. For (OEQ Op1, Op1) && (OEQ Op2, Op2) or (UNE Op1, Op1) || (UNE Op2, Op2)
2833   //    we need to compare each operand with itself.
2834   // 2. For (OLT Op1, Op2) || (OLT Op2, Op1) we need to compare Op1 and Op2 in
2835   //    both orders.
2836   // 3. For any other case we just need one compare with Op1 and Op2.
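  // For example, SETONE is emitted as two SETOLT compares (Op1<Op2 and
  // Op2<Op1) joined with VMOR_VL, while SETO compares each operand with
  // itself using SETOEQ and joins the results with VMAND_VL.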
2837   SDValue Cmp;
2838   if (LogicOpc && (CC == ISD::SETOEQ || CC == ISD::SETUNE)) {
2839     Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op1,
2840                       DAG.getCondCode(CC), Mask, VL);
2841     SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op2,
2842                                DAG.getCondCode(CC), Mask, VL);
2843     Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL);
2844   } else {
2845     Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
2846                       DAG.getCondCode(CC), Mask, VL);
2847     if (LogicOpc) {
2848       SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op1,
2849                                  DAG.getCondCode(CC), Mask, VL);
2850       Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL);
2851     }
2852   }
2853 
2854   if (Invert) {
2855     SDValue AllOnes = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2856     Cmp = DAG.getNode(RISCVISD::VMXOR_VL, DL, MaskVT, Cmp, AllOnes, VL);
2857   }
2858 
2859   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
2860 }
2861 
2862 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
2863     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
2864   MVT VT = Op.getSimpleValueType();
2865 
2866   if (VT.getVectorElementType() == MVT::i1)
2867     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
2868 
2869   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
2870 }
2871 
2872 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
2873     SDValue Op, SelectionDAG &DAG) const {
2874   MVT VT = Op.getSimpleValueType();
2875   MVT ContainerVT =
2876       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
2877 
2878   MVT I1ContainerVT =
2879       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2880 
2881   SDValue CC =
2882       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
2883   SDValue Op1 =
2884       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
2885   SDValue Op2 =
2886       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
2887 
2888   SDLoc DL(Op);
2889   SDValue Mask, VL;
2890   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2891 
2892   SDValue Select =
2893       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
2894 
2895   return convertFromScalableVector(VT, Select, DAG, Subtarget);
2896 }
2897 
2898 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
2899                                                unsigned NewOpc,
2900                                                bool HasMask) const {
2901   MVT VT = Op.getSimpleValueType();
2902   assert(useRVVForFixedLengthVectorVT(VT) &&
2903          "Only expected to lower fixed length vector operation!");
2904   MVT ContainerVT =
2905       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
2906 
2907   // Create list of operands by converting existing ones to scalable types.
2908   SmallVector<SDValue, 6> Ops;
2909   for (const SDValue &V : Op->op_values()) {
2910     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
2911 
2912     // Pass through non-vector operands.
2913     if (!V.getValueType().isVector()) {
2914       Ops.push_back(V);
2915       continue;
2916     }
2917 
2918     // "cast" fixed length vector to a scalable vector.
2919     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
2920            "Only fixed length vectors are supported!");
2921     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
2922   }
2923 
2924   SDLoc DL(Op);
2925   SDValue Mask, VL;
2926   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2927   if (HasMask)
2928     Ops.push_back(Mask);
2929   Ops.push_back(VL);
2930 
2931   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
2932   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
2933 }
2934 
2935 // Returns the opcode of the target-specific SDNode that implements the 32-bit
2936 // form of the given Opcode.
2937 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
2938   switch (Opcode) {
2939   default:
2940     llvm_unreachable("Unexpected opcode");
2941   case ISD::SHL:
2942     return RISCVISD::SLLW;
2943   case ISD::SRA:
2944     return RISCVISD::SRAW;
2945   case ISD::SRL:
2946     return RISCVISD::SRLW;
2947   case ISD::SDIV:
2948     return RISCVISD::DIVW;
2949   case ISD::UDIV:
2950     return RISCVISD::DIVUW;
2951   case ISD::UREM:
2952     return RISCVISD::REMUW;
2953   case ISD::ROTL:
2954     return RISCVISD::ROLW;
2955   case ISD::ROTR:
2956     return RISCVISD::RORW;
2957   case RISCVISD::GREVI:
2958     return RISCVISD::GREVIW;
2959   case RISCVISD::GORCI:
2960     return RISCVISD::GORCIW;
2961   }
2962 }
2963 
2964 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
2965 // Because i32 isn't a legal type for RV64, these operations would otherwise
2966 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
2967 // later on, because the fact that the operation was originally of type i32
2968 // is lost.
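// For example (illustrative), on RV64 an i32 shift left
//   (i32 (shl x, y))
// is rewritten here to
//   (i32 (trunc (RISCVISD::SLLW (any_extend x), (any_extend y))))
// so that instruction selection can pick sllw rather than a 64-bit sll.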
2969 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
2970                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
2971   SDLoc DL(N);
2972   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
2973   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
2974   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
2975   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
2976   // ReplaceNodeResults requires we maintain the same type for the return value.
2977   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
2978 }
2979 
2980 // Converts the given 32-bit operation to an i64 operation with sign-extension
2981 // semantics in order to reduce the number of sign-extension instructions.
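// For example (illustrative), an i32 add becomes
//   (i32 (trunc (sext_inreg (add (any_extend x), (any_extend y)), i32)))
// which typically selects to addw, whose result is already sign-extended.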
2982 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
2983   SDLoc DL(N);
2984   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
2985   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
2986   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
2987   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
2988                                DAG.getValueType(MVT::i32));
2989   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
2990 }
2991 
2992 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
2993                                              SmallVectorImpl<SDValue> &Results,
2994                                              SelectionDAG &DAG) const {
2995   SDLoc DL(N);
2996   switch (N->getOpcode()) {
2997   default:
2998     llvm_unreachable("Don't know how to custom type legalize this operation!");
2999   case ISD::STRICT_FP_TO_SINT:
3000   case ISD::STRICT_FP_TO_UINT:
3001   case ISD::FP_TO_SINT:
3002   case ISD::FP_TO_UINT: {
3003     bool IsStrict = N->isStrictFPOpcode();
3004     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3005            "Unexpected custom legalisation");
3006     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
3007     // If the FP type needs to be softened, emit a library call using the 'si'
3008     // version. If we left it to default legalization we'd end up with 'di'. If
3009     // the FP type doesn't need to be softened just let generic type
3010     // legalization promote the result type.
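    // For example, an f32 to i32 fp_to_sint on a soft-float target emits a
    // call to __fixsfsi here, rather than the __fixsfdi call that default
    // legalization would produce.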
3011     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
3012         TargetLowering::TypeSoftenFloat)
3013       return;
3014     RTLIB::Libcall LC;
3015     if (N->getOpcode() == ISD::FP_TO_SINT ||
3016         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
3017       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
3018     else
3019       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
3020     MakeLibCallOptions CallOptions;
3021     EVT OpVT = Op0.getValueType();
3022     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
3023     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
3024     SDValue Result;
3025     std::tie(Result, Chain) =
3026         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
3027     Results.push_back(Result);
3028     if (IsStrict)
3029       Results.push_back(Chain);
3030     break;
3031   }
3032   case ISD::READCYCLECOUNTER: {
3033     assert(!Subtarget.is64Bit() &&
3034            "READCYCLECOUNTER only has custom type legalization on riscv32");
3035 
3036     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
3037     SDValue RCW =
3038         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
3039 
3040     Results.push_back(
3041         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
3042     Results.push_back(RCW.getValue(2));
3043     break;
3044   }
3045   case ISD::ADD:
3046   case ISD::SUB:
3047   case ISD::MUL:
3048     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3049            "Unexpected custom legalisation");
3050     if (N->getOperand(1).getOpcode() == ISD::Constant)
3051       return;
3052     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
3053     break;
3054   case ISD::SHL:
3055   case ISD::SRA:
3056   case ISD::SRL:
3057     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3058            "Unexpected custom legalisation");
3059     if (N->getOperand(1).getOpcode() == ISD::Constant)
3060       return;
3061     Results.push_back(customLegalizeToWOp(N, DAG));
3062     break;
3063   case ISD::ROTL:
3064   case ISD::ROTR:
3065     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3066            "Unexpected custom legalisation");
3067     Results.push_back(customLegalizeToWOp(N, DAG));
3068     break;
3069   case ISD::SDIV:
3070   case ISD::UDIV:
3071   case ISD::UREM: {
3072     MVT VT = N->getSimpleValueType(0);
3073     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
3074            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
3075            "Unexpected custom legalisation");
3076     if (N->getOperand(0).getOpcode() == ISD::Constant ||
3077         N->getOperand(1).getOpcode() == ISD::Constant)
3078       return;
3079 
3080     // If the input is i32, use ANY_EXTEND since the W instructions don't read
3081     // the upper 32 bits. For other types we need to sign or zero extend
3082     // based on the opcode.
3083     unsigned ExtOpc = ISD::ANY_EXTEND;
3084     if (VT != MVT::i32)
3085       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
3086                                            : ISD::ZERO_EXTEND;
3087 
3088     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
3089     break;
3090   }
3091   case ISD::BITCAST: {
3092     assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3093              Subtarget.hasStdExtF()) ||
3094             (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
3095            "Unexpected custom legalisation");
3096     SDValue Op0 = N->getOperand(0);
3097     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
3098       if (Op0.getValueType() != MVT::f16)
3099         return;
3100       SDValue FPConv =
3101           DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
3102       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
3103     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3104                Subtarget.hasStdExtF()) {
3105       if (Op0.getValueType() != MVT::f32)
3106         return;
3107       SDValue FPConv =
3108           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
3109       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
3110     }
3111     break;
3112   }
3113   case RISCVISD::GREVI:
3114   case RISCVISD::GORCI: {
3115     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3116            "Unexpected custom legalisation");
3117     // This is similar to customLegalizeToWOp, except that we pass the second
3118     // operand (a TargetConstant) straight through: it is already of type
3119     // XLenVT.
3120     SDLoc DL(N);
3121     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
3122     SDValue NewOp0 =
3123         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3124     SDValue NewRes =
3125         DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
3126     // ReplaceNodeResults requires we maintain the same type for the return
3127     // value.
3128     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
3129     break;
3130   }
3131   case RISCVISD::SHFLI: {
3132     // There is no SHFLIW instruction, but we can just promote the operation.
3133     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3134            "Unexpected custom legalisation");
3135     SDLoc DL(N);
3136     SDValue NewOp0 =
3137         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3138     SDValue NewRes =
3139         DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1));
3140     // ReplaceNodeResults requires we maintain the same type for the return
3141     // value.
3142     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
3143     break;
3144   }
3145   case ISD::BSWAP:
3146   case ISD::BITREVERSE: {
3147     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3148            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
3149     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
3150                                  N->getOperand(0));
3151     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
3152     SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
3153                                  DAG.getTargetConstant(Imm, DL,
3154                                                        Subtarget.getXLenVT()));
3155     // ReplaceNodeResults requires we maintain the same type for the return
3156     // value.
3157     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
3158     break;
3159   }
3160   case ISD::FSHL:
3161   case ISD::FSHR: {
3162     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3163            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
3164     SDValue NewOp0 =
3165         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3166     SDValue NewOp1 =
3167         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
3168     SDValue NewOp2 =
3169         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
3170     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
3171     // Mask the shift amount to 5 bits.
3172     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
3173                          DAG.getConstant(0x1f, DL, MVT::i64));
3174     unsigned Opc =
3175         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
3176     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
3177     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
3178     break;
3179   }
3180   case ISD::EXTRACT_VECTOR_ELT: {
3181     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
3182     // type is illegal (currently only vXi64 RV32).
3183     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
3184     // transferred to the destination register. We issue two of these from the
3185     // upper- and lower- halves of the SEW-bit vector element, slid down to the
3186     // first element.
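    // The sequence is roughly: an optional slidedown to bring the element to
    // index 0, a VMV_X_S to read the low XLEN bits, a vector shift right by
    // 32, and a second VMV_X_S to read the high bits.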
3187     SDLoc DL(N);
3188     SDValue Vec = N->getOperand(0);
3189     SDValue Idx = N->getOperand(1);
3190 
3191     // The vector type hasn't been legalized yet so we can't issue target
3192     // specific nodes if it needs legalization.
3193     // FIXME: We would manually legalize if it's important.
3194     if (!isTypeLegal(Vec.getValueType()))
3195       return;
3196 
3197     MVT VecVT = Vec.getSimpleValueType();
3198 
3199     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
3200            VecVT.getVectorElementType() == MVT::i64 &&
3201            "Unexpected EXTRACT_VECTOR_ELT legalization");
3202 
3203     // If this is a fixed vector, we need to convert it to a scalable vector.
3204     MVT ContainerVT = VecVT;
3205     if (VecVT.isFixedLengthVector()) {
3206       ContainerVT = getContainerForFixedLengthVector(DAG, VecVT, Subtarget);
3207       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3208     }
3209 
3210     MVT XLenVT = Subtarget.getXLenVT();
3211 
3212     // Use a VL of 1 to avoid processing more elements than we need.
3213     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
3214     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3215     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3216 
3217     // Unless the index is known to be 0, we must slide the vector down to get
3218     // the desired element into index 0.
3219     if (!isNullConstant(Idx)) {
3220       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3221                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3222     }
3223 
3224     // Extract the lower XLEN bits of the correct vector element.
3225     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3226 
3227     // To extract the upper XLEN bits of the vector element, shift the first
3228     // element right by 32 bits and re-extract the lower XLEN bits.
3229     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
3230                                      DAG.getConstant(32, DL, XLenVT), VL);
3231     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
3232                                  ThirtyTwoV, Mask, VL);
3233 
3234     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
3235 
3236     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
3237     break;
3238   }
3239   case ISD::INTRINSIC_WO_CHAIN: {
3240     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3241     switch (IntNo) {
3242     default:
3243       llvm_unreachable(
3244           "Don't know how to custom type legalize this intrinsic!");
3245     case Intrinsic::riscv_vmv_x_s: {
3246       EVT VT = N->getValueType(0);
3247       assert((VT == MVT::i8 || VT == MVT::i16 ||
3248               (Subtarget.is64Bit() && VT == MVT::i32)) &&
3249              "Unexpected custom legalisation!");
3250       SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
3251                                     Subtarget.getXLenVT(), N->getOperand(1));
3252       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
3253       break;
3254     }
3255     }
3256     break;
3257   }
3258   case ISD::VECREDUCE_ADD:
3259   case ISD::VECREDUCE_AND:
3260   case ISD::VECREDUCE_OR:
3261   case ISD::VECREDUCE_XOR:
3262   case ISD::VECREDUCE_SMAX:
3263   case ISD::VECREDUCE_UMAX:
3264   case ISD::VECREDUCE_SMIN:
3265   case ISD::VECREDUCE_UMIN:
3266     // The custom-lowering for these nodes returns a vector whose first element
3267     // is the result of the reduction. Extract its first element and let the
3268     // legalization for EXTRACT_VECTOR_ELT do the rest of the job.
3269     Results.push_back(lowerVECREDUCE(SDValue(N, 0), DAG));
3270     break;
3271   }
3272 }
3273 
3274 // A structure to hold one of the bit-manipulation patterns below. Together, a
3275 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
3276 //   (or (and (shl x, 1), 0xAAAAAAAA),
3277 //       (and (srl x, 1), 0x55555555))
3278 struct RISCVBitmanipPat {
3279   SDValue Op;
3280   unsigned ShAmt;
3281   bool IsSHL;
3282 
3283   bool formsPairWith(const RISCVBitmanipPat &Other) const {
3284     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
3285   }
3286 };
3287 
3288 // Matches patterns of the form
3289 //   (and (shl x, C2), (C1 << C2))
3290 //   (and (srl x, C2), C1)
3291 //   (shl (and x, C1), C2)
3292 //   (srl (and x, (C1 << C2)), C2)
3293 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
3294 // The expected masks for each shift amount are specified in BitmanipMasks where
3295 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
3296 // The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
3297 // whether BitmanipMasks contains 6 or 5 entries respectively, assuming that
3298 // the maximum possible XLen is 64.
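// For example (illustrative), for an i32 value (and (srl x, 4), 0x0F0F0F0F)
// matches with Op = x, ShAmt = 4 and IsSHL = false, since BitmanipMasks[2]
// (log2(4) == 2) holds the corresponding 0x0F0F0F0F mask.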
3299 static Optional<RISCVBitmanipPat>
3300 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
3301   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
3302          "Unexpected number of masks");
3303   Optional<uint64_t> Mask;
3304   // Optionally consume a mask around the shift operation.
3305   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
3306     Mask = Op.getConstantOperandVal(1);
3307     Op = Op.getOperand(0);
3308   }
3309   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
3310     return None;
3311   bool IsSHL = Op.getOpcode() == ISD::SHL;
3312 
3313   if (!isa<ConstantSDNode>(Op.getOperand(1)))
3314     return None;
3315   uint64_t ShAmt = Op.getConstantOperandVal(1);
3316 
3317   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
3318   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
3319     return None;
3320   // If we don't have enough masks for 64 bit, then we must be trying to
3321   // match SHFL so we're only allowed to shift 1/4 of the width.
3322   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
3323     return None;
3324 
3325   SDValue Src = Op.getOperand(0);
3326 
3327   // The expected mask is shifted left when the AND is found around SHL
3328   // patterns.
3329   //   ((x >> 1) & 0x55555555)
3330   //   ((x << 1) & 0xAAAAAAAA)
3331   bool SHLExpMask = IsSHL;
3332 
3333   if (!Mask) {
3334     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
3335     // the mask is all ones: consume that now.
3336     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
3337       Mask = Src.getConstantOperandVal(1);
3338       Src = Src.getOperand(0);
3339       // The expected mask is now in fact shifted left for SRL, so reverse the
3340       // decision.
3341       //   ((x & 0xAAAAAAAA) >> 1)
3342       //   ((x & 0x55555555) << 1)
3343       SHLExpMask = !SHLExpMask;
3344     } else {
3345       // Use a default shifted mask of all-ones if there's no AND, truncated
3346       // down to the expected width. This simplifies the logic later on.
3347       Mask = maskTrailingOnes<uint64_t>(Width);
3348       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
3349     }
3350   }
3351 
3352   unsigned MaskIdx = Log2_32(ShAmt);
3353   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
3354 
3355   if (SHLExpMask)
3356     ExpMask <<= ShAmt;
3357 
3358   if (Mask != ExpMask)
3359     return None;
3360 
3361   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
3362 }
3363 
3364 // Matches any of the following bit-manipulation patterns:
3365 //   (and (shl x, 1), (0x55555555 << 1))
3366 //   (and (srl x, 1), 0x55555555)
3367 //   (shl (and x, 0x55555555), 1)
3368 //   (srl (and x, (0x55555555 << 1)), 1)
3369 // where the shift amount and mask may vary thus:
3370 //   [1]  = 0x55555555 / 0xAAAAAAAA
3371 //   [2]  = 0x33333333 / 0xCCCCCCCC
3372 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
3373 //   [8]  = 0x00FF00FF / 0xFF00FF00
3374 //   [16] = 0x0000FFFF / 0xFFFF0000
3375 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
3376 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
3377   // These are the unshifted masks which we use to match bit-manipulation
3378   // patterns. They may be shifted left in certain circumstances.
3379   static const uint64_t BitmanipMasks[] = {
3380       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
3381       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
3382 
3383   return matchRISCVBitmanipPat(Op, BitmanipMasks);
3384 }
3385 
3386 // Match the following pattern as a GREVI(W) operation
3387 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
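// For example, (or (and (shl x, 1), 0xAAAAAAAA), (and (srl x, 1), 0x55555555))
// on an i32 value becomes (GREVI x, 1), which swaps each pair of adjacent
// bits.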
3388 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
3389                                const RISCVSubtarget &Subtarget) {
3390   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
3391   EVT VT = Op.getValueType();
3392 
3393   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
3394     auto LHS = matchGREVIPat(Op.getOperand(0));
3395     auto RHS = matchGREVIPat(Op.getOperand(1));
3396     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
3397       SDLoc DL(Op);
3398       return DAG.getNode(
3399           RISCVISD::GREVI, DL, VT, LHS->Op,
3400           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
3401     }
3402   }
3403   return SDValue();
3404 }
3405 
3406 // Matches any of the following patterns as a GORCI(W) operation
3407 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
3408 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
3409 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
3410 // 4.  (or (rotl/rotr x, bitwidth/2), x)
3411 // Note that with the variant of 3.,
3412 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
3413 // the inner pattern will first be matched as GREVI and then the outer
3414 // pattern will be matched to GORC via the first rule above.
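// For example, on RV32 (or (rotl x, 16), x) becomes (GORCI x, 16) via rule 4,
// and (or (GREVI x, 4), x) becomes (GORCI x, 4) via rule 1.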
3415 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
3416                                const RISCVSubtarget &Subtarget) {
3417   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
3418   EVT VT = Op.getValueType();
3419 
3420   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
3421     SDLoc DL(Op);
3422     SDValue Op0 = Op.getOperand(0);
3423     SDValue Op1 = Op.getOperand(1);
3424 
3425     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
3426       if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
3427           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
3428         return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
3429       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
3430       if ((Reverse.getOpcode() == ISD::ROTL ||
3431            Reverse.getOpcode() == ISD::ROTR) &&
3432           Reverse.getOperand(0) == X &&
3433           isa<ConstantSDNode>(Reverse.getOperand(1))) {
3434         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
3435         if (RotAmt == (VT.getSizeInBits() / 2))
3436           return DAG.getNode(
3437               RISCVISD::GORCI, DL, VT, X,
3438               DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
3439       }
3440       return SDValue();
3441     };
3442 
3443     // Check for either commutable permutation of (or (GREVI x, shamt), x)
3444     if (SDValue V = MatchOROfReverse(Op0, Op1))
3445       return V;
3446     if (SDValue V = MatchOROfReverse(Op1, Op0))
3447       return V;
3448 
3449     // OR is commutable so canonicalize its OR operand to the left
3450     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
3451       std::swap(Op0, Op1);
3452     if (Op0.getOpcode() != ISD::OR)
3453       return SDValue();
3454     SDValue OrOp0 = Op0.getOperand(0);
3455     SDValue OrOp1 = Op0.getOperand(1);
3456     auto LHS = matchGREVIPat(OrOp0);
3457     // OR is commutable so swap the operands and try again: x might have been
3458     // on the left
3459     if (!LHS) {
3460       std::swap(OrOp0, OrOp1);
3461       LHS = matchGREVIPat(OrOp0);
3462     }
3463     auto RHS = matchGREVIPat(Op1);
3464     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
3465       return DAG.getNode(
3466           RISCVISD::GORCI, DL, VT, LHS->Op,
3467           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
3468     }
3469   }
3470   return SDValue();
3471 }
3472 
3473 // Matches any of the following bit-manipulation patterns:
3474 //   (and (shl x, 1), (0x22222222 << 1))
3475 //   (and (srl x, 1), 0x22222222)
3476 //   (shl (and x, 0x22222222), 1)
3477 //   (srl (and x, (0x22222222 << 1)), 1)
3478 // where the shift amount and mask may vary thus:
3479 //   [1]  = 0x22222222 / 0x44444444
3480 //   [2]  = 0x0C0C0C0C / 0x30303030
3481 //   [4]  = 0x00F000F0 / 0x0F000F00
3482 //   [8]  = 0x0000FF00 / 0x00FF0000
3483 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
3484 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
3485   // These are the unshifted masks which we use to match bit-manipulation
3486   // patterns. They may be shifted left in certain circumstances.
3487   static const uint64_t BitmanipMasks[] = {
3488       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
3489       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
3490 
3491   return matchRISCVBitmanipPat(Op, BitmanipMasks);
3492 }
3493 
3494 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
3495 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
3496                                const RISCVSubtarget &Subtarget) {
3497   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
3498   EVT VT = Op.getValueType();
3499 
3500   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
3501     return SDValue();
3502 
3503   SDValue Op0 = Op.getOperand(0);
3504   SDValue Op1 = Op.getOperand(1);
3505 
3506   // Or is commutable so canonicalize the second OR to the LHS.
3507   if (Op0.getOpcode() != ISD::OR)
3508     std::swap(Op0, Op1);
3509   if (Op0.getOpcode() != ISD::OR)
3510     return SDValue();
3511 
3512   // We found an inner OR, so our operands are the operands of the inner OR
3513   // and the other operand of the outer OR.
3514   SDValue A = Op0.getOperand(0);
3515   SDValue B = Op0.getOperand(1);
3516   SDValue C = Op1;
3517 
3518   auto Match1 = matchSHFLPat(A);
3519   auto Match2 = matchSHFLPat(B);
3520 
3521   // If neither matched, we failed.
3522   if (!Match1 && !Match2)
3523     return SDValue();
3524 
3525   // We had at least one match. If one failed, try the remaining C operand.
3526   if (!Match1) {
3527     std::swap(A, C);
3528     Match1 = matchSHFLPat(A);
3529     if (!Match1)
3530       return SDValue();
3531   } else if (!Match2) {
3532     std::swap(B, C);
3533     Match2 = matchSHFLPat(B);
3534     if (!Match2)
3535       return SDValue();
3536   }
3537   assert(Match1 && Match2);
3538 
3539   // Make sure our matches pair up.
3540   if (!Match1->formsPairWith(*Match2))
3541     return SDValue();
3542 
3543   // All that remains is to make sure C is an AND with the same input, masking
3544   // out the bits that are being shuffled.
3545   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
3546       C.getOperand(0) != Match1->Op)
3547     return SDValue();
3548 
3549   uint64_t Mask = C.getConstantOperandVal(1);
3550 
3551   static const uint64_t BitmanipMasks[] = {
3552       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
3553       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
3554   };
3555 
3556   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
3557   unsigned MaskIdx = Log2_32(Match1->ShAmt);
3558   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
3559 
3560   if (Mask != ExpMask)
3561     return SDValue();
3562 
3563   SDLoc DL(Op);
3564   return DAG.getNode(
3565       RISCVISD::SHFLI, DL, VT, Match1->Op,
3566       DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT()));
3567 }
3568 
3569 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
3570 // non-zero, and to x when it is zero: any repeated GREVI stage undoes itself.
3571 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
3572 // stage does not undo itself, but it is redundant.
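// For example, (GREVI (GREVI x, 1), 1) folds to x, (GREVI (GREVI x, 1), 2)
// folds to (GREVI x, 3), and (GORCI (GORCI x, 2), 2) folds to (GORCI x, 2).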
3573 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
3574   unsigned ShAmt1 = N->getConstantOperandVal(1);
3575   SDValue Src = N->getOperand(0);
3576 
3577   if (Src.getOpcode() != N->getOpcode())
3578     return SDValue();
3579 
3580   unsigned ShAmt2 = Src.getConstantOperandVal(1);
3581   Src = Src.getOperand(0);
3582 
3583   unsigned CombinedShAmt;
3584   if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
3585     CombinedShAmt = ShAmt1 | ShAmt2;
3586   else
3587     CombinedShAmt = ShAmt1 ^ ShAmt2;
3588 
3589   if (CombinedShAmt == 0)
3590     return Src;
3591 
3592   SDLoc DL(N);
3593   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
3594                      DAG.getTargetConstant(CombinedShAmt, DL,
3595                                            N->getOperand(1).getValueType()));
3596 }
3597 
3598 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
3599                                                DAGCombinerInfo &DCI) const {
3600   SelectionDAG &DAG = DCI.DAG;
3601 
3602   switch (N->getOpcode()) {
3603   default:
3604     break;
3605   case RISCVISD::SplitF64: {
3606     SDValue Op0 = N->getOperand(0);
3607     // If the input to SplitF64 is just BuildPairF64 then the operation is
3608     // redundant. Instead, use BuildPairF64's operands directly.
3609     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
3610       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
3611 
3612     SDLoc DL(N);
3613 
3614     // It's cheaper to materialise two 32-bit integers than to load a double
3615     // from the constant pool and transfer it to integer registers through the
3616     // stack.
3617     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
3618       APInt V = C->getValueAPF().bitcastToAPInt();
3619       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
3620       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
3621       return DCI.CombineTo(N, Lo, Hi);
3622     }
3623 
3624     // This is a target-specific version of a DAGCombine performed in
3625     // DAGCombiner::visitBITCAST. It performs the equivalent of:
3626     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
3627     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
3628     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
3629         !Op0.getNode()->hasOneUse())
3630       break;
3631     SDValue NewSplitF64 =
3632         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
3633                     Op0.getOperand(0));
3634     SDValue Lo = NewSplitF64.getValue(0);
3635     SDValue Hi = NewSplitF64.getValue(1);
3636     APInt SignBit = APInt::getSignMask(32);
3637     if (Op0.getOpcode() == ISD::FNEG) {
3638       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
3639                                   DAG.getConstant(SignBit, DL, MVT::i32));
3640       return DCI.CombineTo(N, Lo, NewHi);
3641     }
3642     assert(Op0.getOpcode() == ISD::FABS);
3643     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
3644                                 DAG.getConstant(~SignBit, DL, MVT::i32));
3645     return DCI.CombineTo(N, Lo, NewHi);
3646   }
3647   case RISCVISD::SLLW:
3648   case RISCVISD::SRAW:
3649   case RISCVISD::SRLW:
3650   case RISCVISD::ROLW:
3651   case RISCVISD::RORW: {
3652     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
3653     SDValue LHS = N->getOperand(0);
3654     SDValue RHS = N->getOperand(1);
3655     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
3656     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
3657     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
3658         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
3659       if (N->getOpcode() != ISD::DELETED_NODE)
3660         DCI.AddToWorklist(N);
3661       return SDValue(N, 0);
3662     }
3663     break;
3664   }
3665   case RISCVISD::FSL:
3666   case RISCVISD::FSR: {
3667     // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
3668     SDValue ShAmt = N->getOperand(2);
3669     unsigned BitWidth = ShAmt.getValueSizeInBits();
3670     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
3671     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
3672     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
3673       if (N->getOpcode() != ISD::DELETED_NODE)
3674         DCI.AddToWorklist(N);
3675       return SDValue(N, 0);
3676     }
3677     break;
3678   }
3679   case RISCVISD::FSLW:
3680   case RISCVISD::FSRW: {
3681     // Only the lower 32 bits of the value operands and the lower 6 bits of
3682     // the shift amount are read.
3683     SDValue Op0 = N->getOperand(0);
3684     SDValue Op1 = N->getOperand(1);
3685     SDValue ShAmt = N->getOperand(2);
3686     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
3687     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
3688     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
3689         SimplifyDemandedBits(Op1, OpMask, DCI) ||
3690         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
3691       if (N->getOpcode() != ISD::DELETED_NODE)
3692         DCI.AddToWorklist(N);
3693       return SDValue(N, 0);
3694     }
3695     break;
3696   }
3697   case RISCVISD::GREVIW:
3698   case RISCVISD::GORCIW: {
3699     // Only the lower 32 bits of the first operand are read
3700     SDValue Op0 = N->getOperand(0);
3701     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
3702     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
3703       if (N->getOpcode() != ISD::DELETED_NODE)
3704         DCI.AddToWorklist(N);
3705       return SDValue(N, 0);
3706     }
3707 
3708     return combineGREVI_GORCI(N, DCI.DAG);
3709   }
3710   case RISCVISD::FMV_X_ANYEXTW_RV64: {
3711     SDLoc DL(N);
3712     SDValue Op0 = N->getOperand(0);
3713     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
3714     // conversion is unnecessary and can be replaced with an ANY_EXTEND
3715     // of the FMV_W_X_RV64 operand.
3716     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
3717       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
3718              "Unexpected value type!");
3719       return Op0.getOperand(0);
3720     }
3721 
3722     // This is a target-specific version of a DAGCombine performed in
3723     // DAGCombiner::visitBITCAST. It performs the equivalent of:
3724     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
3725     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
3726     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
3727         !Op0.getNode()->hasOneUse())
3728       break;
3729     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
3730                                  Op0.getOperand(0));
3731     APInt SignBit = APInt::getSignMask(32).sext(64);
3732     if (Op0.getOpcode() == ISD::FNEG)
3733       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
3734                          DAG.getConstant(SignBit, DL, MVT::i64));
3735 
3736     assert(Op0.getOpcode() == ISD::FABS);
3737     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
3738                        DAG.getConstant(~SignBit, DL, MVT::i64));
3739   }
3740   case RISCVISD::GREVI:
3741   case RISCVISD::GORCI:
3742     return combineGREVI_GORCI(N, DCI.DAG);
3743   case ISD::OR:
3744     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
3745       return GREV;
3746     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
3747       return GORC;
3748     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget))
3749       return SHFL;
3750     break;
3751   case RISCVISD::SELECT_CC: {
3752     // Transform
3753     // (select_cc (xor X, 1), 0, setne, trueV, falseV) ->
3754     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
3755     // This can occur when legalizing some floating point comparisons.
3756     SDValue LHS = N->getOperand(0);
3757     SDValue RHS = N->getOperand(1);
3758     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
3759     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
3760     if (ISD::isIntEqualitySetCC(CCVal) && isNullConstant(RHS) &&
3761         LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) &&
3762         DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) {
3763       SDLoc DL(N);
3764       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
3765       SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
3766       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
3767                          {LHS.getOperand(0), RHS, TargetCC, N->getOperand(3),
3768                           N->getOperand(4)});
3769     }
3770     break;
3771   }
3772   case ISD::SETCC: {
3773     // (setcc X, 1, setne) -> (setcc X, 0, seteq) if we can prove X is 0/1.
3774     // Comparing with 0 may allow us to fold into bnez/beqz.
3775     SDValue LHS = N->getOperand(0);
3776     SDValue RHS = N->getOperand(1);
3777     if (LHS.getValueType().isScalableVector())
3778       break;
3779     auto CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
3780     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
3781     if (isOneConstant(RHS) && ISD::isIntEqualitySetCC(CC) &&
3782         DAG.MaskedValueIsZero(LHS, Mask)) {
3783       SDLoc DL(N);
3784       SDValue Zero = DAG.getConstant(0, DL, LHS.getValueType());
3785       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
3786       return DAG.getSetCC(DL, N->getValueType(0), LHS, Zero, CC);
3787     }
3788     break;
3789   }
3790   case ISD::FCOPYSIGN: {
3791     EVT VT = N->getValueType(0);
3792     if (!VT.isVector())
3793       break;
3794     // There is a form of VFSGNJ which injects the negated sign of its second
3795     // operand. Try to bubble any FNEG up after the extend/round to produce
3796     // this optimized pattern. Avoid modifying cases where the FP_ROUND has
3797     // TRUNC=1.
3798     SDValue In2 = N->getOperand(1);
3799     // Avoid cases where the extend/round has multiple uses, as duplicating
3800     // those is typically more expensive than removing a fneg.
3801     if (!In2.hasOneUse())
3802       break;
3803     if (In2.getOpcode() != ISD::FP_EXTEND &&
3804         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
3805       break;
3806     In2 = In2.getOperand(0);
3807     if (In2.getOpcode() != ISD::FNEG)
3808       break;
3809     SDLoc DL(N);
3810     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
3811     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
3812                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
3813   }
3814   }
3815 
3816   return SDValue();
3817 }
3818 
3819 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
3820     const SDNode *N, CombineLevel Level) const {
3821   // The following folds are only desirable if `(OP _, c1 << c2)` can be
3822   // materialised in fewer instructions than `(OP _, c1)`:
3823   //
3824   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
3825   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
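  //
  // For example, for (shl (add x, 2047), 4): 2047 << 4 = 32752 does not fit in
  // a 12-bit add immediate but 2047 does, so the fold is not desirable and the
  // ADDI keeps its original immediate.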
3826   SDValue N0 = N->getOperand(0);
3827   EVT Ty = N0.getValueType();
3828   if (Ty.isScalarInteger() &&
3829       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
3830     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3831     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
3832     if (C1 && C2) {
3833       const APInt &C1Int = C1->getAPIntValue();
3834       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
3835 
3836       // We can materialise `c1 << c2` into an add immediate, so it's "free",
3837       // and the combine should happen, to potentially allow further combines
3838       // later.
3839       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
3840           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
3841         return true;
3842 
3843       // We can materialise `c1` in an add immediate, so it's "free", and the
3844       // combine should be prevented.
3845       if (C1Int.getMinSignedBits() <= 64 &&
3846           isLegalAddImmediate(C1Int.getSExtValue()))
3847         return false;
3848 
3849       // Neither constant will fit into an immediate, so find materialisation
3850       // costs.
3851       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
3852                                               Subtarget.is64Bit());
3853       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
3854           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
3855 
3856       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
3857       // combine should be prevented.
3858       if (C1Cost < ShiftedC1Cost)
3859         return false;
3860     }
3861   }
3862   return true;
3863 }
3864 
3865 bool RISCVTargetLowering::targetShrinkDemandedConstant(
3866     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
3867     TargetLoweringOpt &TLO) const {
3868   // Delay this optimization as late as possible.
3869   if (!TLO.LegalOps)
3870     return false;
3871 
3872   EVT VT = Op.getValueType();
3873   if (VT.isVector())
3874     return false;
3875 
3876   // Only handle AND for now.
3877   if (Op.getOpcode() != ISD::AND)
3878     return false;
3879 
3880   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3881   if (!C)
3882     return false;
3883 
3884   const APInt &Mask = C->getAPIntValue();
3885 
3886   // Clear all non-demanded bits initially.
3887   APInt ShrunkMask = Mask & DemandedBits;
3888 
3889   // If the shrunk mask fits in sign extended 12 bits, let the target
3890   // independent code apply it.
3891   if (ShrunkMask.isSignedIntN(12))
3892     return false;
3893 
3894   // Try to make a smaller immediate by setting undemanded bits.
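  //
  // For example, for (and X, 0xffff) where only the low 16 bits are demanded:
  // 0xffff cannot be encoded as a 12-bit immediate, but ORing in the
  // undemanded upper bits yields an all-ones mask, i.e. a legal -1 immediate.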
3895 
3896   // We need to be able to make a negative number through a combination of mask
3897   // and undemanded bits.
3898   APInt ExpandedMask = Mask | ~DemandedBits;
3899   if (!ExpandedMask.isNegative())
3900     return false;
3901 
  // Compute the fewest number of bits needed to represent the negative
  // number.
3903   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
3904 
  // Try to make a 12-bit negative immediate. If that fails, try to make a
  // 32-bit negative immediate unless the shrunk immediate already fits in 32
  // bits.
3907   APInt NewMask = ShrunkMask;
3908   if (MinSignedBits <= 12)
3909     NewMask.setBitsFrom(11);
3910   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
3911     NewMask.setBitsFrom(31);
3912   else
3913     return false;
3914 
  // Sanity check that our new mask is a subset of the expanded mask (the
  // original mask combined with the undemanded bits).
3916   assert(NewMask.isSubsetOf(ExpandedMask));
3917 
3918   // If we aren't changing the mask, just return true to keep it and prevent
3919   // the caller from optimizing.
3920   if (NewMask == Mask)
3921     return true;
3922 
3923   // Replace the constant with the new mask.
3924   SDLoc DL(Op);
3925   SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
3926   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
3927   return TLO.CombineTo(Op, NewOp);
3928 }
3929 
3930 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
3931                                                         KnownBits &Known,
3932                                                         const APInt &DemandedElts,
3933                                                         const SelectionDAG &DAG,
3934                                                         unsigned Depth) const {
3935   unsigned BitWidth = Known.getBitWidth();
3936   unsigned Opc = Op.getOpcode();
3937   assert((Opc >= ISD::BUILTIN_OP_END ||
3938           Opc == ISD::INTRINSIC_WO_CHAIN ||
3939           Opc == ISD::INTRINSIC_W_CHAIN ||
3940           Opc == ISD::INTRINSIC_VOID) &&
3941          "Should use MaskedValueIsZero if you don't know whether Op"
3942          " is a target node!");
3943 
3944   Known.resetAll();
3945   switch (Opc) {
3946   default: break;
3947   case RISCVISD::REMUW: {
3948     KnownBits Known2;
3949     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3950     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3951     // We only care about the lower 32 bits.
3952     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
3953     // Restore the original width by sign extending.
3954     Known = Known.sext(BitWidth);
3955     break;
3956   }
3957   case RISCVISD::DIVUW: {
3958     KnownBits Known2;
3959     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3960     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3961     // We only care about the lower 32 bits.
3962     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
3963     // Restore the original width by sign extending.
3964     Known = Known.sext(BitWidth);
3965     break;
3966   }
3967   case RISCVISD::READ_VLENB:
3968     // We assume VLENB is at least 8 bytes.
3969     // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits.
3970     Known.Zero.setLowBits(3);
3971     break;
3972   }
3973 }
3974 
3975 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
3976     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
3977     unsigned Depth) const {
3978   switch (Op.getOpcode()) {
3979   default:
3980     break;
3981   case RISCVISD::SLLW:
3982   case RISCVISD::SRAW:
3983   case RISCVISD::SRLW:
3984   case RISCVISD::DIVW:
3985   case RISCVISD::DIVUW:
3986   case RISCVISD::REMUW:
3987   case RISCVISD::ROLW:
3988   case RISCVISD::RORW:
3989   case RISCVISD::GREVIW:
3990   case RISCVISD::GORCIW:
3991   case RISCVISD::FSLW:
3992   case RISCVISD::FSRW:
3993     // TODO: As the result is sign-extended, this is conservatively correct. A
3994     // more precise answer could be calculated for SRAW depending on known
3995     // bits in the shift amount.
3996     return 33;
3997   case RISCVISD::SHFLI: {
3998     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
3999     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
4000     // will stay within the upper 32 bits. If there were more than 32 sign bits
4001     // before there will be at least 33 sign bits after.
4002     if (Op.getValueType() == MVT::i64 &&
4003         (Op.getConstantOperandVal(1) & 0x10) == 0) {
4004       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4005       if (Tmp > 32)
4006         return 33;
4007     }
4008     break;
4009   }
4010   case RISCVISD::VMV_X_S:
4011     // The number of sign bits of the scalar result is computed by obtaining the
4012     // element type of the input vector operand, subtracting its width from the
4013     // XLEN, and then adding one (sign bit within the element type). If the
4014     // element type is wider than XLen, the least-significant XLEN bits are
4015     // taken.
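    // For example, extracting from a vector of i8 elements on RV64 gives
    // 64 - 8 + 1 = 57 sign bits.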
4016     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
4017       return 1;
4018     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
4019   }
4020 
4021   return 1;
4022 }
4023 
4024 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
4025                                                   MachineBasicBlock *BB) {
4026   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
4027 
4028   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
4029   // Should the count have wrapped while it was being read, we need to try
4030   // again.
4031   // ...
4032   // read:
4033   // rdcycleh x3 # load high word of cycle
4034   // rdcycle  x2 # load low word of cycle
4035   // rdcycleh x4 # load high word of cycle
4036   // bne x3, x4, read # check if high word reads match, otherwise try again
4037   // ...
4038 
4039   MachineFunction &MF = *BB->getParent();
4040   const BasicBlock *LLVM_BB = BB->getBasicBlock();
4041   MachineFunction::iterator It = ++BB->getIterator();
4042 
4043   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
4044   MF.insert(It, LoopMBB);
4045 
4046   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
4047   MF.insert(It, DoneMBB);
4048 
4049   // Transfer the remainder of BB and its successor edges to DoneMBB.
4050   DoneMBB->splice(DoneMBB->begin(), BB,
4051                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
4052   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
4053 
4054   BB->addSuccessor(LoopMBB);
4055 
4056   MachineRegisterInfo &RegInfo = MF.getRegInfo();
4057   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4058   Register LoReg = MI.getOperand(0).getReg();
4059   Register HiReg = MI.getOperand(1).getReg();
4060   DebugLoc DL = MI.getDebugLoc();
4061 
4062   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
4063   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
4064       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
4065       .addReg(RISCV::X0);
4066   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
4067       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
4068       .addReg(RISCV::X0);
4069   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
4070       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
4071       .addReg(RISCV::X0);
4072 
4073   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
4074       .addReg(HiReg)
4075       .addReg(ReadAgainReg)
4076       .addMBB(LoopMBB);
4077 
4078   LoopMBB->addSuccessor(LoopMBB);
4079   LoopMBB->addSuccessor(DoneMBB);
4080 
4081   MI.eraseFromParent();
4082 
4083   return DoneMBB;
4084 }
4085 
4086 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
4087                                              MachineBasicBlock *BB) {
4088   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
4089 
4090   MachineFunction &MF = *BB->getParent();
4091   DebugLoc DL = MI.getDebugLoc();
4092   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
4093   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
4094   Register LoReg = MI.getOperand(0).getReg();
4095   Register HiReg = MI.getOperand(1).getReg();
4096   Register SrcReg = MI.getOperand(2).getReg();
4097   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
4098   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
4099 
4100   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
4101                           RI);
4102   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
4103   MachineMemOperand *MMOLo =
4104       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
4105   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
4106       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
4107   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
4108       .addFrameIndex(FI)
4109       .addImm(0)
4110       .addMemOperand(MMOLo);
4111   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
4112       .addFrameIndex(FI)
4113       .addImm(4)
4114       .addMemOperand(MMOHi);
4115   MI.eraseFromParent(); // The pseudo instruction is gone now.
4116   return BB;
4117 }
4118 
4119 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
4120                                                  MachineBasicBlock *BB) {
4121   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
4122          "Unexpected instruction");
4123 
4124   MachineFunction &MF = *BB->getParent();
4125   DebugLoc DL = MI.getDebugLoc();
4126   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
4127   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
4128   Register DstReg = MI.getOperand(0).getReg();
4129   Register LoReg = MI.getOperand(1).getReg();
4130   Register HiReg = MI.getOperand(2).getReg();
4131   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
4132   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
4133 
4134   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
4135   MachineMemOperand *MMOLo =
4136       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
4137   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
4138       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
4139   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
4140       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
4141       .addFrameIndex(FI)
4142       .addImm(0)
4143       .addMemOperand(MMOLo);
4144   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
4145       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
4146       .addFrameIndex(FI)
4147       .addImm(4)
4148       .addMemOperand(MMOHi);
4149   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
4150   MI.eraseFromParent(); // The pseudo instruction is gone now.
4151   return BB;
4152 }
4153 
4154 static bool isSelectPseudo(MachineInstr &MI) {
4155   switch (MI.getOpcode()) {
4156   default:
4157     return false;
4158   case RISCV::Select_GPR_Using_CC_GPR:
4159   case RISCV::Select_FPR16_Using_CC_GPR:
4160   case RISCV::Select_FPR32_Using_CC_GPR:
4161   case RISCV::Select_FPR64_Using_CC_GPR:
4162     return true;
4163   }
4164 }
4165 
4166 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
4167                                            MachineBasicBlock *BB) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern.  The incoming instructions know the destination vreg
  // to set, the registers to compare, the true/false values to select between,
  // and the condition code to use for the branch.
4172   //
4173   // We produce the following control flow:
4174   //     HeadMBB
4175   //     |  \
4176   //     |  IfFalseMBB
4177   //     | /
4178   //    TailMBB
4179   //
4180   // When we find a sequence of selects we attempt to optimize their emission
4181   // by sharing the control flow. Currently we only handle cases where we have
4182   // multiple selects with the exact same condition (same LHS, RHS and CC).
4183   // The selects may be interleaved with other instructions if the other
4184   // instructions meet some requirements we deem safe:
4185   // - They are debug instructions. Otherwise,
4186   // - They do not have side-effects, do not access memory and their inputs do
4187   //   not depend on the results of the select pseudo-instructions.
4188   // The TrueV/FalseV operands of the selects cannot depend on the result of
4189   // previous selects in the sequence.
4190   // These conditions could be further relaxed. See the X86 target for a
4191   // related approach and more information.
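  //
  // For example, two selects (c ? a : b) and (c ? x : y) guarded by the same
  // comparison share a single conditional branch and become two PHIs in
  // TailMBB.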
4192   Register LHS = MI.getOperand(1).getReg();
4193   Register RHS = MI.getOperand(2).getReg();
4194   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
4195 
4196   SmallVector<MachineInstr *, 4> SelectDebugValues;
4197   SmallSet<Register, 4> SelectDests;
4198   SelectDests.insert(MI.getOperand(0).getReg());
4199 
4200   MachineInstr *LastSelectPseudo = &MI;
4201 
4202   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
4203        SequenceMBBI != E; ++SequenceMBBI) {
4204     if (SequenceMBBI->isDebugInstr())
4205       continue;
4206     else if (isSelectPseudo(*SequenceMBBI)) {
4207       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
4208           SequenceMBBI->getOperand(2).getReg() != RHS ||
4209           SequenceMBBI->getOperand(3).getImm() != CC ||
4210           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
4211           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
4212         break;
4213       LastSelectPseudo = &*SequenceMBBI;
4214       SequenceMBBI->collectDebugValues(SelectDebugValues);
4215       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
4216     } else {
4217       if (SequenceMBBI->hasUnmodeledSideEffects() ||
4218           SequenceMBBI->mayLoadOrStore())
4219         break;
4220       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
4221             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
4222           }))
4223         break;
4224     }
4225   }
4226 
4227   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
4228   const BasicBlock *LLVM_BB = BB->getBasicBlock();
4229   DebugLoc DL = MI.getDebugLoc();
4230   MachineFunction::iterator I = ++BB->getIterator();
4231 
4232   MachineBasicBlock *HeadMBB = BB;
4233   MachineFunction *F = BB->getParent();
4234   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
4235   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
4236 
4237   F->insert(I, IfFalseMBB);
4238   F->insert(I, TailMBB);
4239 
4240   // Transfer debug instructions associated with the selects to TailMBB.
4241   for (MachineInstr *DebugInstr : SelectDebugValues) {
4242     TailMBB->push_back(DebugInstr->removeFromParent());
4243   }
4244 
4245   // Move all instructions after the sequence to TailMBB.
4246   TailMBB->splice(TailMBB->end(), HeadMBB,
4247                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
4248   // Update machine-CFG edges by transferring all successors of the current
4249   // block to the new block which will contain the Phi nodes for the selects.
4250   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
4251   // Set the successors for HeadMBB.
4252   HeadMBB->addSuccessor(IfFalseMBB);
4253   HeadMBB->addSuccessor(TailMBB);
4254 
4255   // Insert appropriate branch.
4256   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
4257 
4258   BuildMI(HeadMBB, DL, TII.get(Opcode))
4259     .addReg(LHS)
4260     .addReg(RHS)
4261     .addMBB(TailMBB);
4262 
4263   // IfFalseMBB just falls through to TailMBB.
4264   IfFalseMBB->addSuccessor(TailMBB);
4265 
4266   // Create PHIs for all of the select pseudo-instructions.
4267   auto SelectMBBI = MI.getIterator();
4268   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
4269   auto InsertionPoint = TailMBB->begin();
4270   while (SelectMBBI != SelectEnd) {
4271     auto Next = std::next(SelectMBBI);
4272     if (isSelectPseudo(*SelectMBBI)) {
4273       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
4274       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
4275               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
4276           .addReg(SelectMBBI->getOperand(4).getReg())
4277           .addMBB(HeadMBB)
4278           .addReg(SelectMBBI->getOperand(5).getReg())
4279           .addMBB(IfFalseMBB);
4280       SelectMBBI->eraseFromParent();
4281     }
4282     SelectMBBI = Next;
4283   }
4284 
4285   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
4286   return TailMBB;
4287 }
4288 
4289 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
4290                                     int VLIndex, unsigned SEWIndex,
4291                                     RISCVVLMUL VLMul, bool ForceTailAgnostic) {
4292   MachineFunction &MF = *BB->getParent();
4293   DebugLoc DL = MI.getDebugLoc();
4294   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
4295 
4296   unsigned SEW = MI.getOperand(SEWIndex).getImm();
4297   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
4298   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
4299 
4300   MachineRegisterInfo &MRI = MF.getRegInfo();
4301 
4302   auto BuildVSETVLI = [&]() {
4303     if (VLIndex >= 0) {
4304       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
4305       Register VLReg = MI.getOperand(VLIndex).getReg();
4306 
4307       // VL might be a compile time constant, but isel would have to put it
4308       // in a register. See if VL comes from an ADDI X0, imm.
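      // For example, a VL produced by "ADDI vl, X0, 4" can be folded into a
      // VSETIVLI with an immediate of 4.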
4309       if (VLReg.isVirtual()) {
4310         MachineInstr *Def = MRI.getVRegDef(VLReg);
4311         if (Def && Def->getOpcode() == RISCV::ADDI &&
4312             Def->getOperand(1).getReg() == RISCV::X0 &&
4313             Def->getOperand(2).isImm()) {
4314           uint64_t Imm = Def->getOperand(2).getImm();
4315           // VSETIVLI allows a 5-bit zero extended immediate.
4316           if (isUInt<5>(Imm))
4317             return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
4318                 .addReg(DestReg, RegState::Define | RegState::Dead)
4319                 .addImm(Imm);
4320         }
4321       }
4322 
4323       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
4324           .addReg(DestReg, RegState::Define | RegState::Dead)
4325           .addReg(VLReg);
4326     }
4327 
    // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
4329     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
4330         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
4331         .addReg(RISCV::X0, RegState::Kill);
4332   };
4333 
4334   MachineInstrBuilder MIB = BuildVSETVLI();
4335 
4336   // Default to tail agnostic unless the destination is tied to a source. In
4337   // that case the user would have some control over the tail values. The tail
4338   // policy is also ignored on instructions that only update element 0 like
4339   // vmv.s.x or reductions so use agnostic there to match the common case.
4340   // FIXME: This is conservatively correct, but we might want to detect that
4341   // the input is undefined.
4342   bool TailAgnostic = true;
4343   unsigned UseOpIdx;
4344   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
4345     TailAgnostic = false;
4346     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
4347     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
4348     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
4349     if (UseMI && UseMI->isImplicitDef())
4350       TailAgnostic = true;
4351   }
4352 
4353   // For simplicity we reuse the vtype representation here.
4354   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
4355                                      /*TailAgnostic*/ TailAgnostic,
4356                                      /*MaskAgnostic*/ false));
4357 
4358   // Remove (now) redundant operands from pseudo
4359   MI.getOperand(SEWIndex).setImm(-1);
4360   if (VLIndex >= 0) {
4361     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
4362     MI.getOperand(VLIndex).setIsKill(false);
4363   }
4364 
4365   return BB;
4366 }
4367 
4368 MachineBasicBlock *
4369 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
4370                                                  MachineBasicBlock *BB) const {
4371   uint64_t TSFlags = MI.getDesc().TSFlags;
4372 
4373   if (TSFlags & RISCVII::HasSEWOpMask) {
4374     unsigned NumOperands = MI.getNumExplicitOperands();
4375     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
4376     unsigned SEWIndex = NumOperands - 1;
4377     bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
4378 
4379     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
4380                                                RISCVII::VLMulShift);
4381     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
4382   }
4383 
4384   switch (MI.getOpcode()) {
4385   default:
4386     llvm_unreachable("Unexpected instr type to insert");
4387   case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
4390     return emitReadCycleWidePseudo(MI, BB);
4391   case RISCV::Select_GPR_Using_CC_GPR:
4392   case RISCV::Select_FPR16_Using_CC_GPR:
4393   case RISCV::Select_FPR32_Using_CC_GPR:
4394   case RISCV::Select_FPR64_Using_CC_GPR:
4395     return emitSelectPseudo(MI, BB);
4396   case RISCV::BuildPairF64Pseudo:
4397     return emitBuildPairF64Pseudo(MI, BB);
4398   case RISCV::SplitF64Pseudo:
4399     return emitSplitF64Pseudo(MI, BB);
4400   }
4401 }
4402 
4403 // Calling Convention Implementation.
4404 // The expectations for frontend ABI lowering vary from target to target.
4405 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
4406 // details, but this is a longer term goal. For now, we simply try to keep the
4407 // role of the frontend as simple and well-defined as possible. The rules can
4408 // be summarised as:
4409 // * Never split up large scalar arguments. We handle them here.
4410 // * If a hardfloat calling convention is being used, and the struct may be
4411 // passed in a pair of registers (fp+fp, int+fp), and both registers are
4412 // available, then pass as two separate arguments. If either the GPRs or FPRs
4413 // are exhausted, then pass according to the rule below.
4414 // * If a struct could never be passed in registers or directly in a stack
4415 // slot (as it is larger than 2*XLEN and the floating point rules don't
4416 // apply), then pass it using a pointer with the byval attribute.
4417 // * If a struct is less than 2*XLEN, then coerce to either a two-element
4418 // word-sized array or a 2*XLEN scalar (depending on alignment).
4419 // * The frontend can determine whether a struct is returned by reference or
4420 // not based on its size and fields. If it will be returned by reference, the
4421 // frontend must modify the prototype so a pointer with the sret annotation is
4422 // passed as the first argument. This is not necessary for large scalar
4423 // returns.
4424 // * Struct return values and varargs should be coerced to structs containing
4425 // register-size fields in the same situations they would be for fixed
4426 // arguments.
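//
// For example, an i128 argument on RV32 is split by type legalisation into
// four XLEN-sized pieces; CC_RISCV below recognises this case and passes the
// value indirectly via a pointer.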
4427 
4428 static const MCPhysReg ArgGPRs[] = {
4429   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
4430   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
4431 };
4432 static const MCPhysReg ArgFPR16s[] = {
4433   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
4434   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
4435 };
4436 static const MCPhysReg ArgFPR32s[] = {
4437   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
4438   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
4439 };
4440 static const MCPhysReg ArgFPR64s[] = {
4441   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
4442   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
4443 };
4444 // This is an interim calling convention and it may be changed in the future.
4445 static const MCPhysReg ArgVRs[] = {
4446     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
4447     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
4448     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
4449 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
4450                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
4451                                      RISCV::V20M2, RISCV::V22M2};
4452 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
4453                                      RISCV::V20M4};
4454 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
4455 
4456 // Pass a 2*XLEN argument that has been split into two XLEN values through
4457 // registers or the stack as necessary.
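// This is used, for example, for an i64 argument on RV32 that legalisation has
// split into two i32 halves.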
4458 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
4459                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
4460                                 MVT ValVT2, MVT LocVT2,
4461                                 ISD::ArgFlagsTy ArgFlags2) {
4462   unsigned XLenInBytes = XLen / 8;
4463   if (Register Reg = State.AllocateReg(ArgGPRs)) {
4464     // At least one half can be passed via register.
4465     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
4466                                      VA1.getLocVT(), CCValAssign::Full));
4467   } else {
4468     // Both halves must be passed on the stack, with proper alignment.
4469     Align StackAlign =
4470         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
4471     State.addLoc(
4472         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
4473                             State.AllocateStack(XLenInBytes, StackAlign),
4474                             VA1.getLocVT(), CCValAssign::Full));
4475     State.addLoc(CCValAssign::getMem(
4476         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
4477         LocVT2, CCValAssign::Full));
4478     return false;
4479   }
4480 
4481   if (Register Reg = State.AllocateReg(ArgGPRs)) {
4482     // The second half can also be passed via register.
4483     State.addLoc(
4484         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
4485   } else {
4486     // The second half is passed via the stack, without additional alignment.
4487     State.addLoc(CCValAssign::getMem(
4488         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
4489         LocVT2, CCValAssign::Full));
4490   }
4491 
4492   return false;
4493 }
4494 
4495 // Implements the RISC-V calling convention. Returns true upon failure.
4496 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
4497                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
4498                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
4499                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
4500                      Optional<unsigned> FirstMaskArgument) {
4501   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
4502   assert(XLen == 32 || XLen == 64);
4503   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
4504 
  // Any return value split into more than two values can't be returned
4506   // directly.
4507   if (IsRet && ValNo > 1)
4508     return true;
4509 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
4512   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
4515   bool UseGPRForF64 = true;
4516 
4517   switch (ABI) {
4518   default:
4519     llvm_unreachable("Unexpected ABI");
4520   case RISCVABI::ABI_ILP32:
4521   case RISCVABI::ABI_LP64:
4522     break;
4523   case RISCVABI::ABI_ILP32F:
4524   case RISCVABI::ABI_LP64F:
4525     UseGPRForF16_F32 = !IsFixed;
4526     break;
4527   case RISCVABI::ABI_ILP32D:
4528   case RISCVABI::ABI_LP64D:
4529     UseGPRForF16_F32 = !IsFixed;
4530     UseGPRForF64 = !IsFixed;
4531     break;
4532   }
4533 
4534   // FPR16, FPR32, and FPR64 alias each other.
4535   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
4536     UseGPRForF16_F32 = true;
4537     UseGPRForF64 = true;
4538   }
4539 
4540   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
4541   // similar local variables rather than directly checking against the target
4542   // ABI.
4543 
4544   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
4545     LocVT = XLenVT;
4546     LocInfo = CCValAssign::BCvt;
4547   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
4548     LocVT = MVT::i64;
4549     LocInfo = CCValAssign::BCvt;
4550   }
4551 
4552   // If this is a variadic argument, the RISC-V calling convention requires
4553   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
4554   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
4555   // be used regardless of whether the original argument was split during
4556   // legalisation or not. The argument will not be passed by registers if the
4557   // original type is larger than 2*XLEN, so the register alignment rule does
4558   // not apply.
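  //
  // For example, on RV32 a variadic double passed after a single int argument
  // skips a1 and is assigned to the aligned a2/a3 pair.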
4559   unsigned TwoXLenInBytes = (2 * XLen) / 8;
4560   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
4561       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
4562     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
4563     // Skip 'odd' register if necessary.
4564     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
4565       State.AllocateReg(ArgGPRs);
4566   }
4567 
4568   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
4569   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
4570       State.getPendingArgFlags();
4571 
4572   assert(PendingLocs.size() == PendingArgFlags.size() &&
4573          "PendingLocs and PendingArgFlags out of sync");
4574 
4575   // Handle passing f64 on RV32D with a soft float ABI or when floating point
4576   // registers are exhausted.
4577   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
4578     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
4579            "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
4581     // GPRs, split between a GPR and the stack, or passed completely on the
4582     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
4583     // cases.
4584     Register Reg = State.AllocateReg(ArgGPRs);
4585     LocVT = MVT::i32;
4586     if (!Reg) {
4587       unsigned StackOffset = State.AllocateStack(8, Align(8));
4588       State.addLoc(
4589           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
4590       return false;
4591     }
4592     if (!State.AllocateReg(ArgGPRs))
4593       State.AllocateStack(4, Align(4));
4594     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4595     return false;
4596   }
4597 
4598   // Split arguments might be passed indirectly, so keep track of the pending
4599   // values.
4600   if (ArgFlags.isSplit() || !PendingLocs.empty()) {
4601     LocVT = XLenVT;
4602     LocInfo = CCValAssign::Indirect;
4603     PendingLocs.push_back(
4604         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
4605     PendingArgFlags.push_back(ArgFlags);
4606     if (!ArgFlags.isSplitEnd()) {
4607       return false;
4608     }
4609   }
4610 
4611   // If the split argument only had two elements, it should be passed directly
4612   // in registers or on the stack.
4613   if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
4614     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
4615     // Apply the normal calling convention rules to the first half of the
4616     // split argument.
4617     CCValAssign VA = PendingLocs[0];
4618     ISD::ArgFlagsTy AF = PendingArgFlags[0];
4619     PendingLocs.clear();
4620     PendingArgFlags.clear();
4621     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
4622                                ArgFlags);
4623   }
4624 
4625   // Allocate to a register if possible, or else a stack slot.
4626   Register Reg;
4627   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
4628     Reg = State.AllocateReg(ArgFPR16s);
4629   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
4630     Reg = State.AllocateReg(ArgFPR32s);
4631   else if (ValVT == MVT::f64 && !UseGPRForF64)
4632     Reg = State.AllocateReg(ArgFPR64s);
4633   else if (ValVT.isScalableVector()) {
4634     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
4635     if (RC == &RISCV::VRRegClass) {
4636       // Assign the first mask argument to V0.
4637       // This is an interim calling convention and it may be changed in the
4638       // future.
4639       if (FirstMaskArgument.hasValue() &&
4640           ValNo == FirstMaskArgument.getValue()) {
4641         Reg = State.AllocateReg(RISCV::V0);
4642       } else {
4643         Reg = State.AllocateReg(ArgVRs);
4644       }
4645     } else if (RC == &RISCV::VRM2RegClass) {
4646       Reg = State.AllocateReg(ArgVRM2s);
4647     } else if (RC == &RISCV::VRM4RegClass) {
4648       Reg = State.AllocateReg(ArgVRM4s);
4649     } else if (RC == &RISCV::VRM8RegClass) {
4650       Reg = State.AllocateReg(ArgVRM8s);
4651     } else {
4652       llvm_unreachable("Unhandled class register for ValueType");
4653     }
4654     if (!Reg) {
4655       LocInfo = CCValAssign::Indirect;
4656       // Try using a GPR to pass the address
4657       Reg = State.AllocateReg(ArgGPRs);
4658       LocVT = XLenVT;
4659     }
4660   } else
4661     Reg = State.AllocateReg(ArgGPRs);
4662   unsigned StackOffset =
4663       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
4664 
4665   // If we reach this point and PendingLocs is non-empty, we must be at the
4666   // end of a split argument that must be passed indirectly.
4667   if (!PendingLocs.empty()) {
4668     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
4669     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
4670 
4671     for (auto &It : PendingLocs) {
4672       if (Reg)
4673         It.convertToReg(Reg);
4674       else
4675         It.convertToMem(StackOffset);
4676       State.addLoc(It);
4677     }
4678     PendingLocs.clear();
4679     PendingArgFlags.clear();
4680     return false;
4681   }
4682 
  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
         "Expected an XLenVT or a scalable vector type at this stage");
4686 
4687   if (Reg) {
4688     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4689     return false;
4690   }
4691 
4692   // When a floating-point value is passed on the stack, no bit-conversion is
4693   // needed.
4694   if (ValVT.isFloatingPoint()) {
4695     LocVT = ValVT;
4696     LocInfo = CCValAssign::Full;
4697   }
4698   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
4699   return false;
4700 }
4701 
4702 template <typename ArgTy>
4703 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
4704   for (const auto &ArgIdx : enumerate(Args)) {
4705     MVT ArgVT = ArgIdx.value().VT;
4706     if (ArgVT.isScalableVector() &&
4707         ArgVT.getVectorElementType().SimpleTy == MVT::i1)
4708       return ArgIdx.index();
4709   }
4710   return None;
4711 }
4712 
4713 void RISCVTargetLowering::analyzeInputArgs(
4714     MachineFunction &MF, CCState &CCInfo,
4715     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
4716   unsigned NumArgs = Ins.size();
4717   FunctionType *FType = MF.getFunction().getFunctionType();
4718 
4719   Optional<unsigned> FirstMaskArgument;
4720   if (Subtarget.hasStdExtV())
4721     FirstMaskArgument = preAssignMask(Ins);
4722 
4723   for (unsigned i = 0; i != NumArgs; ++i) {
4724     MVT ArgVT = Ins[i].VT;
4725     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
4726 
4727     Type *ArgTy = nullptr;
4728     if (IsRet)
4729       ArgTy = FType->getReturnType();
4730     else if (Ins[i].isOrigArg())
4731       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
4732 
4733     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
4734     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
4735                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
4736                  FirstMaskArgument)) {
4737       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
4738                         << EVT(ArgVT).getEVTString() << '\n');
4739       llvm_unreachable(nullptr);
4740     }
4741   }
4742 }
4743 
4744 void RISCVTargetLowering::analyzeOutputArgs(
4745     MachineFunction &MF, CCState &CCInfo,
4746     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
4747     CallLoweringInfo *CLI) const {
4748   unsigned NumArgs = Outs.size();
4749 
4750   Optional<unsigned> FirstMaskArgument;
4751   if (Subtarget.hasStdExtV())
4752     FirstMaskArgument = preAssignMask(Outs);
4753 
4754   for (unsigned i = 0; i != NumArgs; i++) {
4755     MVT ArgVT = Outs[i].VT;
4756     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
4757     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
4758 
4759     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
4760     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
4761                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
4762                  FirstMaskArgument)) {
4763       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
4764                         << EVT(ArgVT).getEVTString() << "\n");
4765       llvm_unreachable(nullptr);
4766     }
4767   }
4768 }
4769 
4770 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
4771 // values.
4772 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
4773                                    const CCValAssign &VA, const SDLoc &DL) {
4774   switch (VA.getLocInfo()) {
4775   default:
4776     llvm_unreachable("Unexpected CCValAssign::LocInfo");
4777   case CCValAssign::Full:
4778     break;
4779   case CCValAssign::BCvt:
4780     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
4781       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
4782     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
4783       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
4784     else
4785       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
4786     break;
4787   }
4788   return Val;
4789 }
4790 
4791 // The caller is responsible for loading the full value if the argument is
4792 // passed with CCValAssign::Indirect.
4793 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
4794                                 const CCValAssign &VA, const SDLoc &DL,
4795                                 const RISCVTargetLowering &TLI) {
4796   MachineFunction &MF = DAG.getMachineFunction();
4797   MachineRegisterInfo &RegInfo = MF.getRegInfo();
4798   EVT LocVT = VA.getLocVT();
4799   SDValue Val;
4800   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
4801   Register VReg = RegInfo.createVirtualRegister(RC);
4802   RegInfo.addLiveIn(VA.getLocReg(), VReg);
4803   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
4804 
4805   if (VA.getLocInfo() == CCValAssign::Indirect)
4806     return Val;
4807 
4808   return convertLocVTToValVT(DAG, Val, VA, DL);
4809 }
4810 
4811 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
4812                                    const CCValAssign &VA, const SDLoc &DL) {
4813   EVT LocVT = VA.getLocVT();
4814 
4815   switch (VA.getLocInfo()) {
4816   default:
4817     llvm_unreachable("Unexpected CCValAssign::LocInfo");
4818   case CCValAssign::Full:
4819     break;
4820   case CCValAssign::BCvt:
4821     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
4822       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
4823     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
4824       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
4825     else
4826       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
4827     break;
4828   }
4829   return Val;
4830 }
4831 
4832 // The caller is responsible for loading the full value if the argument is
4833 // passed with CCValAssign::Indirect.
4834 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
4835                                 const CCValAssign &VA, const SDLoc &DL) {
4836   MachineFunction &MF = DAG.getMachineFunction();
4837   MachineFrameInfo &MFI = MF.getFrameInfo();
4838   EVT LocVT = VA.getLocVT();
4839   EVT ValVT = VA.getValVT();
4840   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
4841   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
4842                                  VA.getLocMemOffset(), /*Immutable=*/true);
4843   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4844   SDValue Val;
4845 
4846   ISD::LoadExtType ExtType;
4847   switch (VA.getLocInfo()) {
4848   default:
4849     llvm_unreachable("Unexpected CCValAssign::LocInfo");
4850   case CCValAssign::Full:
4851   case CCValAssign::Indirect:
4852   case CCValAssign::BCvt:
4853     ExtType = ISD::NON_EXTLOAD;
4854     break;
4855   }
4856   Val = DAG.getExtLoad(
4857       ExtType, DL, LocVT, Chain, FIN,
4858       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
4859   return Val;
4860 }
4861 
4862 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
4863                                        const CCValAssign &VA, const SDLoc &DL) {
4864   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
4865          "Unexpected VA");
4866   MachineFunction &MF = DAG.getMachineFunction();
4867   MachineFrameInfo &MFI = MF.getFrameInfo();
4868   MachineRegisterInfo &RegInfo = MF.getRegInfo();
4869 
4870   if (VA.isMemLoc()) {
4871     // f64 is passed on the stack.
4872     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
4873     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
4874     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
4875                        MachinePointerInfo::getFixedStack(MF, FI));
4876   }
4877 
4878   assert(VA.isRegLoc() && "Expected register VA assignment");
4879 
4880   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4881   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
4882   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
4883   SDValue Hi;
4884   if (VA.getLocReg() == RISCV::X17) {
4885     // Second half of f64 is passed on the stack.
4886     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
4887     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
4888     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
4889                      MachinePointerInfo::getFixedStack(MF, FI));
4890   } else {
4891     // Second half of f64 is passed in another GPR.
4892     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4893     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
4894     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
4895   }
4896   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
4897 }
4898 
// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but in theory it may be beneficial for some cases.
4901 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
4902                             CCValAssign::LocInfo LocInfo,
4903                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
4904 
4905   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
4906     // X5 and X6 might be used for save-restore libcall.
4907     static const MCPhysReg GPRList[] = {
4908         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
4909         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
4910         RISCV::X29, RISCV::X30, RISCV::X31};
4911     if (unsigned Reg = State.AllocateReg(GPRList)) {
4912       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4913       return false;
4914     }
4915   }
4916 
4917   if (LocVT == MVT::f16) {
4918     static const MCPhysReg FPR16List[] = {
4919         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
4920         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
4921         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
4922         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
4923     if (unsigned Reg = State.AllocateReg(FPR16List)) {
4924       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4925       return false;
4926     }
4927   }
4928 
4929   if (LocVT == MVT::f32) {
4930     static const MCPhysReg FPR32List[] = {
4931         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
4932         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
4933         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
4934         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
4935     if (unsigned Reg = State.AllocateReg(FPR32List)) {
4936       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4937       return false;
4938     }
4939   }
4940 
4941   if (LocVT == MVT::f64) {
4942     static const MCPhysReg FPR64List[] = {
4943         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
4944         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
4945         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
4946         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
4947     if (unsigned Reg = State.AllocateReg(FPR64List)) {
4948       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4949       return false;
4950     }
4951   }
4952 
4953   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
4954     unsigned Offset4 = State.AllocateStack(4, Align(4));
4955     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
4956     return false;
4957   }
4958 
4959   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
4960     unsigned Offset5 = State.AllocateStack(8, Align(8));
4961     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
4962     return false;
4963   }
4964 
4965   return true; // CC didn't match.
4966 }
4967 
4968 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
4969                          CCValAssign::LocInfo LocInfo,
4970                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
4971 
4972   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
4973     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
4974     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
4975     static const MCPhysReg GPRList[] = {
4976         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
4977         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
4978     if (unsigned Reg = State.AllocateReg(GPRList)) {
4979       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4980       return false;
4981     }
4982   }
4983 
4984   if (LocVT == MVT::f32) {
4985     // Pass in STG registers: F1, ..., F6
4986     //                        fs0 ... fs5
4987     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
4988                                           RISCV::F18_F, RISCV::F19_F,
4989                                           RISCV::F20_F, RISCV::F21_F};
4990     if (unsigned Reg = State.AllocateReg(FPR32List)) {
4991       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
4992       return false;
4993     }
4994   }
4995 
4996   if (LocVT == MVT::f64) {
4997     // Pass in STG registers: D1, ..., D6
4998     //                        fs6 ... fs11
4999     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
5000                                           RISCV::F24_D, RISCV::F25_D,
5001                                           RISCV::F26_D, RISCV::F27_D};
5002     if (unsigned Reg = State.AllocateReg(FPR64List)) {
5003       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5004       return false;
5005     }
5006   }
5007 
5008   report_fatal_error("No registers left in GHC calling convention");
5009   return true;
5010 }
5011 
5012 // Transform physical registers into virtual registers.
5013 SDValue RISCVTargetLowering::LowerFormalArguments(
5014     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
5015     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5016     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5017 
5018   MachineFunction &MF = DAG.getMachineFunction();
5019 
5020   switch (CallConv) {
5021   default:
5022     report_fatal_error("Unsupported calling convention");
5023   case CallingConv::C:
5024   case CallingConv::Fast:
5025     break;
5026   case CallingConv::GHC:
5027     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
5028         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
5029       report_fatal_error(
5030         "GHC calling convention requires the F and D instruction set extensions");
5031   }
5032 
5033   const Function &Func = MF.getFunction();
5034   if (Func.hasFnAttribute("interrupt")) {
5035     if (!Func.arg_empty())
5036       report_fatal_error(
5037         "Functions with the interrupt attribute cannot have arguments!");
5038 
5039     StringRef Kind =
5040       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
5041 
5042     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
5043       report_fatal_error(
5044         "Function interrupt attribute argument not supported!");
5045   }
5046 
5047   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5048   MVT XLenVT = Subtarget.getXLenVT();
5049   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
5051   std::vector<SDValue> OutChains;
5052 
5053   // Assign locations to all of the incoming arguments.
5054   SmallVector<CCValAssign, 16> ArgLocs;
5055   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5056 
5057   if (CallConv == CallingConv::Fast)
5058     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
5059   else if (CallConv == CallingConv::GHC)
5060     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
5061   else
5062     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
5063 
5064   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5065     CCValAssign &VA = ArgLocs[i];
5066     SDValue ArgValue;
5067     // Passing f64 on RV32D with a soft float ABI must be handled as a special
5068     // case.
5069     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
5070       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
5071     else if (VA.isRegLoc())
5072       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
5073     else
5074       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
5075 
5076     if (VA.getLocInfo() == CCValAssign::Indirect) {
5077       // If the original argument was split and passed by reference (e.g. i128
5078       // on RV32), we need to load all parts of it here (using the same
5079       // address).
5080       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
5081                                    MachinePointerInfo()));
5082       unsigned ArgIndex = Ins[i].OrigArgIndex;
5083       assert(Ins[i].PartOffset == 0);
5084       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
5085         CCValAssign &PartVA = ArgLocs[i + 1];
5086         unsigned PartOffset = Ins[i + 1].PartOffset;
5087         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
5088                                       DAG.getIntPtrConstant(PartOffset, DL));
5089         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
5090                                      MachinePointerInfo()));
5091         ++i;
5092       }
5093       continue;
5094     }
5095     InVals.push_back(ArgValue);
5096   }
5097 
5098   if (IsVarArg) {
5099     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
5100     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
5101     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
5102     MachineFrameInfo &MFI = MF.getFrameInfo();
5103     MachineRegisterInfo &RegInfo = MF.getRegInfo();
5104     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
5105 
5106     // Offset of the first variable argument from stack pointer, and size of
5107     // the vararg save area. For now, the varargs save area is either zero or
5108     // large enough to hold a0-a7.
5109     int VaArgOffset, VarArgsSaveSize;
5110 
5111     // If all registers are allocated, then all varargs must be passed on the
5112     // stack and we don't need to save any argregs.
5113     if (ArgRegs.size() == Idx) {
5114       VaArgOffset = CCInfo.getNextStackOffset();
5115       VarArgsSaveSize = 0;
5116     } else {
5117       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
5118       VaArgOffset = -VarArgsSaveSize;
5119     }
5120 
5121     // Record the frame index of the first variable argument, which is
5122     // needed by VASTART.
5123     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5124     RVFI->setVarArgsFrameIndex(FI);
5125 
5126     // If saving an odd number of registers then create an extra stack slot to
5127     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
5128     // offsets to even-numbered registers remain 2*XLEN-aligned.
5129     if (Idx % 2) {
5130       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
5131       VarArgsSaveSize += XLenInBytes;
5132     }
5133 
5134     // Copy the integer registers that may have been used for passing varargs
5135     // to the vararg save area.
5136     for (unsigned I = Idx; I < ArgRegs.size();
5137          ++I, VaArgOffset += XLenInBytes) {
5138       const Register Reg = RegInfo.createVirtualRegister(RC);
5139       RegInfo.addLiveIn(ArgRegs[I], Reg);
5140       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
5141       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5142       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5143       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
5144                                    MachinePointerInfo::getFixedStack(MF, FI));
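           // The fixed stack slot has no corresponding IR object, so drop the
           // IR Value from the store's memory operand.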
5145       cast<StoreSDNode>(Store.getNode())
5146           ->getMemOperand()
5147           ->setValue((Value *)nullptr);
5148       OutChains.push_back(Store);
5149     }
5150     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
5151   }
5152 
5153   // All stores are grouped in one token factor node so that the sizes of
5154   // Ins and InVals stay matched. This only happens for vararg functions.
5155   if (!OutChains.empty()) {
5156     OutChains.push_back(Chain);
5157     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
5158   }
5159 
5160   return Chain;
5161 }
5162 
5163 /// isEligibleForTailCallOptimization - Check whether the call is eligible
5164 /// for tail call optimization.
5165 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
5166 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
5167     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
5168     const SmallVector<CCValAssign, 16> &ArgLocs) const {
5169 
5170   auto &Callee = CLI.Callee;
5171   auto CalleeCC = CLI.CallConv;
5172   auto &Outs = CLI.Outs;
5173   auto &Caller = MF.getFunction();
5174   auto CallerCC = Caller.getCallingConv();
5175 
5176   // Exception-handling functions need a special set of instructions to
5177   // indicate a return to the hardware. Tail-calling another function would
5178   // probably break this.
5179   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
5180   // should be expanded as new function attributes are introduced.
5181   if (Caller.hasFnAttribute("interrupt"))
5182     return false;
5183 
5184   // Do not tail call opt if the stack is used to pass parameters.
5185   if (CCInfo.getNextStackOffset() != 0)
5186     return false;
5187 
5188   // Do not tail call opt if any parameters need to be passed indirectly.
5189   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
5190   // passed indirectly: the address of the value is passed in a register,
5191   // or put on the stack if no register is available. Passing indirectly
5192   // often requires allocating stack space to hold the value itself, so
5193   // the CCInfo.getNextStackOffset() != 0 check above is not sufficient
5194   // on its own; we must also check whether any CCValAssign in ArgLocs is
5195   // passed CCValAssign::Indirect.
5196   for (auto &VA : ArgLocs)
5197     if (VA.getLocInfo() == CCValAssign::Indirect)
5198       return false;
5199 
5200   // Do not tail call opt if either caller or callee uses struct return
5201   // semantics.
5202   auto IsCallerStructRet = Caller.hasStructRetAttr();
5203   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
5204   if (IsCallerStructRet || IsCalleeStructRet)
5205     return false;
5206 
5207   // Externally-defined functions with weak linkage should not be
5208   // tail-called. The behaviour of branch instructions in this situation (as
5209   // used for tail calls) is implementation-defined, so we cannot rely on the
5210   // linker replacing the tail call with a return.
5211   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5212     const GlobalValue *GV = G->getGlobal();
5213     if (GV->hasExternalWeakLinkage())
5214       return false;
5215   }
5216 
5217   // The callee has to preserve all registers the caller needs to preserve.
5218   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5219   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5220   if (CalleeCC != CallerCC) {
5221     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5222     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5223       return false;
5224   }
5225 
5226   // Byval parameters hand the function a pointer directly into the stack area
5227   // we want to reuse during a tail call. Working around this *is* possible
5228   // but less efficient and uglier in LowerCall.
5229   for (auto &Arg : Outs)
5230     if (Arg.Flags.isByVal())
5231       return false;
5232 
5233   return true;
5234 }
5235 
5236 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
5237 // and output parameter nodes.
5238 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
5239                                        SmallVectorImpl<SDValue> &InVals) const {
5240   SelectionDAG &DAG = CLI.DAG;
5241   SDLoc &DL = CLI.DL;
5242   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5243   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5244   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5245   SDValue Chain = CLI.Chain;
5246   SDValue Callee = CLI.Callee;
5247   bool &IsTailCall = CLI.IsTailCall;
5248   CallingConv::ID CallConv = CLI.CallConv;
5249   bool IsVarArg = CLI.IsVarArg;
5250   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5251   MVT XLenVT = Subtarget.getXLenVT();
5252 
5253   MachineFunction &MF = DAG.getMachineFunction();
5254 
5255   // Analyze the operands of the call, assigning locations to each operand.
5256   SmallVector<CCValAssign, 16> ArgLocs;
5257   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5258 
5259   if (CallConv == CallingConv::Fast)
5260     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
5261   else if (CallConv == CallingConv::GHC)
5262     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
5263   else
5264     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
5265 
5266   // Check if it's really possible to do a tail call.
5267   if (IsTailCall)
5268     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
5269 
5270   if (IsTailCall)
5271     ++NumTailCalls;
5272   else if (CLI.CB && CLI.CB->isMustTailCall())
5273     report_fatal_error("failed to perform tail call elimination on a call "
5274                        "site marked musttail");
5275 
5276   // Get a count of how many bytes are to be pushed on the stack.
5277   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
5278 
5279   // Create local copies for byval args
5280   SmallVector<SDValue, 8> ByValArgs;
5281   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5282     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5283     if (!Flags.isByVal())
5284       continue;
5285 
5286     SDValue Arg = OutVals[i];
5287     unsigned Size = Flags.getByValSize();
5288     Align Alignment = Flags.getNonZeroByValAlign();
5289 
5290     int FI =
5291         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
5292     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5293     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
5294 
5295     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
5296                           /*IsVolatile=*/false,
5297                           /*AlwaysInline=*/false, IsTailCall,
5298                           MachinePointerInfo(), MachinePointerInfo());
5299     ByValArgs.push_back(FIPtr);
5300   }
5301 
5302   if (!IsTailCall)
5303     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
5304 
5305   // Copy argument values to their designated locations.
5306   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
5307   SmallVector<SDValue, 8> MemOpChains;
5308   SDValue StackPtr;
5309   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
5310     CCValAssign &VA = ArgLocs[i];
5311     SDValue ArgValue = OutVals[i];
5312     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5313 
5314     // Handle passing f64 on RV32D with a soft float ABI as a special case.
5315     bool IsF64OnRV32DSoftABI =
5316         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
5317     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
5318       SDValue SplitF64 = DAG.getNode(
5319           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
5320       SDValue Lo = SplitF64.getValue(0);
5321       SDValue Hi = SplitF64.getValue(1);
5322 
5323       Register RegLo = VA.getLocReg();
5324       RegsToPass.push_back(std::make_pair(RegLo, Lo));
5325 
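           // X17 (a7) is the last GPR used for argument passing, so if the low
           // half was assigned there the high half must go on the stack.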
5326       if (RegLo == RISCV::X17) {
5327         // Second half of f64 is passed on the stack.
5328         // Work out the address of the stack slot.
5329         if (!StackPtr.getNode())
5330           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
5331         // Emit the store.
5332         MemOpChains.push_back(
5333             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
5334       } else {
5335         // Second half of f64 is passed in another GPR.
5336         assert(RegLo < RISCV::X31 && "Invalid register pair");
5337         Register RegHigh = RegLo + 1;
5338         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
5339       }
5340       continue;
5341     }
5342 
5343     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
5344     // as any other MemLoc.
5345 
5346     // Promote the value if needed.
5347     // For now, only handle fully promoted and indirect arguments.
5348     if (VA.getLocInfo() == CCValAssign::Indirect) {
5349       // Store the argument in a stack slot and pass its address.
5350       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
5351       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
5352       MemOpChains.push_back(
5353           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
5354                        MachinePointerInfo::getFixedStack(MF, FI)));
5355       // If the original argument was split (e.g. i128), we need
5356       // to store all parts of it here (and pass just one address).
5357       unsigned ArgIndex = Outs[i].OrigArgIndex;
5358       assert(Outs[i].PartOffset == 0);
5359       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
5360         SDValue PartValue = OutVals[i + 1];
5361         unsigned PartOffset = Outs[i + 1].PartOffset;
5362         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
5363                                       DAG.getIntPtrConstant(PartOffset, DL));
5364         MemOpChains.push_back(
5365             DAG.getStore(Chain, DL, PartValue, Address,
5366                          MachinePointerInfo::getFixedStack(MF, FI)));
5367         ++i;
5368       }
5369       ArgValue = SpillSlot;
5370     } else {
5371       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
5372     }
5373 
5374     // Use local copy if it is a byval arg.
5375     if (Flags.isByVal())
5376       ArgValue = ByValArgs[j++];
5377 
5378     if (VA.isRegLoc()) {
5379       // Queue up the argument copies and emit them at the end.
5380       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
5381     } else {
5382       assert(VA.isMemLoc() && "Argument not register or memory");
5383       assert(!IsTailCall && "Tail call not allowed if stack is used "
5384                             "for passing parameters");
5385 
5386       // Work out the address of the stack slot.
5387       if (!StackPtr.getNode())
5388         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
5389       SDValue Address =
5390           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
5391                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
5392 
5393       // Emit the store.
5394       MemOpChains.push_back(
5395           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
5396     }
5397   }
5398 
5399   // Join the stores, which are independent of one another.
5400   if (!MemOpChains.empty())
5401     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
5402 
5403   SDValue Glue;
5404 
5405   // Build a sequence of copy-to-reg nodes, chained and glued together.
5406   for (auto &Reg : RegsToPass) {
5407     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
5408     Glue = Chain.getValue(1);
5409   }
5410 
5411   // Validate that none of the argument registers have been marked as
5412   // reserved; if any have, report an error. Do the same for the return
5413   // address if this is not a tail call.
5414   validateCCReservedRegs(RegsToPass, MF);
5415   if (!IsTailCall &&
5416       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
5417     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
5418         MF.getFunction(),
5419         "Return address register required, but has been reserved."});
5420 
5421   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
5422   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
5423   // split it and then direct call can be matched by PseudoCALL.
5424   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
5425     const GlobalValue *GV = S->getGlobal();
5426 
5427     unsigned OpFlags = RISCVII::MO_CALL;
5428     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
5429       OpFlags = RISCVII::MO_PLT;
5430 
5431     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
5432   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5433     unsigned OpFlags = RISCVII::MO_CALL;
5434 
5435     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
5436                                                  nullptr))
5437       OpFlags = RISCVII::MO_PLT;
5438 
5439     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
5440   }
5441 
5442   // The first call operand is the chain and the second is the target address.
5443   SmallVector<SDValue, 8> Ops;
5444   Ops.push_back(Chain);
5445   Ops.push_back(Callee);
5446 
5447   // Add argument registers to the end of the list so that they are
5448   // known live into the call.
5449   for (auto &Reg : RegsToPass)
5450     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
5451 
5452   if (!IsTailCall) {
5453     // Add a register mask operand representing the call-preserved registers.
5454     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5455     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
5456     assert(Mask && "Missing call preserved mask for calling convention");
5457     Ops.push_back(DAG.getRegisterMask(Mask));
5458   }
5459 
5460   // Glue the call to the argument copies, if any.
5461   if (Glue.getNode())
5462     Ops.push_back(Glue);
5463 
5464   // Emit the call.
5465   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
5466 
5467   if (IsTailCall) {
5468     MF.getFrameInfo().setHasTailCall();
5469     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
5470   }
5471 
5472   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
5473   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
5474   Glue = Chain.getValue(1);
5475 
5476   // Mark the end of the call, which is glued to the call itself.
5477   Chain = DAG.getCALLSEQ_END(Chain,
5478                              DAG.getConstant(NumBytes, DL, PtrVT, true),
5479                              DAG.getConstant(0, DL, PtrVT, true),
5480                              Glue, DL);
5481   Glue = Chain.getValue(1);
5482 
5483   // Assign locations to each value returned by this call.
5484   SmallVector<CCValAssign, 16> RVLocs;
5485   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
5486   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
5487 
5488   // Copy all of the result registers out of their specified physreg.
5489   for (auto &VA : RVLocs) {
5490     // Copy the value out
5491     SDValue RetValue =
5492         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
5493     // Glue the RetValue to the end of the call sequence
5494     Chain = RetValue.getValue(1);
5495     Glue = RetValue.getValue(2);
5496 
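         // On RV32 with a soft-float ABI an f64 return value is split across a
         // pair of GPRs; read the second half and rebuild the f64 from both.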
5497     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
5498       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
5499       SDValue RetValue2 =
5500           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
5501       Chain = RetValue2.getValue(1);
5502       Glue = RetValue2.getValue(2);
5503       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
5504                              RetValue2);
5505     }
5506 
5507     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
5508 
5509     InVals.push_back(RetValue);
5510   }
5511 
5512   return Chain;
5513 }
5514 
5515 bool RISCVTargetLowering::CanLowerReturn(
5516     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
5517     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
5518   SmallVector<CCValAssign, 16> RVLocs;
5519   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
5520 
5521   Optional<unsigned> FirstMaskArgument;
5522   if (Subtarget.hasStdExtV())
5523     FirstMaskArgument = preAssignMask(Outs);
5524 
5525   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5526     MVT VT = Outs[i].VT;
5527     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5528     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
5529     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
5530                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
5531                  *this, FirstMaskArgument))
5532       return false;
5533   }
5534   return true;
5535 }
5536 
5537 SDValue
5538 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
5539                                  bool IsVarArg,
5540                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
5541                                  const SmallVectorImpl<SDValue> &OutVals,
5542                                  const SDLoc &DL, SelectionDAG &DAG) const {
5543   const MachineFunction &MF = DAG.getMachineFunction();
5544   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
5545 
5546   // Stores the assignment of the return value to a location.
5547   SmallVector<CCValAssign, 16> RVLocs;
5548 
5549   // Info about the registers and stack slot.
5550   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
5551                  *DAG.getContext());
5552 
5553   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
5554                     nullptr);
5555 
5556   if (CallConv == CallingConv::GHC && !RVLocs.empty())
5557     report_fatal_error("GHC functions return void only");
5558 
5559   SDValue Glue;
5560   SmallVector<SDValue, 4> RetOps(1, Chain);
5561 
5562   // Copy the result values into the output registers.
5563   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
5564     SDValue Val = OutVals[i];
5565     CCValAssign &VA = RVLocs[i];
5566     assert(VA.isRegLoc() && "Can only return in registers!");
5567 
5568     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
5569       // Handle returning f64 on RV32D with a soft float ABI.
5570       assert(VA.isRegLoc() && "Expected return via registers");
5571       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
5572                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
5573       SDValue Lo = SplitF64.getValue(0);
5574       SDValue Hi = SplitF64.getValue(1);
5575       Register RegLo = VA.getLocReg();
5576       assert(RegLo < RISCV::X31 && "Invalid register pair");
5577       Register RegHi = RegLo + 1;
5578 
5579       if (STI.isRegisterReservedByUser(RegLo) ||
5580           STI.isRegisterReservedByUser(RegHi))
5581         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
5582             MF.getFunction(),
5583             "Return value register required, but has been reserved."});
5584 
5585       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
5586       Glue = Chain.getValue(1);
5587       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
5588       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
5589       Glue = Chain.getValue(1);
5590       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
5591     } else {
5592       // Handle a 'normal' return.
5593       Val = convertValVTToLocVT(DAG, Val, VA, DL);
5594       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
5595 
5596       if (STI.isRegisterReservedByUser(VA.getLocReg()))
5597         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
5598             MF.getFunction(),
5599             "Return value register required, but has been reserved."});
5600 
5601       // Guarantee that all emitted copies are stuck together.
5602       Glue = Chain.getValue(1);
5603       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
5604     }
5605   }
5606 
5607   RetOps[0] = Chain; // Update chain.
5608 
5609   // Add the glue node if we have it.
5610   if (Glue.getNode()) {
5611     RetOps.push_back(Glue);
5612   }
5613 
5614   // Interrupt service routines use different return instructions.
5615   const Function &Func = DAG.getMachineFunction().getFunction();
5616   if (Func.hasFnAttribute("interrupt")) {
5617     if (!Func.getReturnType()->isVoidTy())
5618       report_fatal_error(
5619           "Functions with the interrupt attribute must have void return type!");
5620 
5621     MachineFunction &MF = DAG.getMachineFunction();
5622     StringRef Kind =
5623       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
5624 
5625     unsigned RetOpc;
5626     if (Kind == "user")
5627       RetOpc = RISCVISD::URET_FLAG;
5628     else if (Kind == "supervisor")
5629       RetOpc = RISCVISD::SRET_FLAG;
5630     else
5631       RetOpc = RISCVISD::MRET_FLAG;
5632 
5633     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
5634   }
5635 
5636   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
5637 }
5638 
5639 void RISCVTargetLowering::validateCCReservedRegs(
5640     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
5641     MachineFunction &MF) const {
5642   const Function &F = MF.getFunction();
5643   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
5644 
5645   if (llvm::any_of(Regs, [&STI](auto Reg) {
5646         return STI.isRegisterReservedByUser(Reg.first);
5647       }))
5648     F.getContext().diagnose(DiagnosticInfoUnsupported{
5649         F, "Argument register required, but has been reserved."});
5650 }
5651 
5652 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
5653   return CI->isTailCall();
5654 }
5655 
5656 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
5657 #define NODE_NAME_CASE(NODE)                                                   \
5658   case RISCVISD::NODE:                                                         \
5659     return "RISCVISD::" #NODE;
5660   // clang-format off
5661   switch ((RISCVISD::NodeType)Opcode) {
5662   case RISCVISD::FIRST_NUMBER:
5663     break;
5664   NODE_NAME_CASE(RET_FLAG)
5665   NODE_NAME_CASE(URET_FLAG)
5666   NODE_NAME_CASE(SRET_FLAG)
5667   NODE_NAME_CASE(MRET_FLAG)
5668   NODE_NAME_CASE(CALL)
5669   NODE_NAME_CASE(SELECT_CC)
5670   NODE_NAME_CASE(BuildPairF64)
5671   NODE_NAME_CASE(SplitF64)
5672   NODE_NAME_CASE(TAIL)
5673   NODE_NAME_CASE(SLLW)
5674   NODE_NAME_CASE(SRAW)
5675   NODE_NAME_CASE(SRLW)
5676   NODE_NAME_CASE(DIVW)
5677   NODE_NAME_CASE(DIVUW)
5678   NODE_NAME_CASE(REMUW)
5679   NODE_NAME_CASE(ROLW)
5680   NODE_NAME_CASE(RORW)
5681   NODE_NAME_CASE(FSLW)
5682   NODE_NAME_CASE(FSRW)
5683   NODE_NAME_CASE(FSL)
5684   NODE_NAME_CASE(FSR)
5685   NODE_NAME_CASE(FMV_H_X)
5686   NODE_NAME_CASE(FMV_X_ANYEXTH)
5687   NODE_NAME_CASE(FMV_W_X_RV64)
5688   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
5689   NODE_NAME_CASE(READ_CYCLE_WIDE)
5690   NODE_NAME_CASE(GREVI)
5691   NODE_NAME_CASE(GREVIW)
5692   NODE_NAME_CASE(GORCI)
5693   NODE_NAME_CASE(GORCIW)
5694   NODE_NAME_CASE(SHFLI)
5695   NODE_NAME_CASE(VMV_V_X_VL)
5696   NODE_NAME_CASE(VFMV_V_F_VL)
5697   NODE_NAME_CASE(VMV_X_S)
5698   NODE_NAME_CASE(SPLAT_VECTOR_I64)
5699   NODE_NAME_CASE(READ_VLENB)
5700   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
5701   NODE_NAME_CASE(VLEFF)
5702   NODE_NAME_CASE(VLEFF_MASK)
5703   NODE_NAME_CASE(VSLIDEUP_VL)
5704   NODE_NAME_CASE(VSLIDEDOWN_VL)
5705   NODE_NAME_CASE(VID_VL)
5706   NODE_NAME_CASE(VFNCVT_ROD_VL)
5707   NODE_NAME_CASE(VECREDUCE_ADD)
5708   NODE_NAME_CASE(VECREDUCE_UMAX)
5709   NODE_NAME_CASE(VECREDUCE_SMAX)
5710   NODE_NAME_CASE(VECREDUCE_UMIN)
5711   NODE_NAME_CASE(VECREDUCE_SMIN)
5712   NODE_NAME_CASE(VECREDUCE_AND)
5713   NODE_NAME_CASE(VECREDUCE_OR)
5714   NODE_NAME_CASE(VECREDUCE_XOR)
5715   NODE_NAME_CASE(VECREDUCE_FADD)
5716   NODE_NAME_CASE(VECREDUCE_SEQ_FADD)
5717   NODE_NAME_CASE(ADD_VL)
5718   NODE_NAME_CASE(AND_VL)
5719   NODE_NAME_CASE(MUL_VL)
5720   NODE_NAME_CASE(OR_VL)
5721   NODE_NAME_CASE(SDIV_VL)
5722   NODE_NAME_CASE(SHL_VL)
5723   NODE_NAME_CASE(SREM_VL)
5724   NODE_NAME_CASE(SRA_VL)
5725   NODE_NAME_CASE(SRL_VL)
5726   NODE_NAME_CASE(SUB_VL)
5727   NODE_NAME_CASE(UDIV_VL)
5728   NODE_NAME_CASE(UREM_VL)
5729   NODE_NAME_CASE(XOR_VL)
5730   NODE_NAME_CASE(FADD_VL)
5731   NODE_NAME_CASE(FSUB_VL)
5732   NODE_NAME_CASE(FMUL_VL)
5733   NODE_NAME_CASE(FDIV_VL)
5734   NODE_NAME_CASE(FNEG_VL)
5735   NODE_NAME_CASE(FABS_VL)
5736   NODE_NAME_CASE(FSQRT_VL)
5737   NODE_NAME_CASE(FMA_VL)
5738   NODE_NAME_CASE(SMIN_VL)
5739   NODE_NAME_CASE(SMAX_VL)
5740   NODE_NAME_CASE(UMIN_VL)
5741   NODE_NAME_CASE(UMAX_VL)
5742   NODE_NAME_CASE(MULHS_VL)
5743   NODE_NAME_CASE(MULHU_VL)
5744   NODE_NAME_CASE(FP_TO_SINT_VL)
5745   NODE_NAME_CASE(FP_TO_UINT_VL)
5746   NODE_NAME_CASE(SINT_TO_FP_VL)
5747   NODE_NAME_CASE(UINT_TO_FP_VL)
5748   NODE_NAME_CASE(FP_EXTEND_VL)
5749   NODE_NAME_CASE(FP_ROUND_VL)
5750   NODE_NAME_CASE(SETCC_VL)
5751   NODE_NAME_CASE(VSELECT_VL)
5752   NODE_NAME_CASE(VMAND_VL)
5753   NODE_NAME_CASE(VMOR_VL)
5754   NODE_NAME_CASE(VMXOR_VL)
5755   NODE_NAME_CASE(VMCLR_VL)
5756   NODE_NAME_CASE(VMSET_VL)
5757   NODE_NAME_CASE(VRGATHER_VX_VL)
5758   NODE_NAME_CASE(VSEXT_VL)
5759   NODE_NAME_CASE(VZEXT_VL)
5760   NODE_NAME_CASE(VLE_VL)
5761   NODE_NAME_CASE(VSE_VL)
5762   }
5763   // clang-format on
5764   return nullptr;
5765 #undef NODE_NAME_CASE
5766 }
5767 
5768 /// getConstraintType - Given a constraint letter, return the type of
5769 /// constraint it is for this target.
5770 RISCVTargetLowering::ConstraintType
5771 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
5772   if (Constraint.size() == 1) {
5773     switch (Constraint[0]) {
5774     default:
5775       break;
5776     case 'f':
5777       return C_RegisterClass;
5778     case 'I':
5779     case 'J':
5780     case 'K':
5781       return C_Immediate;
5782     case 'A':
5783       return C_Memory;
5784     }
5785   }
5786   return TargetLowering::getConstraintType(Constraint);
5787 }
5788 
5789 std::pair<unsigned, const TargetRegisterClass *>
5790 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5791                                                   StringRef Constraint,
5792                                                   MVT VT) const {
5793   // First, see if this is a constraint that directly corresponds to a
5794   // RISCV register class.
5795   if (Constraint.size() == 1) {
5796     switch (Constraint[0]) {
5797     case 'r':
5798       return std::make_pair(0U, &RISCV::GPRRegClass);
5799     case 'f':
5800       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
5801         return std::make_pair(0U, &RISCV::FPR16RegClass);
5802       if (Subtarget.hasStdExtF() && VT == MVT::f32)
5803         return std::make_pair(0U, &RISCV::FPR32RegClass);
5804       if (Subtarget.hasStdExtD() && VT == MVT::f64)
5805         return std::make_pair(0U, &RISCV::FPR64RegClass);
5806       break;
5807     default:
5808       break;
5809     }
5810   }
5811 
5812   // Clang will correctly decode the usage of register name aliases into their
5813   // official names. However, other frontends like `rustc` do not. This allows
5814   // users of these frontends to use the ABI names for registers in LLVM-style
5815   // register constraints.
5816   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
5817                                .Case("{zero}", RISCV::X0)
5818                                .Case("{ra}", RISCV::X1)
5819                                .Case("{sp}", RISCV::X2)
5820                                .Case("{gp}", RISCV::X3)
5821                                .Case("{tp}", RISCV::X4)
5822                                .Case("{t0}", RISCV::X5)
5823                                .Case("{t1}", RISCV::X6)
5824                                .Case("{t2}", RISCV::X7)
5825                                .Cases("{s0}", "{fp}", RISCV::X8)
5826                                .Case("{s1}", RISCV::X9)
5827                                .Case("{a0}", RISCV::X10)
5828                                .Case("{a1}", RISCV::X11)
5829                                .Case("{a2}", RISCV::X12)
5830                                .Case("{a3}", RISCV::X13)
5831                                .Case("{a4}", RISCV::X14)
5832                                .Case("{a5}", RISCV::X15)
5833                                .Case("{a6}", RISCV::X16)
5834                                .Case("{a7}", RISCV::X17)
5835                                .Case("{s2}", RISCV::X18)
5836                                .Case("{s3}", RISCV::X19)
5837                                .Case("{s4}", RISCV::X20)
5838                                .Case("{s5}", RISCV::X21)
5839                                .Case("{s6}", RISCV::X22)
5840                                .Case("{s7}", RISCV::X23)
5841                                .Case("{s8}", RISCV::X24)
5842                                .Case("{s9}", RISCV::X25)
5843                                .Case("{s10}", RISCV::X26)
5844                                .Case("{s11}", RISCV::X27)
5845                                .Case("{t3}", RISCV::X28)
5846                                .Case("{t4}", RISCV::X29)
5847                                .Case("{t5}", RISCV::X30)
5848                                .Case("{t6}", RISCV::X31)
5849                                .Default(RISCV::NoRegister);
5850   if (XRegFromAlias != RISCV::NoRegister)
5851     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
5852 
5853   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
5854   // TableGen record rather than the AsmName to choose registers for InlineAsm
5855   // constraints, and since we want to match those names to the widest floating point
5856   // register type available, manually select floating point registers here.
5857   //
5858   // The second case is the ABI name of the register, so that frontends can also
5859   // use the ABI names in register constraint lists.
5860   if (Subtarget.hasStdExtF()) {
5861     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
5862                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
5863                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
5864                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
5865                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
5866                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
5867                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
5868                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
5869                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
5870                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
5871                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
5872                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
5873                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
5874                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
5875                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
5876                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
5877                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
5878                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
5879                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
5880                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
5881                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
5882                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
5883                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
5884                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
5885                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
5886                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
5887                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
5888                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
5889                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
5890                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
5891                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
5892                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
5893                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
5894                         .Default(RISCV::NoRegister);
5895     if (FReg != RISCV::NoRegister) {
5896       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
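           // When the D extension is available, report the corresponding FPR64
           // register so the constraint matches the widest FP register class.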
5897       if (Subtarget.hasStdExtD()) {
5898         unsigned RegNo = FReg - RISCV::F0_F;
5899         unsigned DReg = RISCV::F0_D + RegNo;
5900         return std::make_pair(DReg, &RISCV::FPR64RegClass);
5901       }
5902       return std::make_pair(FReg, &RISCV::FPR32RegClass);
5903     }
5904   }
5905 
5906   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5907 }
5908 
5909 unsigned
5910 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
5911   // Currently only support length 1 constraints.
5912   if (ConstraintCode.size() == 1) {
5913     switch (ConstraintCode[0]) {
5914     case 'A':
5915       return InlineAsm::Constraint_A;
5916     default:
5917       break;
5918     }
5919   }
5920 
5921   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
5922 }
5923 
5924 void RISCVTargetLowering::LowerAsmOperandForConstraint(
5925     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
5926     SelectionDAG &DAG) const {
5927   // Currently only support length 1 constraints.
5928   if (Constraint.length() == 1) {
5929     switch (Constraint[0]) {
5930     case 'I':
5931       // Validate & create a 12-bit signed immediate operand.
5932       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5933         uint64_t CVal = C->getSExtValue();
5934         if (isInt<12>(CVal))
5935           Ops.push_back(
5936               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
5937       }
5938       return;
5939     case 'J':
5940       // Validate & create an integer zero operand.
5941       if (auto *C = dyn_cast<ConstantSDNode>(Op))
5942         if (C->getZExtValue() == 0)
5943           Ops.push_back(
5944               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
5945       return;
5946     case 'K':
5947       // Validate & create a 5-bit unsigned immediate operand.
5948       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5949         uint64_t CVal = C->getZExtValue();
5950         if (isUInt<5>(CVal))
5951           Ops.push_back(
5952               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
5953       }
5954       return;
5955     default:
5956       break;
5957     }
5958   }
5959   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
5960 }
5961 
5962 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
5963                                                    Instruction *Inst,
5964                                                    AtomicOrdering Ord) const {
5965   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
5966     return Builder.CreateFence(Ord);
5967   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
5968     return Builder.CreateFence(AtomicOrdering::Release);
5969   return nullptr;
5970 }
5971 
5972 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
5973                                                     Instruction *Inst,
5974                                                     AtomicOrdering Ord) const {
5975   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
5976     return Builder.CreateFence(AtomicOrdering::Acquire);
5977   return nullptr;
5978 }
5979 
5980 TargetLowering::AtomicExpansionKind
5981 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
5982   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
5983   // point operations can't be used in an lr/sc sequence without breaking the
5984   // forward-progress guarantee.
5985   if (AI->isFloatingPointOperation())
5986     return AtomicExpansionKind::CmpXChg;
5987 
5988   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
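       // i8 and i16 atomics are lowered via the masked atomic intrinsics, which
       // operate on a wider aligned word containing the value.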
5989   if (Size == 8 || Size == 16)
5990     return AtomicExpansionKind::MaskedIntrinsic;
5991   return AtomicExpansionKind::None;
5992 }
5993 
5994 static Intrinsic::ID
5995 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
5996   if (XLen == 32) {
5997     switch (BinOp) {
5998     default:
5999       llvm_unreachable("Unexpected AtomicRMW BinOp");
6000     case AtomicRMWInst::Xchg:
6001       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
6002     case AtomicRMWInst::Add:
6003       return Intrinsic::riscv_masked_atomicrmw_add_i32;
6004     case AtomicRMWInst::Sub:
6005       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
6006     case AtomicRMWInst::Nand:
6007       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
6008     case AtomicRMWInst::Max:
6009       return Intrinsic::riscv_masked_atomicrmw_max_i32;
6010     case AtomicRMWInst::Min:
6011       return Intrinsic::riscv_masked_atomicrmw_min_i32;
6012     case AtomicRMWInst::UMax:
6013       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
6014     case AtomicRMWInst::UMin:
6015       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
6016     }
6017   }
6018 
6019   if (XLen == 64) {
6020     switch (BinOp) {
6021     default:
6022       llvm_unreachable("Unexpected AtomicRMW BinOp");
6023     case AtomicRMWInst::Xchg:
6024       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
6025     case AtomicRMWInst::Add:
6026       return Intrinsic::riscv_masked_atomicrmw_add_i64;
6027     case AtomicRMWInst::Sub:
6028       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
6029     case AtomicRMWInst::Nand:
6030       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
6031     case AtomicRMWInst::Max:
6032       return Intrinsic::riscv_masked_atomicrmw_max_i64;
6033     case AtomicRMWInst::Min:
6034       return Intrinsic::riscv_masked_atomicrmw_min_i64;
6035     case AtomicRMWInst::UMax:
6036       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
6037     case AtomicRMWInst::UMin:
6038       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
6039     }
6040   }
6041 
6042   llvm_unreachable("Unexpected XLen\n");
6043 }
6044 
6045 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
6046     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
6047     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
6048   unsigned XLen = Subtarget.getXLen();
6049   Value *Ordering =
6050       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
6051   Type *Tys[] = {AlignedAddr->getType()};
6052   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
6053       AI->getModule(),
6054       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
6055 
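       // The masked atomic intrinsics take XLen-wide operands, so sign-extend
       // the 32-bit operands when targeting RV64.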
6056   if (XLen == 64) {
6057     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
6058     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
6059     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
6060   }
6061 
6062   Value *Result;
6063 
6064   // Must pass the shift amount needed to sign extend the loaded value prior
6065   // to performing a signed comparison for min/max. ShiftAmt is the number of
6066   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
6067   // is the number of bits to left+right shift the value in order to
6068   // sign-extend.
6069   if (AI->getOperation() == AtomicRMWInst::Min ||
6070       AI->getOperation() == AtomicRMWInst::Max) {
6071     const DataLayout &DL = AI->getModule()->getDataLayout();
6072     unsigned ValWidth =
6073         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
6074     Value *SextShamt =
6075         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
6076     Result = Builder.CreateCall(LrwOpScwLoop,
6077                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
6078   } else {
6079     Result =
6080         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
6081   }
6082 
6083   if (XLen == 64)
6084     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
6085   return Result;
6086 }
6087 
6088 TargetLowering::AtomicExpansionKind
6089 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
6090     AtomicCmpXchgInst *CI) const {
6091   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
6092   if (Size == 8 || Size == 16)
6093     return AtomicExpansionKind::MaskedIntrinsic;
6094   return AtomicExpansionKind::None;
6095 }
6096 
6097 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
6098     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
6099     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
6100   unsigned XLen = Subtarget.getXLen();
6101   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
6102   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
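       // As in the RMW case, widen the 32-bit operands and select the i64
       // variant of the intrinsic when targeting RV64.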
6103   if (XLen == 64) {
6104     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
6105     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
6106     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
6107     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
6108   }
6109   Type *Tys[] = {AlignedAddr->getType()};
6110   Function *MaskedCmpXchg =
6111       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
6112   Value *Result = Builder.CreateCall(
6113       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
6114   if (XLen == 64)
6115     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
6116   return Result;
6117 }
6118 
6119 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
6120                                                      EVT VT) const {
6121   VT = VT.getScalarType();
6122 
6123   if (!VT.isSimple())
6124     return false;
6125 
6126   switch (VT.getSimpleVT().SimpleTy) {
6127   case MVT::f16:
6128     return Subtarget.hasStdExtZfh();
6129   case MVT::f32:
6130     return Subtarget.hasStdExtF();
6131   case MVT::f64:
6132     return Subtarget.hasStdExtD();
6133   default:
6134     break;
6135   }
6136 
6137   return false;
6138 }
6139 
6140 Register RISCVTargetLowering::getExceptionPointerRegister(
6141     const Constant *PersonalityFn) const {
6142   return RISCV::X10;
6143 }
6144 
6145 Register RISCVTargetLowering::getExceptionSelectorRegister(
6146     const Constant *PersonalityFn) const {
6147   return RISCV::X11;
6148 }
6149 
6150 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
6151   // Return false to suppress unnecessary extensions when a libcall argument
6152   // or return value is of f32 type under the LP64 ABI.
6153   RISCVABI::ABI ABI = Subtarget.getTargetABI();
6154   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
6155     return false;
6156 
6157   return true;
6158 }
6159 
6160 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
6161   if (Subtarget.is64Bit() && Type == MVT::i32)
6162     return true;
6163 
6164   return IsSigned;
6165 }
6166 
6167 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
6168                                                  SDValue C) const {
6169   // Check integral scalar types.
6170   if (VT.isScalarInteger()) {
6171     // Omit the optimization if the subtarget has the M extension and the
6172     // data size exceeds XLen.
6173     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
6174       return false;
6175     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
6176       // Break the MUL to a SLLI and an ADD/SUB.
6177       const APInt &Imm = ConstNode->getAPIntValue();
6178       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
6179           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
6180         return true;
6181       // Omit the following optimization if the subtarget has the M
6182       // extension and the data size >= XLen.
6183       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
6184         return false;
6185       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
6186       // a pair of LUI/ADDI.
6187       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
6188         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
6189         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
6190             (1 - ImmS).isPowerOf2())
6191           return true;
6192       }
6193     }
6194   }
6195 
6196   return false;
6197 }
6198 
6199 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
6200   if (!Subtarget.useRVVForFixedLengthVectors())
6201     return false;
6202 
6203   if (!VT.isFixedLengthVector())
6204     return false;
6205 
6206   // Don't use RVV for vectors we cannot scalarize if required.
6207   switch (VT.getVectorElementType().SimpleTy) {
6208   // i1 is supported but has different rules.
6209   default:
6210     return false;
6211   case MVT::i1:
6212     // Masks can only use a single register.
6213     if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
6214       return false;
6215     break;
6216   case MVT::i8:
6217   case MVT::i16:
6218   case MVT::i32:
6219   case MVT::i64:
6220     break;
6221   case MVT::f16:
6222     if (!Subtarget.hasStdExtZfh())
6223       return false;
6224     break;
6225   case MVT::f32:
6226     if (!Subtarget.hasStdExtF())
6227       return false;
6228     break;
6229   case MVT::f64:
6230     if (!Subtarget.hasStdExtD())
6231       return false;
6232     break;
6233   }
6234 
6235   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
6236   // Don't use RVV for types that don't fit.
6237   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
6238     return false;
6239 
6240   // TODO: Perhaps an artificial restriction, but worth having whilst getting
6241   // the base fixed length RVV support in place.
6242   if (!VT.isPow2VectorType())
6243     return false;
6244 
6245   return true;
6246 }
6247 
6248 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
6249     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
6250     bool *Fast) const {
6251   if (!VT.isScalableVector())
6252     return false;
6253 
6254   EVT ElemVT = VT.getVectorElementType();
6255   if (Alignment >= ElemVT.getStoreSize()) {
6256     if (Fast)
6257       *Fast = true;
6258     return true;
6259   }
6260 
6261   return false;
6262 }
6263 
6264 #define GET_REGISTER_MATCHER
6265 #include "RISCVGenAsmMatcher.inc"
6266 
6267 Register
6268 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
6269                                        const MachineFunction &MF) const {
6270   Register Reg = MatchRegisterAltName(RegName);
6271   if (Reg == RISCV::NoRegister)
6272     Reg = MatchRegisterName(RegName);
6273   if (Reg == RISCV::NoRegister)
6274     report_fatal_error(
6275         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
6276   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
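       // Registers may only be obtained by name if they are reserved, either
       // architecturally or by the user; otherwise the register allocator
       // could clobber them.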
6277   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
6278     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
6279                              StringRef(RegName) + "\"."));
6280   return Reg;
6281 }
6282 
6283 namespace llvm {
6284 namespace RISCVVIntrinsicsTable {
6285 
6286 #define GET_RISCVVIntrinsicsTable_IMPL
6287 #include "RISCVGenSearchableTables.inc"
6288 
6289 } // namespace RISCVVIntrinsicsTable
6290 
6291 } // namespace llvm
6292