1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
147         const TargetRegisterClass *RC;
148         if (LMul == 1 || VT.getVectorElementType() == MVT::i1)
149           RC = &RISCV::VRRegClass;
150         else if (LMul == 2)
151           RC = &RISCV::VRM2RegClass;
152         else if (LMul == 4)
153           RC = &RISCV::VRM4RegClass;
154         else if (LMul == 8)
155           RC = &RISCV::VRM8RegClass;
156         else
157           llvm_unreachable("Unexpected LMul!");
158 
159         addRegisterClass(VT, RC);
160       };
161       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164 
165       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
166         if (useRVVForFixedLengthVectorVT(VT))
167           addRegClassForFixedVectors(VT);
168     }
169   }
170 
171   // Compute derived properties from the register classes.
172   computeRegisterProperties(STI.getRegisterInfo());
173 
174   setStackPointerRegisterToSaveRestore(RISCV::X2);
175 
176   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
177     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
178 
179   // TODO: add all necessary setOperationAction calls.
180   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
181 
182   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
183   setOperationAction(ISD::BR_CC, XLenVT, Expand);
184   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
185   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
186 
187   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
188   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
189 
190   setOperationAction(ISD::VASTART, MVT::Other, Custom);
191   setOperationAction(ISD::VAARG, MVT::Other, Expand);
192   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
193   setOperationAction(ISD::VAEND, MVT::Other, Expand);
194 
195   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
196   if (!Subtarget.hasStdExtZbb()) {
197     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
198     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
199   }
200 
201   if (Subtarget.is64Bit()) {
202     setOperationAction(ISD::ADD, MVT::i32, Custom);
203     setOperationAction(ISD::SUB, MVT::i32, Custom);
204     setOperationAction(ISD::SHL, MVT::i32, Custom);
205     setOperationAction(ISD::SRA, MVT::i32, Custom);
206     setOperationAction(ISD::SRL, MVT::i32, Custom);
207 
208     setOperationAction(ISD::UADDO, MVT::i32, Custom);
209     setOperationAction(ISD::USUBO, MVT::i32, Custom);
210     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
211     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
212   }
213 
214   if (!Subtarget.hasStdExtM()) {
215     setOperationAction(ISD::MUL, XLenVT, Expand);
216     setOperationAction(ISD::MULHS, XLenVT, Expand);
217     setOperationAction(ISD::MULHU, XLenVT, Expand);
218     setOperationAction(ISD::SDIV, XLenVT, Expand);
219     setOperationAction(ISD::UDIV, XLenVT, Expand);
220     setOperationAction(ISD::SREM, XLenVT, Expand);
221     setOperationAction(ISD::UREM, XLenVT, Expand);
222   }
223 
224   if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
225     setOperationAction(ISD::MUL, MVT::i32, Custom);
226 
227     setOperationAction(ISD::SDIV, MVT::i8, Custom);
228     setOperationAction(ISD::UDIV, MVT::i8, Custom);
229     setOperationAction(ISD::UREM, MVT::i8, Custom);
230     setOperationAction(ISD::SDIV, MVT::i16, Custom);
231     setOperationAction(ISD::UDIV, MVT::i16, Custom);
232     setOperationAction(ISD::UREM, MVT::i16, Custom);
233     setOperationAction(ISD::SDIV, MVT::i32, Custom);
234     setOperationAction(ISD::UDIV, MVT::i32, Custom);
235     setOperationAction(ISD::UREM, MVT::i32, Custom);
236   }
237 
238   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
239   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
240   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
241   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
242 
243   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
244   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
245   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
246 
247   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
248     if (Subtarget.is64Bit()) {
249       setOperationAction(ISD::ROTL, MVT::i32, Custom);
250       setOperationAction(ISD::ROTR, MVT::i32, Custom);
251     }
252   } else {
253     setOperationAction(ISD::ROTL, XLenVT, Expand);
254     setOperationAction(ISD::ROTR, XLenVT, Expand);
255   }
256 
257   if (Subtarget.hasStdExtZbp()) {
258     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
259     // more combining.
260     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
261     setOperationAction(ISD::BSWAP, XLenVT, Custom);
262 
263     if (Subtarget.is64Bit()) {
264       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
265       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
266     }
267   } else {
268     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
269     // pattern match it directly in isel.
270     setOperationAction(ISD::BSWAP, XLenVT,
271                        Subtarget.hasStdExtZbb() ? Legal : Expand);
272   }
273 
274   if (Subtarget.hasStdExtZbb()) {
275     setOperationAction(ISD::SMIN, XLenVT, Legal);
276     setOperationAction(ISD::SMAX, XLenVT, Legal);
277     setOperationAction(ISD::UMIN, XLenVT, Legal);
278     setOperationAction(ISD::UMAX, XLenVT, Legal);
279   } else {
280     setOperationAction(ISD::CTTZ, XLenVT, Expand);
281     setOperationAction(ISD::CTLZ, XLenVT, Expand);
282     setOperationAction(ISD::CTPOP, XLenVT, Expand);
283   }
284 
285   if (Subtarget.hasStdExtZbt()) {
286     setOperationAction(ISD::FSHL, XLenVT, Custom);
287     setOperationAction(ISD::FSHR, XLenVT, Custom);
288     setOperationAction(ISD::SELECT, XLenVT, Legal);
289 
290     if (Subtarget.is64Bit()) {
291       setOperationAction(ISD::FSHL, MVT::i32, Custom);
292       setOperationAction(ISD::FSHR, MVT::i32, Custom);
293     }
294   } else {
295     setOperationAction(ISD::SELECT, XLenVT, Custom);
296   }
297 
298   ISD::CondCode FPCCToExpand[] = {
299       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
300       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
301       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
302 
303   ISD::NodeType FPOpToExpand[] = {
304       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
305       ISD::FP_TO_FP16};
306 
307   if (Subtarget.hasStdExtZfh())
308     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
309 
310   if (Subtarget.hasStdExtZfh()) {
311     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
312     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
313     for (auto CC : FPCCToExpand)
314       setCondCodeAction(CC, MVT::f16, Expand);
315     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
316     setOperationAction(ISD::SELECT, MVT::f16, Custom);
317     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
318     for (auto Op : FPOpToExpand)
319       setOperationAction(Op, MVT::f16, Expand);
320   }
321 
322   if (Subtarget.hasStdExtF()) {
323     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
324     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
325     for (auto CC : FPCCToExpand)
326       setCondCodeAction(CC, MVT::f32, Expand);
327     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
328     setOperationAction(ISD::SELECT, MVT::f32, Custom);
329     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
330     for (auto Op : FPOpToExpand)
331       setOperationAction(Op, MVT::f32, Expand);
332     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
333     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
334   }
335 
336   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
337     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
338 
339   if (Subtarget.hasStdExtD()) {
340     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
341     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
342     for (auto CC : FPCCToExpand)
343       setCondCodeAction(CC, MVT::f64, Expand);
344     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
345     setOperationAction(ISD::SELECT, MVT::f64, Custom);
346     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
347     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
348     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
349     for (auto Op : FPOpToExpand)
350       setOperationAction(Op, MVT::f64, Expand);
351     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
352     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
353   }
354 
355   if (Subtarget.is64Bit()) {
356     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
357     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
358     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
359     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
360   }
361 
362   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
363   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
364   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
365   setOperationAction(ISD::JumpTable, XLenVT, Custom);
366 
367   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
368 
369   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
370   // Unfortunately this can't be determined just from the ISA naming string.
371   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
372                      Subtarget.is64Bit() ? Legal : Custom);
373 
374   setOperationAction(ISD::TRAP, MVT::Other, Legal);
375   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
376   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
377 
378   if (Subtarget.hasStdExtA()) {
379     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
380     setMinCmpXchgSizeInBits(32);
381   } else {
382     setMaxAtomicSizeInBitsSupported(0);
383   }
384 
385   setBooleanContents(ZeroOrOneBooleanContent);
386 
387   if (Subtarget.hasStdExtV()) {
388     setBooleanVectorContents(ZeroOrOneBooleanContent);
389 
390     setOperationAction(ISD::VSCALE, XLenVT, Custom);
391 
392     // RVV intrinsics may have illegal operands.
393     // We also need to custom legalize vmv.x.s.
394     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
395     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
396     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
397     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
398     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
399     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
400     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
401     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
402 
403     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
404 
405     if (!Subtarget.is64Bit()) {
406       // We must custom-lower certain vXi64 operations on RV32 due to the vector
407       // element type being illegal.
408       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
409       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
410 
411       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
412       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
413       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
414       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
415       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
416       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
417       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
418       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
419     }
420 
421     for (MVT VT : BoolVecVTs) {
422       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
423 
424       // Mask VTs are custom-expanded into a series of standard nodes
425       setOperationAction(ISD::TRUNCATE, VT, Custom);
426       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
427       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
428 
429       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
430     }
431 
432     for (MVT VT : IntVecVTs) {
433       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
434       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
435 
436       setOperationAction(ISD::SMIN, VT, Legal);
437       setOperationAction(ISD::SMAX, VT, Legal);
438       setOperationAction(ISD::UMIN, VT, Legal);
439       setOperationAction(ISD::UMAX, VT, Legal);
440 
441       setOperationAction(ISD::ROTL, VT, Expand);
442       setOperationAction(ISD::ROTR, VT, Expand);
443 
444       // Custom-lower extensions and truncations from/to mask types.
445       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
446       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
447       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
448 
449       // RVV has native int->float & float->int conversions where the
450       // element type sizes are within one power-of-two of each other. Any
451       // wider distances between type sizes have to be lowered as sequences
452       // which progressively narrow the gap in stages.
453       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
454       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
455       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
456       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
457 
458       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
459       // nodes which truncate by one power of two at a time.
460       setOperationAction(ISD::TRUNCATE, VT, Custom);
461 
462       // Custom-lower insert/extract operations to simplify patterns.
463       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
464       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
465 
466       // Custom-lower reduction operations to set up the corresponding custom
467       // nodes' operands.
468       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
469       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
470       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
471       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
472       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
473       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
474       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
475       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
476 
477       setOperationAction(ISD::MLOAD, VT, Custom);
478       setOperationAction(ISD::MSTORE, VT, Custom);
479       setOperationAction(ISD::MGATHER, VT, Custom);
480       setOperationAction(ISD::MSCATTER, VT, Custom);
481 
482       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
483       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
484       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
485 
486       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
487     }
488 
489     // Expand various CCs to best match the RVV ISA, which natively supports UNE
490     // but no other unordered comparisons, and supports all ordered comparisons
491     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
492     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
493     // and we pattern-match those back to the "original", swapping operands once
494     // more. This way we catch both operations and both "vf" and "fv" forms with
495     // fewer patterns.
496     ISD::CondCode VFPCCToExpand[] = {
497         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
498         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
499         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
500     };
501 
502     // Sets common operation actions on RVV floating-point vector types.
503     const auto SetCommonVFPActions = [&](MVT VT) {
504       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
505       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
506       // sizes are within one power-of-two of each other. Therefore conversions
507       // between vXf16 and vXf64 must be lowered as sequences which convert via
508       // vXf32.
509       setOperationAction(ISD::FP_ROUND, VT, Custom);
510       setOperationAction(ISD::FP_EXTEND, VT, Custom);
511       // Custom-lower insert/extract operations to simplify patterns.
512       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
513       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
514       // Expand various condition codes (explained above).
515       for (auto CC : VFPCCToExpand)
516         setCondCodeAction(CC, VT, Expand);
517 
518       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
519       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
520       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
521 
522       setOperationAction(ISD::MLOAD, VT, Custom);
523       setOperationAction(ISD::MSTORE, VT, Custom);
524       setOperationAction(ISD::MGATHER, VT, Custom);
525       setOperationAction(ISD::MSCATTER, VT, Custom);
526 
527       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
528       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
529       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
530 
531       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
532     };
533 
534     if (Subtarget.hasStdExtZfh())
535       for (MVT VT : F16VecVTs)
536         SetCommonVFPActions(VT);
537 
538     if (Subtarget.hasStdExtF())
539       for (MVT VT : F32VecVTs)
540         SetCommonVFPActions(VT);
541 
542     if (Subtarget.hasStdExtD())
543       for (MVT VT : F64VecVTs)
544         SetCommonVFPActions(VT);
545 
546     if (Subtarget.useRVVForFixedLengthVectors()) {
547       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
548         if (!useRVVForFixedLengthVectorVT(VT))
549           continue;
550 
551         // By default everything must be expanded.
552         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
553           setOperationAction(Op, VT, Expand);
554         for (MVT OtherVT : MVT::fixedlen_vector_valuetypes())
555           setTruncStoreAction(VT, OtherVT, Expand);
556 
557         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
558         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
559         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
560 
561         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
562         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
563 
564         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
565 
566         setOperationAction(ISD::LOAD, VT, Custom);
567         setOperationAction(ISD::STORE, VT, Custom);
568 
569         setOperationAction(ISD::SETCC, VT, Custom);
570 
571         setOperationAction(ISD::TRUNCATE, VT, Custom);
572 
573         setOperationAction(ISD::BITCAST, VT, Custom);
574 
575         // Operations below are different for between masks and other vectors.
576         if (VT.getVectorElementType() == MVT::i1) {
577           setOperationAction(ISD::AND, VT, Custom);
578           setOperationAction(ISD::OR, VT, Custom);
579           setOperationAction(ISD::XOR, VT, Custom);
580           continue;
581         }
582 
583         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
584         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
585 
586         setOperationAction(ISD::MLOAD, VT, Custom);
587         setOperationAction(ISD::MSTORE, VT, Custom);
588         setOperationAction(ISD::ADD, VT, Custom);
589         setOperationAction(ISD::MUL, VT, Custom);
590         setOperationAction(ISD::SUB, VT, Custom);
591         setOperationAction(ISD::AND, VT, Custom);
592         setOperationAction(ISD::OR, VT, Custom);
593         setOperationAction(ISD::XOR, VT, Custom);
594         setOperationAction(ISD::SDIV, VT, Custom);
595         setOperationAction(ISD::SREM, VT, Custom);
596         setOperationAction(ISD::UDIV, VT, Custom);
597         setOperationAction(ISD::UREM, VT, Custom);
598         setOperationAction(ISD::SHL, VT, Custom);
599         setOperationAction(ISD::SRA, VT, Custom);
600         setOperationAction(ISD::SRL, VT, Custom);
601 
602         setOperationAction(ISD::SMIN, VT, Custom);
603         setOperationAction(ISD::SMAX, VT, Custom);
604         setOperationAction(ISD::UMIN, VT, Custom);
605         setOperationAction(ISD::UMAX, VT, Custom);
606         setOperationAction(ISD::ABS,  VT, Custom);
607 
608         setOperationAction(ISD::MULHS, VT, Custom);
609         setOperationAction(ISD::MULHU, VT, Custom);
610 
611         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
612         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
613         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
614         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
615 
616         setOperationAction(ISD::VSELECT, VT, Custom);
617 
618         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
619         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
620         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
621 
622         // Custom-lower reduction operations to set up the corresponding custom
623         // nodes' operands.
624         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
625         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
626         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
627         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
628         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
629         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
630         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
631         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
632       }
633 
634       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
635         if (!useRVVForFixedLengthVectorVT(VT))
636           continue;
637 
638         // By default everything must be expanded.
639         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
640           setOperationAction(Op, VT, Expand);
641         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
642           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
643           setTruncStoreAction(VT, OtherVT, Expand);
644         }
645 
646         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
647         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
648         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
649 
650         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
651         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
652         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
653         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
654 
655         setOperationAction(ISD::LOAD, VT, Custom);
656         setOperationAction(ISD::STORE, VT, Custom);
657         setOperationAction(ISD::MLOAD, VT, Custom);
658         setOperationAction(ISD::MSTORE, VT, Custom);
659         setOperationAction(ISD::FADD, VT, Custom);
660         setOperationAction(ISD::FSUB, VT, Custom);
661         setOperationAction(ISD::FMUL, VT, Custom);
662         setOperationAction(ISD::FDIV, VT, Custom);
663         setOperationAction(ISD::FNEG, VT, Custom);
664         setOperationAction(ISD::FABS, VT, Custom);
665         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
666         setOperationAction(ISD::FSQRT, VT, Custom);
667         setOperationAction(ISD::FMA, VT, Custom);
668 
669         setOperationAction(ISD::FP_ROUND, VT, Custom);
670         setOperationAction(ISD::FP_EXTEND, VT, Custom);
671 
672         for (auto CC : VFPCCToExpand)
673           setCondCodeAction(CC, VT, Expand);
674 
675         setOperationAction(ISD::VSELECT, VT, Custom);
676 
677         setOperationAction(ISD::BITCAST, VT, Custom);
678 
679         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
680         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
681       }
682     }
683   }
684 
685   // Function alignments.
686   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
687   setMinFunctionAlignment(FunctionAlignment);
688   setPrefFunctionAlignment(FunctionAlignment);
689 
690   setMinimumJumpTableEntries(5);
691 
692   // Jumps are expensive, compared to logic
693   setJumpIsExpensive();
694 
695   // We can use any register for comparisons
696   setHasMultipleConditionRegisters();
697 
698   if (Subtarget.hasStdExtZbp()) {
699     setTargetDAGCombine(ISD::OR);
700   }
701   if (Subtarget.hasStdExtV()) {
702     setTargetDAGCombine(ISD::FCOPYSIGN);
703     setTargetDAGCombine(ISD::MGATHER);
704     setTargetDAGCombine(ISD::MSCATTER);
705   }
706 }
707 
708 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
709                                             LLVMContext &Context,
710                                             EVT VT) const {
711   if (!VT.isVector())
712     return getPointerTy(DL);
713   if (Subtarget.hasStdExtV() &&
714       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
715     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
716   return VT.changeVectorElementTypeToInteger();
717 }
718 
719 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
720                                              const CallInst &I,
721                                              MachineFunction &MF,
722                                              unsigned Intrinsic) const {
723   switch (Intrinsic) {
724   default:
725     return false;
726   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
727   case Intrinsic::riscv_masked_atomicrmw_add_i32:
728   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
729   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
730   case Intrinsic::riscv_masked_atomicrmw_max_i32:
731   case Intrinsic::riscv_masked_atomicrmw_min_i32:
732   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
733   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
734   case Intrinsic::riscv_masked_cmpxchg_i32:
735     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
736     Info.opc = ISD::INTRINSIC_W_CHAIN;
737     Info.memVT = MVT::getVT(PtrTy->getElementType());
738     Info.ptrVal = I.getArgOperand(0);
739     Info.offset = 0;
740     Info.align = Align(4);
741     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
742                  MachineMemOperand::MOVolatile;
743     return true;
744   }
745 }
746 
747 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
748                                                 const AddrMode &AM, Type *Ty,
749                                                 unsigned AS,
750                                                 Instruction *I) const {
751   // No global is ever allowed as a base.
752   if (AM.BaseGV)
753     return false;
754 
755   // Require a 12-bit signed offset.
756   if (!isInt<12>(AM.BaseOffs))
757     return false;
758 
759   switch (AM.Scale) {
760   case 0: // "r+i" or just "i", depending on HasBaseReg.
761     break;
762   case 1:
763     if (!AM.HasBaseReg) // allow "r+i".
764       break;
765     return false; // disallow "r+r" or "r+r+i".
766   default:
767     return false;
768   }
769 
770   return true;
771 }
772 
773 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
774   return isInt<12>(Imm);
775 }
776 
777 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
778   return isInt<12>(Imm);
779 }
780 
781 // On RV32, 64-bit integers are split into their high and low parts and held
782 // in two different registers, so the trunc is free since the low register can
783 // just be used.
784 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
785   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
786     return false;
787   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
788   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
789   return (SrcBits == 64 && DestBits == 32);
790 }
791 
792 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
793   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
794       !SrcVT.isInteger() || !DstVT.isInteger())
795     return false;
796   unsigned SrcBits = SrcVT.getSizeInBits();
797   unsigned DestBits = DstVT.getSizeInBits();
798   return (SrcBits == 64 && DestBits == 32);
799 }
800 
801 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
802   // Zexts are free if they can be combined with a load.
803   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
804     EVT MemVT = LD->getMemoryVT();
805     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
806          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
807         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
808          LD->getExtensionType() == ISD::ZEXTLOAD))
809       return true;
810   }
811 
812   return TargetLowering::isZExtFree(Val, VT2);
813 }
814 
815 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
816   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
817 }
818 
819 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
820   return Subtarget.hasStdExtZbb();
821 }
822 
823 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
824   return Subtarget.hasStdExtZbb();
825 }
826 
827 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
828                                        bool ForCodeSize) const {
829   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
830     return false;
831   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
832     return false;
833   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
834     return false;
835   if (Imm.isNegZero())
836     return false;
837   return Imm.isZero();
838 }
839 
840 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
841   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
842          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
843          (VT == MVT::f64 && Subtarget.hasStdExtD());
844 }
845 
846 // Changes the condition code and swaps operands if necessary, so the SetCC
847 // operation matches one of the comparisons supported directly by branches
848 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
849 // with 1/-1.
850 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
851                                     ISD::CondCode &CC, SelectionDAG &DAG) {
852   // Convert X > -1 to X >= 0.
853   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
854     RHS = DAG.getConstant(0, DL, RHS.getValueType());
855     CC = ISD::SETGE;
856     return;
857   }
858   // Convert X < 1 to 0 >= X.
859   if (CC == ISD::SETLT && isOneConstant(RHS)) {
860     RHS = LHS;
861     LHS = DAG.getConstant(0, DL, RHS.getValueType());
862     CC = ISD::SETGE;
863     return;
864   }
865 
866   switch (CC) {
867   default:
868     break;
869   case ISD::SETGT:
870   case ISD::SETLE:
871   case ISD::SETUGT:
872   case ISD::SETULE:
873     CC = ISD::getSetCCSwappedOperands(CC);
874     std::swap(LHS, RHS);
875     break;
876   }
877 }
878 
879 // Return the RISC-V branch opcode that matches the given DAG integer
880 // condition code. The CondCode must be one of those supported by the RISC-V
881 // ISA (see translateSetCCForBranch).
882 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
883   switch (CC) {
884   default:
885     llvm_unreachable("Unsupported CondCode");
886   case ISD::SETEQ:
887     return RISCV::BEQ;
888   case ISD::SETNE:
889     return RISCV::BNE;
890   case ISD::SETLT:
891     return RISCV::BLT;
892   case ISD::SETGE:
893     return RISCV::BGE;
894   case ISD::SETULT:
895     return RISCV::BLTU;
896   case ISD::SETUGE:
897     return RISCV::BGEU;
898   }
899 }
900 
901 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
902   assert(VT.isScalableVector() && "Expecting a scalable vector type");
903   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
904   if (VT.getVectorElementType() == MVT::i1)
905     KnownSize *= 8;
906 
907   switch (KnownSize) {
908   default:
909     llvm_unreachable("Invalid LMUL.");
910   case 8:
911     return RISCVVLMUL::LMUL_F8;
912   case 16:
913     return RISCVVLMUL::LMUL_F4;
914   case 32:
915     return RISCVVLMUL::LMUL_F2;
916   case 64:
917     return RISCVVLMUL::LMUL_1;
918   case 128:
919     return RISCVVLMUL::LMUL_2;
920   case 256:
921     return RISCVVLMUL::LMUL_4;
922   case 512:
923     return RISCVVLMUL::LMUL_8;
924   }
925 }
926 
927 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
928   switch (LMul) {
929   default:
930     llvm_unreachable("Invalid LMUL.");
931   case RISCVVLMUL::LMUL_F8:
932   case RISCVVLMUL::LMUL_F4:
933   case RISCVVLMUL::LMUL_F2:
934   case RISCVVLMUL::LMUL_1:
935     return RISCV::VRRegClassID;
936   case RISCVVLMUL::LMUL_2:
937     return RISCV::VRM2RegClassID;
938   case RISCVVLMUL::LMUL_4:
939     return RISCV::VRM4RegClassID;
940   case RISCVVLMUL::LMUL_8:
941     return RISCV::VRM8RegClassID;
942   }
943 }
944 
945 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
946   RISCVVLMUL LMUL = getLMUL(VT);
947   if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
948       LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
949     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
950                   "Unexpected subreg numbering");
951     return RISCV::sub_vrm1_0 + Index;
952   }
953   if (LMUL == RISCVVLMUL::LMUL_2) {
954     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
955                   "Unexpected subreg numbering");
956     return RISCV::sub_vrm2_0 + Index;
957   }
958   if (LMUL == RISCVVLMUL::LMUL_4) {
959     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
960                   "Unexpected subreg numbering");
961     return RISCV::sub_vrm4_0 + Index;
962   }
963   llvm_unreachable("Invalid vector type.");
964 }
965 
966 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
967   if (VT.getVectorElementType() == MVT::i1)
968     return RISCV::VRRegClassID;
969   return getRegClassIDForLMUL(getLMUL(VT));
970 }
971 
972 // Attempt to decompose a subvector insert/extract between VecVT and
973 // SubVecVT via subregister indices. Returns the subregister index that
974 // can perform the subvector insert/extract with the given element index, as
975 // well as the index corresponding to any leftover subvectors that must be
976 // further inserted/extracted within the register class for SubVecVT.
977 std::pair<unsigned, unsigned>
978 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
979     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
980     const RISCVRegisterInfo *TRI) {
981   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
982                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
983                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
984                 "Register classes not ordered");
985   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
986   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
987   // Try to compose a subregister index that takes us from the incoming
988   // LMUL>1 register class down to the outgoing one. At each step we half
989   // the LMUL:
990   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
991   // Note that this is not guaranteed to find a subregister index, such as
992   // when we are extracting from one VR type to another.
993   unsigned SubRegIdx = RISCV::NoSubRegister;
994   for (const unsigned RCID :
995        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
996     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
997       VecVT = VecVT.getHalfNumVectorElementsVT();
998       bool IsHi =
999           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1000       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1001                                             getSubregIndexByMVT(VecVT, IsHi));
1002       if (IsHi)
1003         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1004     }
1005   return {SubRegIdx, InsertExtractIdx};
1006 }
1007 
1008 // Return the largest legal scalable vector type that matches VT's element type.
1009 MVT RISCVTargetLowering::getContainerForFixedLengthVector(
1010     const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) {
1011   assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) &&
1012          "Expected legal fixed length vector!");
1013 
1014   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
1015   assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");
1016 
1017   MVT EltVT = VT.getVectorElementType();
1018   switch (EltVT.SimpleTy) {
1019   default:
1020     llvm_unreachable("unexpected element type for RVV container");
1021   case MVT::i1: {
1022     // Masks are calculated assuming 8-bit elements since that's when we need
1023     // the most elements.
1024     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
1025     return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
1026   }
1027   case MVT::i8:
1028   case MVT::i16:
1029   case MVT::i32:
1030   case MVT::i64:
1031   case MVT::f16:
1032   case MVT::f32:
1033   case MVT::f64: {
1034     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
1035     return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
1036   }
1037   }
1038 }
1039 
1040 MVT RISCVTargetLowering::getContainerForFixedLengthVector(
1041     SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
1042   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1043                                           Subtarget);
1044 }
1045 
1046 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1047   return getContainerForFixedLengthVector(*this, VT, getSubtarget());
1048 }
1049 
1050 // Grow V to consume an entire RVV register.
1051 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1052                                        const RISCVSubtarget &Subtarget) {
1053   assert(VT.isScalableVector() &&
1054          "Expected to convert into a scalable vector!");
1055   assert(V.getValueType().isFixedLengthVector() &&
1056          "Expected a fixed length vector operand!");
1057   SDLoc DL(V);
1058   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1059   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1060 }
1061 
1062 // Shrink V so it's just big enough to maintain a VT's worth of data.
1063 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1064                                          const RISCVSubtarget &Subtarget) {
1065   assert(VT.isFixedLengthVector() &&
1066          "Expected to convert into a fixed length vector!");
1067   assert(V.getValueType().isScalableVector() &&
1068          "Expected a scalable vector operand!");
1069   SDLoc DL(V);
1070   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1071   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1072 }
1073 
1074 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1075 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1076 // the vector type that it is contained in.
1077 static std::pair<SDValue, SDValue>
1078 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1079                 const RISCVSubtarget &Subtarget) {
1080   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1081   MVT XLenVT = Subtarget.getXLenVT();
1082   SDValue VL = VecVT.isFixedLengthVector()
1083                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1084                    : DAG.getRegister(RISCV::X0, XLenVT);
1085   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1086   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1087   return {Mask, VL};
1088 }
1089 
1090 // As above but assuming the given type is a scalable vector type.
1091 static std::pair<SDValue, SDValue>
1092 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1093                         const RISCVSubtarget &Subtarget) {
1094   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1095   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1096 }
1097 
1098 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1099 // of either is (currently) supported. This can get us into an infinite loop
1100 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1101 // as a ..., etc.
1102 // Until either (or both) of these can reliably lower any node, reporting that
1103 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1104 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1105 // which is not desirable.
1106 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1107     EVT VT, unsigned DefinedValues) const {
1108   return false;
1109 }
1110 
1111 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1112   // Only splats are currently supported.
1113   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1114     return true;
1115 
1116   return false;
1117 }
1118 
1119 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1120                                  const RISCVSubtarget &Subtarget) {
1121   MVT VT = Op.getSimpleValueType();
1122   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1123 
1124   MVT ContainerVT =
1125       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
1126 
1127   SDLoc DL(Op);
1128   SDValue Mask, VL;
1129   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1130 
1131   if (VT.getVectorElementType() == MVT::i1) {
1132     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1133       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1134       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1135     }
1136 
1137     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1138       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1139       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1140     }
1141 
1142     return SDValue();
1143   }
1144 
1145   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1146     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1147                                         : RISCVISD::VMV_V_X_VL;
1148     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1149     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1150   }
1151 
1152   unsigned NumElts = Op.getNumOperands();
1153 
1154   // Try and match an index sequence, which we can lower directly to the vid
1155   // instruction. An all-undef vector is matched by getSplatValue, above.
1156   if (VT.isInteger()) {
1157     bool IsVID = true;
1158     for (unsigned I = 0; I < NumElts && IsVID; I++)
1159       IsVID &= Op.getOperand(I).isUndef() ||
1160                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1161                 Op.getConstantOperandVal(I) == I);
1162 
1163     if (IsVID) {
1164       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1165       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1166     }
1167   }
1168 
1169   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1170   // which constitute a large proportion of the elements. In such cases we can
1171   // splat a vector with the dominant element and make up the shortfall with
1172   // INSERT_VECTOR_ELTs.
1173   // Note that this includes vectors of 2 elements by association. The
1174   // upper-most element is the "dominant" one, allowing us to use a splat to
1175   // "insert" the upper element, and an insert of the lower element at position
1176   // 0, which improves codegen.
1177   SDValue DominantValue;
1178   DenseMap<SDValue, unsigned> ValueCounts;
1179   // Use a fairly conservative threshold. A future optimization could be to use
1180   // multiple vmerge.vi/vmerge.vx instructions on "partially-dominant"
1181   // elements with more relaxed thresholds.
1182   unsigned NumUndefElts =
1183       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1184   unsigned NumDefElts = NumElts - NumUndefElts;
1185   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1186 
1187   for (SDValue V : Op->op_values()) {
1188     if (V.isUndef())
1189       continue;
1190 
1191     ValueCounts.insert(std::make_pair(V, 0));
1192     unsigned &Count = ValueCounts[V];
1193 
1194     // Is this value dominant?
1195     if (++Count > DominantValueCountThreshold)
1196       DominantValue = V;
1197   }
1198 
1199   // Don't perform this optimization when optimizing for size, since
1200   // materializing elements and inserting them tends to cause code bloat.
1201   if (DominantValue && !DAG.shouldOptForSize()) {
1202     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1203 
1204     if (ValueCounts.size() != 1) {
1205       MVT XLenVT = Subtarget.getXLenVT();
1206       for (unsigned I = 0; I < NumElts; ++I) {
1207         if (!Op.getOperand(I).isUndef() && Op.getOperand(I) != DominantValue)
1208           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec,
1209                             Op.getOperand(I), DAG.getConstant(I, DL, XLenVT));
1210       }
1211     }
1212 
1213     return Vec;
1214   }
1215 
1216   return SDValue();
1217 }
1218 
1219 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1220                                    const RISCVSubtarget &Subtarget) {
1221   SDValue V1 = Op.getOperand(0);
1222   SDLoc DL(Op);
1223   MVT VT = Op.getSimpleValueType();
1224   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1225 
1226   if (SVN->isSplat()) {
1227     int Lane = SVN->getSplatIndex();
1228     if (Lane >= 0) {
1229       MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
1230           DAG, VT, Subtarget);
1231 
1232       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1233       assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!");
1234 
1235       SDValue Mask, VL;
1236       std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1237       MVT XLenVT = Subtarget.getXLenVT();
1238       SDValue Gather =
1239           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1240                       DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
1241       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1242     }
1243   }
1244 
1245   return SDValue();
1246 }
1247 
1248 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1249                                      SDLoc DL, SelectionDAG &DAG,
1250                                      const RISCVSubtarget &Subtarget) {
1251   if (VT.isScalableVector())
1252     return DAG.getFPExtendOrRound(Op, DL, VT);
1253   assert(VT.isFixedLengthVector() &&
1254          "Unexpected value type for RVV FP extend/round lowering");
1255   SDValue Mask, VL;
1256   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1257   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1258                         ? RISCVISD::FP_EXTEND_VL
1259                         : RISCVISD::FP_ROUND_VL;
1260   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1261 }
1262 
1263 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1264                                             SelectionDAG &DAG) const {
1265   switch (Op.getOpcode()) {
1266   default:
1267     report_fatal_error("unimplemented operand");
1268   case ISD::GlobalAddress:
1269     return lowerGlobalAddress(Op, DAG);
1270   case ISD::BlockAddress:
1271     return lowerBlockAddress(Op, DAG);
1272   case ISD::ConstantPool:
1273     return lowerConstantPool(Op, DAG);
1274   case ISD::JumpTable:
1275     return lowerJumpTable(Op, DAG);
1276   case ISD::GlobalTLSAddress:
1277     return lowerGlobalTLSAddress(Op, DAG);
1278   case ISD::SELECT:
1279     return lowerSELECT(Op, DAG);
1280   case ISD::BRCOND:
1281     return lowerBRCOND(Op, DAG);
1282   case ISD::VASTART:
1283     return lowerVASTART(Op, DAG);
1284   case ISD::FRAMEADDR:
1285     return lowerFRAMEADDR(Op, DAG);
1286   case ISD::RETURNADDR:
1287     return lowerRETURNADDR(Op, DAG);
1288   case ISD::SHL_PARTS:
1289     return lowerShiftLeftParts(Op, DAG);
1290   case ISD::SRA_PARTS:
1291     return lowerShiftRightParts(Op, DAG, true);
1292   case ISD::SRL_PARTS:
1293     return lowerShiftRightParts(Op, DAG, false);
1294   case ISD::BITCAST: {
1295     SDValue Op0 = Op.getOperand(0);
1296     // We can handle fixed length vector bitcasts with a simple replacement
1297     // in isel.
1298     if (Op.getValueType().isFixedLengthVector()) {
1299       if (Op0.getValueType().isFixedLengthVector())
1300         return Op;
1301       return SDValue();
1302     }
1303     assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
1304             Subtarget.hasStdExtZfh()) &&
1305            "Unexpected custom legalisation");
1306     SDLoc DL(Op);
1307     if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
1308       if (Op0.getValueType() != MVT::i16)
1309         return SDValue();
1310       SDValue NewOp0 =
1311           DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
1312       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1313       return FPConv;
1314     } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
1315                Subtarget.hasStdExtF()) {
1316       if (Op0.getValueType() != MVT::i32)
1317         return SDValue();
1318       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1319       SDValue FPConv =
1320           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1321       return FPConv;
1322     }
1323     return SDValue();
1324   }
1325   case ISD::INTRINSIC_WO_CHAIN:
1326     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1327   case ISD::INTRINSIC_W_CHAIN:
1328     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1329   case ISD::BSWAP:
1330   case ISD::BITREVERSE: {
1331     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
1332     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1333     MVT VT = Op.getSimpleValueType();
1334     SDLoc DL(Op);
1335     // Start with the maximum immediate value which is the bitwidth - 1.
1336     unsigned Imm = VT.getSizeInBits() - 1;
1337     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1338     if (Op.getOpcode() == ISD::BSWAP)
1339       Imm &= ~0x7U;
1340     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
1341                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
1342   }
1343   case ISD::FSHL:
1344   case ISD::FSHR: {
1345     MVT VT = Op.getSimpleValueType();
1346     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1347     SDLoc DL(Op);
1348     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
1349     // use log(XLen) bits. Mask the shift amount accordingly.
1350     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
1351     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
1352                                 DAG.getConstant(ShAmtWidth, DL, VT));
1353     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
1354     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
1355   }
1356   case ISD::TRUNCATE: {
1357     SDLoc DL(Op);
1358     MVT VT = Op.getSimpleValueType();
1359     // Only custom-lower vector truncates
1360     if (!VT.isVector())
1361       return Op;
1362 
1363     // Truncates to mask types are handled differently
1364     if (VT.getVectorElementType() == MVT::i1)
1365       return lowerVectorMaskTrunc(Op, DAG);
1366 
1367     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
1368     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
1369     // truncate by one power of two at a time.
1370     MVT DstEltVT = VT.getVectorElementType();
1371 
1372     SDValue Src = Op.getOperand(0);
1373     MVT SrcVT = Src.getSimpleValueType();
1374     MVT SrcEltVT = SrcVT.getVectorElementType();
1375 
1376     assert(DstEltVT.bitsLT(SrcEltVT) &&
1377            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
1378            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
1379            "Unexpected vector truncate lowering");
1380 
1381     MVT ContainerVT = SrcVT;
1382     if (SrcVT.isFixedLengthVector()) {
1383       ContainerVT = getContainerForFixedLengthVector(SrcVT);
1384       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
1385     }
1386 
1387     SDValue Result = Src;
1388     SDValue Mask, VL;
1389     std::tie(Mask, VL) =
1390         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
1391     LLVMContext &Context = *DAG.getContext();
1392     const ElementCount Count = ContainerVT.getVectorElementCount();
1393     do {
1394       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
1395       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
1396       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
1397                            Mask, VL);
1398     } while (SrcEltVT != DstEltVT);
1399 
1400     if (SrcVT.isFixedLengthVector())
1401       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
1402 
1403     return Result;
1404   }
1405   case ISD::ANY_EXTEND:
1406   case ISD::ZERO_EXTEND:
1407     if (Op.getOperand(0).getValueType().isVector() &&
1408         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1409       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
1410     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
1411   case ISD::SIGN_EXTEND:
1412     if (Op.getOperand(0).getValueType().isVector() &&
1413         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1414       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
1415     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
1416   case ISD::SPLAT_VECTOR_PARTS:
1417     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
1418   case ISD::INSERT_VECTOR_ELT:
1419     return lowerINSERT_VECTOR_ELT(Op, DAG);
1420   case ISD::EXTRACT_VECTOR_ELT:
1421     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
1422   case ISD::VSCALE: {
1423     MVT VT = Op.getSimpleValueType();
1424     SDLoc DL(Op);
1425     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
1426     // We define our scalable vector types for lmul=1 to use a 64 bit known
1427     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
1428     // vscale as VLENB / 8.
1429     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
1430                                  DAG.getConstant(3, DL, VT));
1431     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
1432   }
1433   case ISD::FP_EXTEND: {
1434     // RVV can only do fp_extend to types double the size as the source. We
1435     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
1436     // via f32.
1437     SDLoc DL(Op);
1438     MVT VT = Op.getSimpleValueType();
1439     SDValue Src = Op.getOperand(0);
1440     MVT SrcVT = Src.getSimpleValueType();
1441 
1442     // Prepare any fixed-length vector operands.
1443     MVT ContainerVT = VT;
1444     if (SrcVT.isFixedLengthVector()) {
1445       ContainerVT = getContainerForFixedLengthVector(VT);
1446       MVT SrcContainerVT =
1447           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
1448       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1449     }
1450 
1451     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
1452         SrcVT.getVectorElementType() != MVT::f16) {
1453       // For scalable vectors, we only need to close the gap between
1454       // vXf16->vXf64.
1455       if (!VT.isFixedLengthVector())
1456         return Op;
1457       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
1458       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1459       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1460     }
1461 
1462     MVT InterVT = VT.changeVectorElementType(MVT::f32);
1463     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
1464     SDValue IntermediateExtend = getRVVFPExtendOrRound(
1465         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
1466 
1467     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
1468                                            DL, DAG, Subtarget);
1469     if (VT.isFixedLengthVector())
1470       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
1471     return Extend;
1472   }
1473   case ISD::FP_ROUND: {
1474     // RVV can only do fp_round to types half the size as the source. We
1475     // custom-lower f64->f16 rounds via RVV's round-to-odd float
1476     // conversion instruction.
1477     SDLoc DL(Op);
1478     MVT VT = Op.getSimpleValueType();
1479     SDValue Src = Op.getOperand(0);
1480     MVT SrcVT = Src.getSimpleValueType();
1481 
1482     // Prepare any fixed-length vector operands.
1483     MVT ContainerVT = VT;
1484     if (VT.isFixedLengthVector()) {
1485       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
1486       ContainerVT =
1487           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
1488       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1489     }
1490 
1491     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
1492         SrcVT.getVectorElementType() != MVT::f64) {
1493       // For scalable vectors, we only need to close the gap between
1494       // vXf64<->vXf16.
1495       if (!VT.isFixedLengthVector())
1496         return Op;
1497       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
1498       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1499       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1500     }
1501 
1502     SDValue Mask, VL;
1503     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1504 
1505     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
1506     SDValue IntermediateRound =
1507         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
1508     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
1509                                           DL, DAG, Subtarget);
1510 
1511     if (VT.isFixedLengthVector())
1512       return convertFromScalableVector(VT, Round, DAG, Subtarget);
1513     return Round;
1514   }
1515   case ISD::FP_TO_SINT:
1516   case ISD::FP_TO_UINT:
1517   case ISD::SINT_TO_FP:
1518   case ISD::UINT_TO_FP: {
1519     // RVV can only do fp<->int conversions to types half/double the size as
1520     // the source. We custom-lower any conversions that do two hops into
1521     // sequences.
1522     MVT VT = Op.getSimpleValueType();
1523     if (!VT.isVector())
1524       return Op;
1525     SDLoc DL(Op);
1526     SDValue Src = Op.getOperand(0);
1527     MVT EltVT = VT.getVectorElementType();
1528     MVT SrcVT = Src.getSimpleValueType();
1529     MVT SrcEltVT = SrcVT.getVectorElementType();
1530     unsigned EltSize = EltVT.getSizeInBits();
1531     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
1532     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
1533            "Unexpected vector element types");
1534 
1535     bool IsInt2FP = SrcEltVT.isInteger();
1536     // Widening conversions
1537     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
1538       if (IsInt2FP) {
1539         // Do a regular integer sign/zero extension then convert to float.
1540         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
1541                                       VT.getVectorElementCount());
1542         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
1543                                  ? ISD::ZERO_EXTEND
1544                                  : ISD::SIGN_EXTEND;
1545         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
1546         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
1547       }
1548       // FP2Int
1549       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
1550       // Do one doubling fp_extend then complete the operation by converting
1551       // to int.
1552       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1553       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
1554       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
1555     }
1556 
1557     // Narrowing conversions
1558     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
1559       if (IsInt2FP) {
1560         // One narrowing int_to_fp, then an fp_round.
1561         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
1562         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
1563         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
1564         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
1565       }
1566       // FP2Int
1567       // One narrowing fp_to_int, then truncate the integer. If the float isn't
1568       // representable by the integer, the result is poison.
1569       MVT IVecVT =
1570           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
1571                            VT.getVectorElementCount());
1572       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
1573       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
1574     }
1575 
1576     // Scalable vectors can exit here. Patterns will handle equally-sized
1577     // conversions halving/doubling ones.
1578     if (!VT.isFixedLengthVector())
1579       return Op;
1580 
1581     // For fixed-length vectors we lower to a custom "VL" node.
1582     unsigned RVVOpc = 0;
1583     switch (Op.getOpcode()) {
1584     default:
1585       llvm_unreachable("Impossible opcode");
1586     case ISD::FP_TO_SINT:
1587       RVVOpc = RISCVISD::FP_TO_SINT_VL;
1588       break;
1589     case ISD::FP_TO_UINT:
1590       RVVOpc = RISCVISD::FP_TO_UINT_VL;
1591       break;
1592     case ISD::SINT_TO_FP:
1593       RVVOpc = RISCVISD::SINT_TO_FP_VL;
1594       break;
1595     case ISD::UINT_TO_FP:
1596       RVVOpc = RISCVISD::UINT_TO_FP_VL;
1597       break;
1598     }
1599 
1600     MVT ContainerVT, SrcContainerVT;
1601     // Derive the reference container type from the larger vector type.
1602     if (SrcEltSize > EltSize) {
1603       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
1604       ContainerVT =
1605           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
1606     } else {
1607       ContainerVT = getContainerForFixedLengthVector(VT);
1608       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
1609     }
1610 
1611     SDValue Mask, VL;
1612     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1613 
1614     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1615     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
1616     return convertFromScalableVector(VT, Src, DAG, Subtarget);
1617   }
1618   case ISD::VECREDUCE_ADD:
1619   case ISD::VECREDUCE_UMAX:
1620   case ISD::VECREDUCE_SMAX:
1621   case ISD::VECREDUCE_UMIN:
1622   case ISD::VECREDUCE_SMIN:
1623   case ISD::VECREDUCE_AND:
1624   case ISD::VECREDUCE_OR:
1625   case ISD::VECREDUCE_XOR:
1626     return lowerVECREDUCE(Op, DAG);
1627   case ISD::VECREDUCE_FADD:
1628   case ISD::VECREDUCE_SEQ_FADD:
1629     return lowerFPVECREDUCE(Op, DAG);
1630   case ISD::INSERT_SUBVECTOR:
1631     return lowerINSERT_SUBVECTOR(Op, DAG);
1632   case ISD::EXTRACT_SUBVECTOR:
1633     return lowerEXTRACT_SUBVECTOR(Op, DAG);
1634   case ISD::VECTOR_REVERSE:
1635     return lowerVECTOR_REVERSE(Op, DAG);
1636   case ISD::BUILD_VECTOR:
1637     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
1638   case ISD::VECTOR_SHUFFLE:
1639     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
1640   case ISD::CONCAT_VECTORS: {
1641     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
1642     // better than going through the stack, as the default expansion does.
1643     SDLoc DL(Op);
1644     MVT VT = Op.getSimpleValueType();
1645     unsigned NumOpElts =
1646         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
1647     SDValue Vec = DAG.getUNDEF(VT);
1648     for (const auto &OpIdx : enumerate(Op->ops()))
1649       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
1650                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
1651     return Vec;
1652   }
1653   case ISD::LOAD:
1654     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
1655   case ISD::STORE:
1656     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
1657   case ISD::MLOAD:
1658     return lowerMLOAD(Op, DAG);
1659   case ISD::MSTORE:
1660     return lowerMSTORE(Op, DAG);
1661   case ISD::SETCC:
1662     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
1663   case ISD::ADD:
1664     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
1665   case ISD::SUB:
1666     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
1667   case ISD::MUL:
1668     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
1669   case ISD::MULHS:
1670     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
1671   case ISD::MULHU:
1672     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
1673   case ISD::AND:
1674     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
1675                                               RISCVISD::AND_VL);
1676   case ISD::OR:
1677     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
1678                                               RISCVISD::OR_VL);
1679   case ISD::XOR:
1680     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
1681                                               RISCVISD::XOR_VL);
1682   case ISD::SDIV:
1683     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
1684   case ISD::SREM:
1685     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
1686   case ISD::UDIV:
1687     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
1688   case ISD::UREM:
1689     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
1690   case ISD::SHL:
1691     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
1692   case ISD::SRA:
1693     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
1694   case ISD::SRL:
1695     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
1696   case ISD::FADD:
1697     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
1698   case ISD::FSUB:
1699     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
1700   case ISD::FMUL:
1701     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
1702   case ISD::FDIV:
1703     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
1704   case ISD::FNEG:
1705     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
1706   case ISD::FABS:
1707     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
1708   case ISD::FSQRT:
1709     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
1710   case ISD::FMA:
1711     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
1712   case ISD::SMIN:
1713     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
1714   case ISD::SMAX:
1715     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
1716   case ISD::UMIN:
1717     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
1718   case ISD::UMAX:
1719     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
1720   case ISD::ABS:
1721     return lowerABS(Op, DAG);
1722   case ISD::VSELECT:
1723     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
1724   case ISD::FCOPYSIGN:
1725     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
1726   case ISD::MGATHER:
1727   case ISD::MSCATTER:
1728     return lowerMGATHERMSCATTER(Op, DAG);
1729   }
1730 }
1731 
1732 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
1733                              SelectionDAG &DAG, unsigned Flags) {
1734   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
1735 }
1736 
1737 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
1738                              SelectionDAG &DAG, unsigned Flags) {
1739   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
1740                                    Flags);
1741 }
1742 
1743 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
1744                              SelectionDAG &DAG, unsigned Flags) {
1745   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
1746                                    N->getOffset(), Flags);
1747 }
1748 
1749 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
1750                              SelectionDAG &DAG, unsigned Flags) {
1751   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
1752 }
1753 
1754 template <class NodeTy>
1755 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
1756                                      bool IsLocal) const {
1757   SDLoc DL(N);
1758   EVT Ty = getPointerTy(DAG.getDataLayout());
1759 
1760   if (isPositionIndependent()) {
1761     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
1762     if (IsLocal)
1763       // Use PC-relative addressing to access the symbol. This generates the
1764       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1765       // %pcrel_lo(auipc)).
1766       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
1767 
1768     // Use PC-relative addressing to access the GOT for this symbol, then load
1769     // the address from the GOT. This generates the pattern (PseudoLA sym),
1770     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1771     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
1772   }
1773 
1774   switch (getTargetMachine().getCodeModel()) {
1775   default:
1776     report_fatal_error("Unsupported code model for lowering");
1777   case CodeModel::Small: {
1778     // Generate a sequence for accessing addresses within the first 2 GiB of
1779     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
1780     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
1781     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
1782     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
1783     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
1784   }
1785   case CodeModel::Medium: {
1786     // Generate a sequence for accessing addresses within any 2GiB range within
1787     // the address space. This generates the pattern (PseudoLLA sym), which
1788     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1789     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
1790     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
1791   }
1792   }
1793 }
1794 
1795 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
1796                                                 SelectionDAG &DAG) const {
1797   SDLoc DL(Op);
1798   EVT Ty = Op.getValueType();
1799   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
1800   int64_t Offset = N->getOffset();
1801   MVT XLenVT = Subtarget.getXLenVT();
1802 
1803   const GlobalValue *GV = N->getGlobal();
1804   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
1805   SDValue Addr = getAddr(N, DAG, IsLocal);
1806 
1807   // In order to maximise the opportunity for common subexpression elimination,
1808   // emit a separate ADD node for the global address offset instead of folding
1809   // it in the global address node. Later peephole optimisations may choose to
1810   // fold it back in when profitable.
1811   if (Offset != 0)
1812     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
1813                        DAG.getConstant(Offset, DL, XLenVT));
1814   return Addr;
1815 }
1816 
1817 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
1818                                                SelectionDAG &DAG) const {
1819   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
1820 
1821   return getAddr(N, DAG);
1822 }
1823 
1824 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
1825                                                SelectionDAG &DAG) const {
1826   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
1827 
1828   return getAddr(N, DAG);
1829 }
1830 
1831 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
1832                                             SelectionDAG &DAG) const {
1833   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
1834 
1835   return getAddr(N, DAG);
1836 }
1837 
1838 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
1839                                               SelectionDAG &DAG,
1840                                               bool UseGOT) const {
1841   SDLoc DL(N);
1842   EVT Ty = getPointerTy(DAG.getDataLayout());
1843   const GlobalValue *GV = N->getGlobal();
1844   MVT XLenVT = Subtarget.getXLenVT();
1845 
1846   if (UseGOT) {
1847     // Use PC-relative addressing to access the GOT for this TLS symbol, then
1848     // load the address from the GOT and add the thread pointer. This generates
1849     // the pattern (PseudoLA_TLS_IE sym), which expands to
1850     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
1851     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
1852     SDValue Load =
1853         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
1854 
1855     // Add the thread pointer.
1856     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
1857     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
1858   }
1859 
1860   // Generate a sequence for accessing the address relative to the thread
1861   // pointer, with the appropriate adjustment for the thread pointer offset.
1862   // This generates the pattern
1863   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
1864   SDValue AddrHi =
1865       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
1866   SDValue AddrAdd =
1867       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
1868   SDValue AddrLo =
1869       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
1870 
1871   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
1872   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
1873   SDValue MNAdd = SDValue(
1874       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
1875       0);
1876   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
1877 }
1878 
1879 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
1880                                                SelectionDAG &DAG) const {
1881   SDLoc DL(N);
1882   EVT Ty = getPointerTy(DAG.getDataLayout());
1883   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
1884   const GlobalValue *GV = N->getGlobal();
1885 
1886   // Use a PC-relative addressing mode to access the global dynamic GOT address.
1887   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
1888   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
1889   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
1890   SDValue Load =
1891       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
1892 
1893   // Prepare argument list to generate call.
1894   ArgListTy Args;
1895   ArgListEntry Entry;
1896   Entry.Node = Load;
1897   Entry.Ty = CallTy;
1898   Args.push_back(Entry);
1899 
1900   // Setup call to __tls_get_addr.
1901   TargetLowering::CallLoweringInfo CLI(DAG);
1902   CLI.setDebugLoc(DL)
1903       .setChain(DAG.getEntryNode())
1904       .setLibCallee(CallingConv::C, CallTy,
1905                     DAG.getExternalSymbol("__tls_get_addr", Ty),
1906                     std::move(Args));
1907 
1908   return LowerCallTo(CLI).first;
1909 }
1910 
1911 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
1912                                                    SelectionDAG &DAG) const {
1913   SDLoc DL(Op);
1914   EVT Ty = Op.getValueType();
1915   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
1916   int64_t Offset = N->getOffset();
1917   MVT XLenVT = Subtarget.getXLenVT();
1918 
1919   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
1920 
1921   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
1922       CallingConv::GHC)
1923     report_fatal_error("In GHC calling convention TLS is not supported");
1924 
1925   SDValue Addr;
1926   switch (Model) {
1927   case TLSModel::LocalExec:
1928     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
1929     break;
1930   case TLSModel::InitialExec:
1931     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
1932     break;
1933   case TLSModel::LocalDynamic:
1934   case TLSModel::GeneralDynamic:
1935     Addr = getDynamicTLSAddr(N, DAG);
1936     break;
1937   }
1938 
1939   // In order to maximise the opportunity for common subexpression elimination,
1940   // emit a separate ADD node for the global address offset instead of folding
1941   // it in the global address node. Later peephole optimisations may choose to
1942   // fold it back in when profitable.
1943   if (Offset != 0)
1944     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
1945                        DAG.getConstant(Offset, DL, XLenVT));
1946   return Addr;
1947 }
1948 
1949 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
1950   SDValue CondV = Op.getOperand(0);
1951   SDValue TrueV = Op.getOperand(1);
1952   SDValue FalseV = Op.getOperand(2);
1953   SDLoc DL(Op);
1954   MVT XLenVT = Subtarget.getXLenVT();
1955 
1956   // If the result type is XLenVT and CondV is the output of a SETCC node
1957   // which also operated on XLenVT inputs, then merge the SETCC node into the
1958   // lowered RISCVISD::SELECT_CC to take advantage of the integer
1959   // compare+branch instructions. i.e.:
1960   // (select (setcc lhs, rhs, cc), truev, falsev)
1961   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
1962   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
1963       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
1964     SDValue LHS = CondV.getOperand(0);
1965     SDValue RHS = CondV.getOperand(1);
1966     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
1967     ISD::CondCode CCVal = CC->get();
1968 
1969     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
1970 
1971     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
1972     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
1973     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
1974   }
1975 
1976   // Otherwise:
1977   // (select condv, truev, falsev)
1978   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
1979   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
1980   SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
1981 
1982   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
1983 
1984   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
1985 }
1986 
1987 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1988   SDValue CondV = Op.getOperand(1);
1989   SDLoc DL(Op);
1990   MVT XLenVT = Subtarget.getXLenVT();
1991 
1992   if (CondV.getOpcode() == ISD::SETCC &&
1993       CondV.getOperand(0).getValueType() == XLenVT) {
1994     SDValue LHS = CondV.getOperand(0);
1995     SDValue RHS = CondV.getOperand(1);
1996     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
1997 
1998     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
1999 
2000     SDValue TargetCC = DAG.getCondCode(CCVal);
2001     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2002                        LHS, RHS, TargetCC, Op.getOperand(2));
2003   }
2004 
2005   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2006                      CondV, DAG.getConstant(0, DL, XLenVT),
2007                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2008 }
2009 
2010 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2011   MachineFunction &MF = DAG.getMachineFunction();
2012   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2013 
2014   SDLoc DL(Op);
2015   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2016                                  getPointerTy(MF.getDataLayout()));
2017 
2018   // vastart just stores the address of the VarArgsFrameIndex slot into the
2019   // memory location argument.
2020   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2021   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2022                       MachinePointerInfo(SV));
2023 }
2024 
2025 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2026                                             SelectionDAG &DAG) const {
2027   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2028   MachineFunction &MF = DAG.getMachineFunction();
2029   MachineFrameInfo &MFI = MF.getFrameInfo();
2030   MFI.setFrameAddressIsTaken(true);
2031   Register FrameReg = RI.getFrameRegister(MF);
2032   int XLenInBytes = Subtarget.getXLen() / 8;
2033 
2034   EVT VT = Op.getValueType();
2035   SDLoc DL(Op);
2036   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2037   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2038   while (Depth--) {
2039     int Offset = -(XLenInBytes * 2);
2040     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2041                               DAG.getIntPtrConstant(Offset, DL));
2042     FrameAddr =
2043         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2044   }
2045   return FrameAddr;
2046 }
2047 
2048 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2049                                              SelectionDAG &DAG) const {
2050   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2051   MachineFunction &MF = DAG.getMachineFunction();
2052   MachineFrameInfo &MFI = MF.getFrameInfo();
2053   MFI.setReturnAddressIsTaken(true);
2054   MVT XLenVT = Subtarget.getXLenVT();
2055   int XLenInBytes = Subtarget.getXLen() / 8;
2056 
2057   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2058     return SDValue();
2059 
2060   EVT VT = Op.getValueType();
2061   SDLoc DL(Op);
2062   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2063   if (Depth) {
2064     int Off = -XLenInBytes;
2065     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2066     SDValue Offset = DAG.getConstant(Off, DL, VT);
2067     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2068                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2069                        MachinePointerInfo());
2070   }
2071 
2072   // Return the value of the return address register, marking it an implicit
2073   // live-in.
2074   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2075   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2076 }
2077 
2078 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2079                                                  SelectionDAG &DAG) const {
2080   SDLoc DL(Op);
2081   SDValue Lo = Op.getOperand(0);
2082   SDValue Hi = Op.getOperand(1);
2083   SDValue Shamt = Op.getOperand(2);
2084   EVT VT = Lo.getValueType();
2085 
2086   // if Shamt-XLEN < 0: // Shamt < XLEN
2087   //   Lo = Lo << Shamt
2088   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2089   // else:
2090   //   Lo = 0
2091   //   Hi = Lo << (Shamt-XLEN)
2092 
2093   SDValue Zero = DAG.getConstant(0, DL, VT);
2094   SDValue One = DAG.getConstant(1, DL, VT);
2095   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2096   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2097   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2098   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2099 
2100   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2101   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2102   SDValue ShiftRightLo =
2103       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2104   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2105   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2106   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2107 
2108   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2109 
2110   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2111   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2112 
2113   SDValue Parts[2] = {Lo, Hi};
2114   return DAG.getMergeValues(Parts, DL);
2115 }
2116 
2117 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2118                                                   bool IsSRA) const {
2119   SDLoc DL(Op);
2120   SDValue Lo = Op.getOperand(0);
2121   SDValue Hi = Op.getOperand(1);
2122   SDValue Shamt = Op.getOperand(2);
2123   EVT VT = Lo.getValueType();
2124 
2125   // SRA expansion:
2126   //   if Shamt-XLEN < 0: // Shamt < XLEN
2127   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2128   //     Hi = Hi >>s Shamt
2129   //   else:
2130   //     Lo = Hi >>s (Shamt-XLEN);
2131   //     Hi = Hi >>s (XLEN-1)
2132   //
2133   // SRL expansion:
2134   //   if Shamt-XLEN < 0: // Shamt < XLEN
2135   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2136   //     Hi = Hi >>u Shamt
2137   //   else:
2138   //     Lo = Hi >>u (Shamt-XLEN);
2139   //     Hi = 0;
2140 
2141   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2142 
2143   SDValue Zero = DAG.getConstant(0, DL, VT);
2144   SDValue One = DAG.getConstant(1, DL, VT);
2145   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2146   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2147   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2148   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2149 
2150   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2151   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2152   SDValue ShiftLeftHi =
2153       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2154   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2155   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2156   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2157   SDValue HiFalse =
2158       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2159 
2160   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2161 
2162   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2163   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2164 
2165   SDValue Parts[2] = {Lo, Hi};
2166   return DAG.getMergeValues(Parts, DL);
2167 }
2168 
2169 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2170 // illegal (currently only vXi64 RV32).
2171 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2172 // them to SPLAT_VECTOR_I64
2173 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2174                                                      SelectionDAG &DAG) const {
2175   SDLoc DL(Op);
2176   EVT VecVT = Op.getValueType();
2177   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2178          "Unexpected SPLAT_VECTOR_PARTS lowering");
2179 
2180   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2181   SDValue Lo = Op.getOperand(0);
2182   SDValue Hi = Op.getOperand(1);
2183 
2184   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2185     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2186     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2187     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2188     // node in order to try and match RVV vector/scalar instructions.
2189     if ((LoC >> 31) == HiC)
2190       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2191   }
2192 
2193   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
2194   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
2195   // vmv.v.x vX, hi
2196   // vsll.vx vX, vX, /*32*/
2197   // vmv.v.x vY, lo
2198   // vsll.vx vY, vY, /*32*/
2199   // vsrl.vx vY, vY, /*32*/
2200   // vor.vv vX, vX, vY
2201   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
2202 
2203   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2204   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
2205   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
2206 
2207   if (isNullConstant(Hi))
2208     return Lo;
2209 
2210   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
2211   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
2212 
2213   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
2214 }
2215 
2216 // Custom-lower extensions from mask vectors by using a vselect either with 1
2217 // for zero/any-extension or -1 for sign-extension:
2218 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2219 // Note that any-extension is lowered identically to zero-extension.
2220 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2221                                                 int64_t ExtTrueVal) const {
2222   SDLoc DL(Op);
2223   MVT VecVT = Op.getSimpleValueType();
2224   SDValue Src = Op.getOperand(0);
2225   // Only custom-lower extensions from mask types
2226   assert(Src.getValueType().isVector() &&
2227          Src.getValueType().getVectorElementType() == MVT::i1);
2228 
2229   MVT XLenVT = Subtarget.getXLenVT();
2230   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2231   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2232 
2233   if (VecVT.isScalableVector()) {
2234     // Be careful not to introduce illegal scalar types at this stage, and be
2235     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2236     // illegal and must be expanded. Since we know that the constants are
2237     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2238     bool IsRV32E64 =
2239         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2240 
2241     if (!IsRV32E64) {
2242       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2243       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2244     } else {
2245       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2246       SplatTrueVal =
2247           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2248     }
2249 
2250     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2251   }
2252 
2253   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2254   MVT I1ContainerVT =
2255       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2256 
2257   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2258 
2259   SDValue Mask, VL;
2260   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2261 
2262   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
2263   SplatTrueVal =
2264       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
2265   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
2266                                SplatTrueVal, SplatZero, VL);
2267 
2268   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
2269 }
2270 
2271 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
2272     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
2273   MVT ExtVT = Op.getSimpleValueType();
2274   // Only custom-lower extensions from fixed-length vector types.
2275   if (!ExtVT.isFixedLengthVector())
2276     return Op;
2277   MVT VT = Op.getOperand(0).getSimpleValueType();
2278   // Grab the canonical container type for the extended type. Infer the smaller
2279   // type from that to ensure the same number of vector elements, as we know
2280   // the LMUL will be sufficient to hold the smaller type.
2281   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
2282   // Get the extended container type manually to ensure the same number of
2283   // vector elements between source and dest.
2284   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
2285                                      ContainerExtVT.getVectorElementCount());
2286 
2287   SDValue Op1 =
2288       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
2289 
2290   SDLoc DL(Op);
2291   SDValue Mask, VL;
2292   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2293 
2294   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
2295 
2296   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
2297 }
2298 
2299 // Custom-lower truncations from vectors to mask vectors by using a mask and a
2300 // setcc operation:
2301 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
2302 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
2303                                                   SelectionDAG &DAG) const {
2304   SDLoc DL(Op);
2305   EVT MaskVT = Op.getValueType();
2306   // Only expect to custom-lower truncations to mask types
2307   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
2308          "Unexpected type for vector mask lowering");
2309   SDValue Src = Op.getOperand(0);
2310   MVT VecVT = Src.getSimpleValueType();
2311 
2312   // If this is a fixed vector, we need to convert it to a scalable vector.
2313   MVT ContainerVT = VecVT;
2314   if (VecVT.isFixedLengthVector()) {
2315     ContainerVT = getContainerForFixedLengthVector(VecVT);
2316     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2317   }
2318 
2319   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
2320   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2321 
2322   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
2323   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
2324 
2325   if (VecVT.isScalableVector()) {
2326     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
2327     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
2328   }
2329 
2330   SDValue Mask, VL;
2331   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2332 
2333   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2334   SDValue Trunc =
2335       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
2336   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
2337                       DAG.getCondCode(ISD::SETNE), Mask, VL);
2338   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
2339 }
2340 
2341 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
2342 // first position of a vector, and that vector is slid up to the insert index.
2343 // By limiting the active vector length to index+1 and merging with the
2344 // original vector (with an undisturbed tail policy for elements >= VL), we
2345 // achieve the desired result of leaving all elements untouched except the one
2346 // at VL-1, which is replaced with the desired value.
2347 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
2348                                                     SelectionDAG &DAG) const {
2349   SDLoc DL(Op);
2350   MVT VecVT = Op.getSimpleValueType();
2351   SDValue Vec = Op.getOperand(0);
2352   SDValue Val = Op.getOperand(1);
2353   SDValue Idx = Op.getOperand(2);
2354 
2355   MVT ContainerVT = VecVT;
2356   // If the operand is a fixed-length vector, convert to a scalable one.
2357   if (VecVT.isFixedLengthVector()) {
2358     ContainerVT = getContainerForFixedLengthVector(VecVT);
2359     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2360   }
2361 
2362   MVT XLenVT = Subtarget.getXLenVT();
2363 
2364   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2365   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
2366   // Even i64-element vectors on RV32 can be lowered without scalar
2367   // legalization if the most-significant 32 bits of the value are not affected
2368   // by the sign-extension of the lower 32 bits.
2369   // TODO: We could also catch sign extensions of a 32-bit value.
2370   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
2371     const auto *CVal = cast<ConstantSDNode>(Val);
2372     if (isInt<32>(CVal->getSExtValue())) {
2373       IsLegalInsert = true;
2374       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
2375     }
2376   }
2377 
2378   SDValue Mask, VL;
2379   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2380 
2381   SDValue ValInVec;
2382 
2383   if (IsLegalInsert) {
2384     if (isNullConstant(Idx)) {
2385       Vec = DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL);
2386       if (!VecVT.isFixedLengthVector())
2387         return Vec;
2388       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
2389     }
2390     ValInVec = DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT,
2391                            DAG.getUNDEF(ContainerVT), Val, VL);
2392   } else {
2393     // On RV32, i64-element vectors must be specially handled to place the
2394     // value at element 0, by using two vslide1up instructions in sequence on
2395     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
2396     // this.
2397     SDValue One = DAG.getConstant(1, DL, XLenVT);
2398     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
2399     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
2400     MVT I32ContainerVT =
2401         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
2402     SDValue I32Mask =
2403         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
2404     // Limit the active VL to two.
2405     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
2406     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
2407     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
2408     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
2409                            InsertI64VL);
2410     // First slide in the hi value, then the lo in underneath it.
2411     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
2412                            ValHi, I32Mask, InsertI64VL);
2413     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
2414                            ValLo, I32Mask, InsertI64VL);
2415     // Bitcast back to the right container type.
2416     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
2417   }
2418 
2419   // Now that the value is in a vector, slide it into position.
2420   SDValue InsertVL =
2421       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
2422   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
2423                                 ValInVec, Idx, Mask, InsertVL);
2424   if (!VecVT.isFixedLengthVector())
2425     return Slideup;
2426   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
2427 }
2428 
2429 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
2430 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
2431 // types this is done using VMV_X_S to allow us to glean information about the
2432 // sign bits of the result.
2433 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
2434                                                      SelectionDAG &DAG) const {
2435   SDLoc DL(Op);
2436   SDValue Idx = Op.getOperand(1);
2437   SDValue Vec = Op.getOperand(0);
2438   EVT EltVT = Op.getValueType();
2439   MVT VecVT = Vec.getSimpleValueType();
2440   MVT XLenVT = Subtarget.getXLenVT();
2441 
2442   if (VecVT.getVectorElementType() == MVT::i1) {
2443     // FIXME: For now we just promote to an i8 vector and extract from that,
2444     // but this is probably not optimal.
2445     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
2446     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
2447     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
2448   }
2449 
2450   // If this is a fixed vector, we need to convert it to a scalable vector.
2451   MVT ContainerVT = VecVT;
2452   if (VecVT.isFixedLengthVector()) {
2453     ContainerVT = getContainerForFixedLengthVector(VecVT);
2454     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2455   }
2456 
2457   // If the index is 0, the vector is already in the right position.
2458   if (!isNullConstant(Idx)) {
2459     // Use a VL of 1 to avoid processing more elements than we need.
2460     SDValue VL = DAG.getConstant(1, DL, XLenVT);
2461     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2462     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2463     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2464                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
2465   }
2466 
2467   if (!EltVT.isInteger()) {
2468     // Floating-point extracts are handled in TableGen.
2469     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
2470                        DAG.getConstant(0, DL, XLenVT));
2471   }
2472 
2473   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
2474   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
2475 }
2476 
2477 // Called by type legalization to handle splat of i64 on RV32.
2478 // FIXME: We can optimize this when the type has sign or zero bits in one
2479 // of the halves.
2480 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2481                                    SDValue VL, SelectionDAG &DAG) {
2482   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VT);
2483   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2484                            DAG.getConstant(0, DL, MVT::i32));
2485   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2486                            DAG.getConstant(1, DL, MVT::i32));
2487 
2488   // vmv.v.x vX, hi
2489   // vsll.vx vX, vX, /*32*/
2490   // vmv.v.x vY, lo
2491   // vsll.vx vY, vY, /*32*/
2492   // vsrl.vx vY, vY, /*32*/
2493   // vor.vv vX, vX, vY
2494   MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
2495   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2496   Lo = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2497   Lo = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL);
2498   Lo = DAG.getNode(RISCVISD::SRL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL);
2499 
2500   Hi = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Hi, VL);
2501   Hi = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Hi, ThirtyTwoV, Mask, VL);
2502 
2503   return DAG.getNode(RISCVISD::OR_VL, DL, VT, Lo, Hi, Mask, VL);
2504 }
2505 
2506 // Some RVV intrinsics may claim that they want an integer operand to be
2507 // promoted or expanded.
2508 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
2509                                           const RISCVSubtarget &Subtarget) {
2510   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2511           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
2512          "Unexpected opcode");
2513 
2514   if (!Subtarget.hasStdExtV())
2515     return SDValue();
2516 
2517   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
2518   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
2519   SDLoc DL(Op);
2520 
2521   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
2522       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
2523   if (!II || !II->SplatOperand)
2524     return SDValue();
2525 
2526   unsigned SplatOp = II->SplatOperand + HasChain;
2527   assert(SplatOp < Op.getNumOperands());
2528 
2529   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
2530   SDValue &ScalarOp = Operands[SplatOp];
2531   MVT OpVT = ScalarOp.getSimpleValueType();
2532   MVT VT = Op.getSimpleValueType();
2533   MVT XLenVT = Subtarget.getXLenVT();
2534 
2535   // If this isn't a scalar, or its type is XLenVT we're done.
2536   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
2537     return SDValue();
2538 
2539   // Simplest case is that the operand needs to be promoted to XLenVT.
2540   if (OpVT.bitsLT(XLenVT)) {
2541     // If the operand is a constant, sign extend to increase our chances
2542     // of being able to use a .vi instruction. ANY_EXTEND would become a
2543     // a zero extend and the simm5 check in isel would fail.
2544     // FIXME: Should we ignore the upper bits in isel instead?
2545     unsigned ExtOpc =
2546         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2547     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
2548     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2549   }
2550 
2551   // The more complex case is when the scalar is larger than XLenVT.
2552   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
2553          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
2554 
2555   // If this is a sign-extended 32-bit constant, we can truncate it and rely
2556   // on the instruction to sign-extend since SEW>XLEN.
2557   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
2558     if (isInt<32>(CVal->getSExtValue())) {
2559       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
2560       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2561     }
2562   }
2563 
2564   // We need to convert the scalar to a splat vector.
2565   // FIXME: Can we implicitly truncate the scalar if it is known to
2566   // be sign extended?
2567   // VL should be the last operand.
2568   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
2569   assert(VL.getValueType() == XLenVT);
2570   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
2571   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2572 }
2573 
2574 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
2575                                                      SelectionDAG &DAG) const {
2576   unsigned IntNo = Op.getConstantOperandVal(0);
2577   SDLoc DL(Op);
2578   MVT XLenVT = Subtarget.getXLenVT();
2579 
2580   switch (IntNo) {
2581   default:
2582     break; // Don't custom lower most intrinsics.
2583   case Intrinsic::thread_pointer: {
2584     EVT PtrVT = getPointerTy(DAG.getDataLayout());
2585     return DAG.getRegister(RISCV::X4, PtrVT);
2586   }
2587   case Intrinsic::riscv_vmv_x_s:
2588     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
2589     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
2590                        Op.getOperand(1));
2591   case Intrinsic::riscv_vmv_v_x: {
2592     SDValue Scalar = Op.getOperand(1);
2593     if (Scalar.getValueType().bitsLE(XLenVT)) {
2594       unsigned ExtOpc =
2595           isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2596       Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2597       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), Scalar,
2598                          Op.getOperand(2));
2599     }
2600 
2601     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
2602 
2603     // If this is a sign-extended 32-bit constant, we can truncate it and rely
2604     // on the instruction to sign-extend since SEW>XLEN.
2605     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) {
2606       if (isInt<32>(CVal->getSExtValue()))
2607         return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(),
2608                            DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32),
2609                            Op.getOperand(2));
2610     }
2611 
2612     // Otherwise use the more complicated splatting algorithm.
2613     return splatSplitI64WithVL(DL, Op.getSimpleValueType(), Scalar,
2614                                Op.getOperand(2), DAG);
2615   }
2616   case Intrinsic::riscv_vfmv_v_f:
2617     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
2618                        Op.getOperand(1), Op.getOperand(2));
2619   case Intrinsic::riscv_vmv_s_x: {
2620     SDValue Scalar = Op.getOperand(2);
2621 
2622     if (Scalar.getValueType().bitsLE(XLenVT)) {
2623       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
2624       return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, Op.getValueType(),
2625                          Op.getOperand(1), Scalar, Op.getOperand(3));
2626     }
2627 
2628     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
2629 
2630     // This is an i64 value that lives in two scalar registers. We have to
2631     // insert this in a convoluted way. First we build vXi64 splat containing
2632     // the/ two values that we assemble using some bit math. Next we'll use
2633     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
2634     // to merge element 0 from our splat into the source vector.
2635     // FIXME: This is probably not the best way to do this, but it is
2636     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
2637     // point.
2638     //   vmv.v.x vX, hi
2639     //   vsll.vx vX, vX, /*32*/
2640     //   vmv.v.x vY, lo
2641     //   vsll.vx vY, vY, /*32*/
2642     //   vsrl.vx vY, vY, /*32*/
2643     //   vor.vv vX, vX, vY
2644     //
2645     //   vid.v      vVid
2646     //   vmseq.vx   mMask, vVid, 0
2647     //   vmerge.vvm vDest, vSrc, vVal, mMask
2648     MVT VT = Op.getSimpleValueType();
2649     SDValue Vec = Op.getOperand(1);
2650     SDValue VL = Op.getOperand(3);
2651 
2652     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2653     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
2654                                       DAG.getConstant(0, DL, MVT::i32), VL);
2655 
2656     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
2657     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2658     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
2659     SDValue SelectCond =
2660         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
2661                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
2662     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
2663                        Vec, VL);
2664   }
2665   }
2666 
2667   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
2668 }
2669 
2670 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
2671                                                     SelectionDAG &DAG) const {
2672   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
2673 }
2674 
2675 static MVT getLMUL1VT(MVT VT) {
2676   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
2677          "Unexpected vector MVT");
2678   return MVT::getScalableVectorVT(
2679       VT.getVectorElementType(),
2680       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
2681 }
2682 
2683 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
2684   switch (ISDOpcode) {
2685   default:
2686     llvm_unreachable("Unhandled reduction");
2687   case ISD::VECREDUCE_ADD:
2688     return RISCVISD::VECREDUCE_ADD_VL;
2689   case ISD::VECREDUCE_UMAX:
2690     return RISCVISD::VECREDUCE_UMAX_VL;
2691   case ISD::VECREDUCE_SMAX:
2692     return RISCVISD::VECREDUCE_SMAX_VL;
2693   case ISD::VECREDUCE_UMIN:
2694     return RISCVISD::VECREDUCE_UMIN_VL;
2695   case ISD::VECREDUCE_SMIN:
2696     return RISCVISD::VECREDUCE_SMIN_VL;
2697   case ISD::VECREDUCE_AND:
2698     return RISCVISD::VECREDUCE_AND_VL;
2699   case ISD::VECREDUCE_OR:
2700     return RISCVISD::VECREDUCE_OR_VL;
2701   case ISD::VECREDUCE_XOR:
2702     return RISCVISD::VECREDUCE_XOR_VL;
2703   }
2704 }
2705 
2706 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
2707                                             SelectionDAG &DAG) const {
2708   SDLoc DL(Op);
2709   SDValue Vec = Op.getOperand(0);
2710   EVT VecEVT = Vec.getValueType();
2711 
2712   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
2713 
2714   // Due to ordering in legalize types we may have a vector type that needs to
2715   // be split. Do that manually so we can get down to a legal type.
2716   while (getTypeAction(*DAG.getContext(), VecEVT) ==
2717          TargetLowering::TypeSplitVector) {
2718     SDValue Lo, Hi;
2719     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
2720     VecEVT = Lo.getValueType();
2721     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
2722   }
2723 
2724   // TODO: The type may need to be widened rather than split. Or widened before
2725   // it can be split.
2726   if (!isTypeLegal(VecEVT))
2727     return SDValue();
2728 
2729   MVT VecVT = VecEVT.getSimpleVT();
2730   MVT VecEltVT = VecVT.getVectorElementType();
2731   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
2732 
2733   MVT ContainerVT = VecVT;
2734   if (VecVT.isFixedLengthVector()) {
2735     ContainerVT = getContainerForFixedLengthVector(VecVT);
2736     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2737   }
2738 
2739   MVT M1VT = getLMUL1VT(ContainerVT);
2740 
2741   SDValue Mask, VL;
2742   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2743 
2744   // FIXME: This is a VLMAX splat which might be too large and can prevent
2745   // vsetvli removal.
2746   SDValue NeutralElem =
2747       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
2748   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
2749   SDValue Reduction =
2750       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
2751   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
2752                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2753   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
2754 }
2755 
2756 // Given a reduction op, this function returns the matching reduction opcode,
2757 // the vector SDValue and the scalar SDValue required to lower this to a
2758 // RISCVISD node.
2759 static std::tuple<unsigned, SDValue, SDValue>
2760 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
2761   SDLoc DL(Op);
2762   switch (Op.getOpcode()) {
2763   default:
2764     llvm_unreachable("Unhandled reduction");
2765   case ISD::VECREDUCE_FADD:
2766     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
2767                            DAG.getConstantFP(0.0, DL, EltVT));
2768   case ISD::VECREDUCE_SEQ_FADD:
2769     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
2770                            Op.getOperand(0));
2771   }
2772 }
2773 
2774 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
2775                                               SelectionDAG &DAG) const {
2776   SDLoc DL(Op);
2777   MVT VecEltVT = Op.getSimpleValueType();
2778 
2779   unsigned RVVOpcode;
2780   SDValue VectorVal, ScalarVal;
2781   std::tie(RVVOpcode, VectorVal, ScalarVal) =
2782       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
2783   MVT VecVT = VectorVal.getSimpleValueType();
2784 
2785   MVT ContainerVT = VecVT;
2786   if (VecVT.isFixedLengthVector()) {
2787     ContainerVT = getContainerForFixedLengthVector(VecVT);
2788     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
2789   }
2790 
2791   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
2792 
2793   SDValue Mask, VL;
2794   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2795 
2796   // FIXME: This is a VLMAX splat which might be too large and can prevent
2797   // vsetvli removal.
2798   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
2799   SDValue Reduction =
2800       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
2801   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
2802                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2803 }
2804 
2805 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
2806                                                    SelectionDAG &DAG) const {
2807   SDValue Vec = Op.getOperand(0);
2808   SDValue SubVec = Op.getOperand(1);
2809   MVT VecVT = Vec.getSimpleValueType();
2810   MVT SubVecVT = SubVec.getSimpleValueType();
2811 
2812   SDLoc DL(Op);
2813   MVT XLenVT = Subtarget.getXLenVT();
2814   unsigned OrigIdx = Op.getConstantOperandVal(2);
2815   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2816 
2817   // We don't have the ability to slide mask vectors up indexed by their i1
2818   // elements; the smallest we can do is i8. Often we are able to bitcast to
2819   // equivalent i8 vectors. Note that when inserting a fixed-length vector
2820   // into a scalable one, we might not necessarily have enough scalable
2821   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
2822   if (SubVecVT.getVectorElementType() == MVT::i1 &&
2823       (OrigIdx != 0 || !Vec.isUndef())) {
2824     if (VecVT.getVectorMinNumElements() >= 8 &&
2825         SubVecVT.getVectorMinNumElements() >= 8) {
2826       assert(OrigIdx % 8 == 0 && "Invalid index");
2827       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
2828              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
2829              "Unexpected mask vector lowering");
2830       OrigIdx /= 8;
2831       SubVecVT =
2832           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
2833                            SubVecVT.isScalableVector());
2834       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
2835                                VecVT.isScalableVector());
2836       Vec = DAG.getBitcast(VecVT, Vec);
2837       SubVec = DAG.getBitcast(SubVecVT, SubVec);
2838     } else {
2839       // We can't slide this mask vector up indexed by its i1 elements.
2840       // This poses a problem when we wish to insert a scalable vector which
2841       // can't be re-expressed as a larger type. Just choose the slow path and
2842       // extend to a larger type, then truncate back down.
2843       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
2844       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
2845       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
2846       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
2847       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
2848                         Op.getOperand(2));
2849       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
2850       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
2851     }
2852   }
2853 
2854   // If the subvector vector is a fixed-length type, we cannot use subregister
2855   // manipulation to simplify the codegen; we don't know which register of a
2856   // LMUL group contains the specific subvector as we only know the minimum
2857   // register size. Therefore we must slide the vector group up the full
2858   // amount.
2859   if (SubVecVT.isFixedLengthVector()) {
2860     if (OrigIdx == 0 && Vec.isUndef())
2861       return Op;
2862     MVT ContainerVT = VecVT;
2863     if (VecVT.isFixedLengthVector()) {
2864       ContainerVT = getContainerForFixedLengthVector(VecVT);
2865       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2866     }
2867     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
2868                          DAG.getUNDEF(ContainerVT), SubVec,
2869                          DAG.getConstant(0, DL, XLenVT));
2870     SDValue Mask =
2871         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
2872     // Set the vector length to only the number of elements we care about. Note
2873     // that for slideup this includes the offset.
2874     SDValue VL =
2875         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
2876     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
2877     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
2878                                   SubVec, SlideupAmt, Mask, VL);
2879     if (VecVT.isFixedLengthVector())
2880       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
2881     return DAG.getBitcast(Op.getValueType(), Slideup);
2882   }
2883 
2884   unsigned SubRegIdx, RemIdx;
2885   std::tie(SubRegIdx, RemIdx) =
2886       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
2887           VecVT, SubVecVT, OrigIdx, TRI);
2888 
2889   RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
2890   bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
2891                          SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
2892                          SubVecLMUL == RISCVVLMUL::LMUL_F8;
2893 
2894   // 1. If the Idx has been completely eliminated and this subvector's size is
2895   // a vector register or a multiple thereof, or the surrounding elements are
2896   // undef, then this is a subvector insert which naturally aligns to a vector
2897   // register. These can easily be handled using subregister manipulation.
2898   // 2. If the subvector is smaller than a vector register, then the insertion
2899   // must preserve the undisturbed elements of the register. We do this by
2900   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
2901   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
2902   // subvector within the vector register, and an INSERT_SUBVECTOR of that
2903   // LMUL=1 type back into the larger vector (resolving to another subregister
2904   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
2905   // to avoid allocating a large register group to hold our subvector.
2906   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
2907     return Op;
2908 
2909   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
2910   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
2911   // (in our case undisturbed). This means we can set up a subvector insertion
2912   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
2913   // size of the subvector.
2914   MVT InterSubVT = VecVT;
2915   SDValue AlignedExtract = Vec;
2916   unsigned AlignedIdx = OrigIdx - RemIdx;
2917   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
2918     InterSubVT = getLMUL1VT(VecVT);
2919     // Extract a subvector equal to the nearest full vector register type. This
2920     // should resolve to a EXTRACT_SUBREG instruction.
2921     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
2922                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
2923   }
2924 
2925   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
2926   // For scalable vectors this must be further multiplied by vscale.
2927   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
2928 
2929   SDValue Mask, VL;
2930   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
2931 
2932   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
2933   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
2934   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
2935   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
2936 
2937   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
2938                        DAG.getUNDEF(InterSubVT), SubVec,
2939                        DAG.getConstant(0, DL, XLenVT));
2940 
2941   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
2942                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
2943 
2944   // If required, insert this subvector back into the correct vector register.
2945   // This should resolve to an INSERT_SUBREG instruction.
2946   if (VecVT.bitsGT(InterSubVT))
2947     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
2948                           DAG.getConstant(AlignedIdx, DL, XLenVT));
2949 
2950   // We might have bitcast from a mask type: cast back to the original type if
2951   // required.
2952   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
2953 }
2954 
2955 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
2956                                                     SelectionDAG &DAG) const {
2957   SDValue Vec = Op.getOperand(0);
2958   MVT SubVecVT = Op.getSimpleValueType();
2959   MVT VecVT = Vec.getSimpleValueType();
2960 
2961   SDLoc DL(Op);
2962   MVT XLenVT = Subtarget.getXLenVT();
2963   unsigned OrigIdx = Op.getConstantOperandVal(1);
2964   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2965 
2966   // We don't have the ability to slide mask vectors down indexed by their i1
2967   // elements; the smallest we can do is i8. Often we are able to bitcast to
2968   // equivalent i8 vectors. Note that when extracting a fixed-length vector
2969   // from a scalable one, we might not necessarily have enough scalable
2970   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
2971   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
2972     if (VecVT.getVectorMinNumElements() >= 8 &&
2973         SubVecVT.getVectorMinNumElements() >= 8) {
2974       assert(OrigIdx % 8 == 0 && "Invalid index");
2975       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
2976              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
2977              "Unexpected mask vector lowering");
2978       OrigIdx /= 8;
2979       SubVecVT =
2980           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
2981                            SubVecVT.isScalableVector());
2982       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
2983                                VecVT.isScalableVector());
2984       Vec = DAG.getBitcast(VecVT, Vec);
2985     } else {
2986       // We can't slide this mask vector down, indexed by its i1 elements.
2987       // This poses a problem when we wish to extract a scalable vector which
2988       // can't be re-expressed as a larger type. Just choose the slow path and
2989       // extend to a larger type, then truncate back down.
2990       // TODO: We could probably improve this when extracting certain fixed
2991       // from fixed, where we can extract as i8 and shift the correct element
2992       // right to reach the desired subvector?
2993       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
2994       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
2995       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
2996       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
2997                         Op.getOperand(1));
2998       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
2999       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3000     }
3001   }
3002 
3003   // If the subvector vector is a fixed-length type, we cannot use subregister
3004   // manipulation to simplify the codegen; we don't know which register of a
3005   // LMUL group contains the specific subvector as we only know the minimum
3006   // register size. Therefore we must slide the vector group down the full
3007   // amount.
3008   if (SubVecVT.isFixedLengthVector()) {
3009     // With an index of 0 this is a cast-like subvector, which can be performed
3010     // with subregister operations.
3011     if (OrigIdx == 0)
3012       return Op;
3013     MVT ContainerVT = VecVT;
3014     if (VecVT.isFixedLengthVector()) {
3015       ContainerVT = getContainerForFixedLengthVector(VecVT);
3016       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3017     }
3018     SDValue Mask =
3019         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3020     // Set the vector length to only the number of elements we care about. This
3021     // avoids sliding down elements we're going to discard straight away.
3022     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3023     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3024     SDValue Slidedown =
3025         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3026                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3027     // Now we can use a cast-like subvector extract to get the result.
3028     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3029                             DAG.getConstant(0, DL, XLenVT));
3030     return DAG.getBitcast(Op.getValueType(), Slidedown);
3031   }
3032 
3033   unsigned SubRegIdx, RemIdx;
3034   std::tie(SubRegIdx, RemIdx) =
3035       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3036           VecVT, SubVecVT, OrigIdx, TRI);
3037 
3038   // If the Idx has been completely eliminated then this is a subvector extract
3039   // which naturally aligns to a vector register. These can easily be handled
3040   // using subregister manipulation.
3041   if (RemIdx == 0)
3042     return Op;
3043 
3044   // Else we must shift our vector register directly to extract the subvector.
3045   // Do this using VSLIDEDOWN.
3046 
3047   // If the vector type is an LMUL-group type, extract a subvector equal to the
3048   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3049   // instruction.
3050   MVT InterSubVT = VecVT;
3051   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3052     InterSubVT = getLMUL1VT(VecVT);
3053     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3054                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3055   }
3056 
3057   // Slide this vector register down by the desired number of elements in order
3058   // to place the desired subvector starting at element 0.
3059   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3060   // For scalable vectors this must be further multiplied by vscale.
3061   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3062 
3063   SDValue Mask, VL;
3064   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3065   SDValue Slidedown =
3066       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3067                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3068 
3069   // Now the vector is in the right position, extract our final subvector. This
3070   // should resolve to a COPY.
3071   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3072                           DAG.getConstant(0, DL, XLenVT));
3073 
3074   // We might have bitcast from a mask type: cast back to the original type if
3075   // required.
3076   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3077 }
3078 
3079 // Implement vector_reverse using vrgather.vv with indices determined by
3080 // subtracting the id of each element from (VLMAX-1). This will convert
3081 // the indices like so:
3082 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3083 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3084 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3085                                                  SelectionDAG &DAG) const {
3086   SDLoc DL(Op);
3087   MVT VecVT = Op.getSimpleValueType();
3088   unsigned EltSize = VecVT.getScalarSizeInBits();
3089   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3090 
3091   unsigned MaxVLMAX = 0;
3092   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3093   if (VectorBitsMax != 0)
3094     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3095 
3096   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3097   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3098 
3099   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3100   // to use vrgatherei16.vv.
3101   // TODO: It's also possible to use vrgatherei16.vv for other types to
3102   // decrease register width for the index calculation.
3103   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
3104     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
3105     // Reverse each half, then reassemble them in reverse order.
3106     // NOTE: It's also possible that after splitting that VLMAX no longer
3107     // requires vrgatherei16.vv.
3108     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3109       SDValue Lo, Hi;
3110       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3111       EVT LoVT, HiVT;
3112       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3113       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3114       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3115       // Reassemble the low and high pieces reversed.
3116       // FIXME: This is a CONCAT_VECTORS.
3117       SDValue Res =
3118           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3119                       DAG.getIntPtrConstant(0, DL));
3120       return DAG.getNode(
3121           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3122           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3123     }
3124 
3125     // Just promote the int type to i16 which will double the LMUL.
3126     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
3127     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
3128   }
3129 
3130   MVT XLenVT = Subtarget.getXLenVT();
3131   SDValue Mask, VL;
3132   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3133 
3134   // Calculate VLMAX-1 for the desired SEW.
3135   unsigned MinElts = VecVT.getVectorMinNumElements();
3136   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
3137                               DAG.getConstant(MinElts, DL, XLenVT));
3138   SDValue VLMinus1 =
3139       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
3140 
3141   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
3142   bool IsRV32E64 =
3143       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
3144   SDValue SplatVL;
3145   if (!IsRV32E64)
3146     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
3147   else
3148     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
3149 
3150   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
3151   SDValue Indices =
3152       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
3153 
3154   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
3155 }
3156 
3157 SDValue
3158 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
3159                                                      SelectionDAG &DAG) const {
3160   auto *Load = cast<LoadSDNode>(Op);
3161 
3162   SDLoc DL(Op);
3163   MVT VT = Op.getSimpleValueType();
3164   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3165 
3166   SDValue VL =
3167       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3168 
3169   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3170   SDValue NewLoad = DAG.getMemIntrinsicNode(
3171       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
3172       Load->getMemoryVT(), Load->getMemOperand());
3173 
3174   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
3175   return DAG.getMergeValues({Result, Load->getChain()}, DL);
3176 }
3177 
3178 SDValue
3179 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
3180                                                       SelectionDAG &DAG) const {
3181   auto *Store = cast<StoreSDNode>(Op);
3182 
3183   SDLoc DL(Op);
3184   MVT VT = Store->getValue().getSimpleValueType();
3185 
3186   // FIXME: We probably need to zero any extra bits in a byte for mask stores.
3187   // This is tricky to do.
3188 
3189   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3190 
3191   SDValue VL =
3192       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3193 
3194   SDValue NewValue =
3195       convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget);
3196   return DAG.getMemIntrinsicNode(
3197       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
3198       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
3199       Store->getMemoryVT(), Store->getMemOperand());
3200 }
3201 
3202 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3203   auto *Load = cast<MaskedLoadSDNode>(Op);
3204 
3205   SDLoc DL(Op);
3206   MVT VT = Op.getSimpleValueType();
3207   MVT XLenVT = Subtarget.getXLenVT();
3208 
3209   SDValue Mask = Load->getMask();
3210   SDValue PassThru = Load->getPassThru();
3211   SDValue VL;
3212 
3213   MVT ContainerVT = VT;
3214   if (VT.isFixedLengthVector()) {
3215     ContainerVT = getContainerForFixedLengthVector(VT);
3216     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3217 
3218     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3219     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
3220     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3221   } else
3222     VL = DAG.getRegister(RISCV::X0, XLenVT);
3223 
3224   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3225   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
3226   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
3227                    Load->getBasePtr(), Mask,  VL};
3228   SDValue Result =
3229       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
3230                               Load->getMemoryVT(), Load->getMemOperand());
3231   SDValue Chain = Result.getValue(1);
3232 
3233   if (VT.isFixedLengthVector())
3234     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3235 
3236   return DAG.getMergeValues({Result, Chain}, DL);
3237 }
3238 
3239 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
3240   auto *Store = cast<MaskedStoreSDNode>(Op);
3241 
3242   SDLoc DL(Op);
3243   SDValue Val = Store->getValue();
3244   SDValue Mask = Store->getMask();
3245   MVT VT = Val.getSimpleValueType();
3246   MVT XLenVT = Subtarget.getXLenVT();
3247   SDValue VL;
3248 
3249   MVT ContainerVT = VT;
3250   if (VT.isFixedLengthVector()) {
3251     ContainerVT = getContainerForFixedLengthVector(VT);
3252     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3253 
3254     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
3255     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3256     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3257   } else
3258     VL = DAG.getRegister(RISCV::X0, XLenVT);
3259 
3260   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
3261   return DAG.getMemIntrinsicNode(
3262       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
3263       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
3264       Store->getMemoryVT(), Store->getMemOperand());
3265 }
3266 
3267 SDValue
3268 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
3269                                                       SelectionDAG &DAG) const {
3270   MVT InVT = Op.getOperand(0).getSimpleValueType();
3271   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
3272 
3273   MVT VT = Op.getSimpleValueType();
3274 
3275   SDValue Op1 =
3276       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3277   SDValue Op2 =
3278       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
3279 
3280   SDLoc DL(Op);
3281   SDValue VL =
3282       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3283 
3284   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3285 
3286   bool Invert = false;
3287   Optional<unsigned> LogicOpc;
3288   if (ContainerVT.isFloatingPoint()) {
3289     bool Swap = false;
3290     switch (CC) {
3291     default:
3292       break;
3293     case ISD::SETULE:
3294     case ISD::SETULT:
3295       Swap = true;
3296       LLVM_FALLTHROUGH;
3297     case ISD::SETUGE:
3298     case ISD::SETUGT:
3299       CC = getSetCCInverse(CC, ContainerVT);
3300       Invert = true;
3301       break;
3302     case ISD::SETOGE:
3303     case ISD::SETOGT:
3304     case ISD::SETGE:
3305     case ISD::SETGT:
3306       Swap = true;
3307       break;
3308     case ISD::SETUEQ:
3309       // Use !((OLT Op1, Op2) || (OLT Op2, Op1))
3310       Invert = true;
3311       LogicOpc = RISCVISD::VMOR_VL;
3312       CC = ISD::SETOLT;
3313       break;
3314     case ISD::SETONE:
3315       // Use ((OLT Op1, Op2) || (OLT Op2, Op1))
3316       LogicOpc = RISCVISD::VMOR_VL;
3317       CC = ISD::SETOLT;
3318       break;
3319     case ISD::SETO:
3320       // Use (OEQ Op1, Op1) && (OEQ Op2, Op2)
3321       LogicOpc = RISCVISD::VMAND_VL;
3322       CC = ISD::SETOEQ;
3323       break;
3324     case ISD::SETUO:
3325       // Use (UNE Op1, Op1) || (UNE Op2, Op2)
3326       LogicOpc = RISCVISD::VMOR_VL;
3327       CC = ISD::SETUNE;
3328       break;
3329     }
3330 
3331     if (Swap) {
3332       CC = getSetCCSwappedOperands(CC);
3333       std::swap(Op1, Op2);
3334     }
3335   }
3336 
3337   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3338   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3339 
3340   // There are 3 cases we need to emit.
3341   // 1. For (OEQ Op1, Op1) && (OEQ Op2, Op2) or (UNE Op1, Op1) || (UNE Op2, Op2)
3342   //    we need to compare each operand with itself.
3343   // 2. For (OLT Op1, Op2) || (OLT Op2, Op1) we need to compare Op1 and Op2 in
3344   //    both orders.
3345   // 3. For any other case we just need one compare with Op1 and Op2.
3346   SDValue Cmp;
3347   if (LogicOpc && (CC == ISD::SETOEQ || CC == ISD::SETUNE)) {
3348     Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op1,
3349                       DAG.getCondCode(CC), Mask, VL);
3350     SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op2,
3351                                DAG.getCondCode(CC), Mask, VL);
3352     Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL);
3353   } else {
3354     Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
3355                       DAG.getCondCode(CC), Mask, VL);
3356     if (LogicOpc) {
3357       SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op1,
3358                                  DAG.getCondCode(CC), Mask, VL);
3359       Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL);
3360     }
3361   }
3362 
3363   if (Invert) {
3364     SDValue AllOnes = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3365     Cmp = DAG.getNode(RISCVISD::VMXOR_VL, DL, MaskVT, Cmp, AllOnes, VL);
3366   }
3367 
3368   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
3369 }
3370 
3371 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
3372     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
3373   MVT VT = Op.getSimpleValueType();
3374 
3375   if (VT.getVectorElementType() == MVT::i1)
3376     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
3377 
3378   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
3379 }
3380 
3381 // Lower vector ABS to smax(X, sub(0, X)).
3382 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
3383   SDLoc DL(Op);
3384   MVT VT = Op.getSimpleValueType();
3385   SDValue X = Op.getOperand(0);
3386 
3387   assert(VT.isFixedLengthVector() && "Unexpected type");
3388 
3389   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3390   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
3391 
3392   SDValue Mask, VL;
3393   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3394 
3395   SDValue SplatZero =
3396       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
3397                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3398   SDValue NegX =
3399       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
3400   SDValue Max =
3401       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
3402 
3403   return convertFromScalableVector(VT, Max, DAG, Subtarget);
3404 }
3405 
3406 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
3407     SDValue Op, SelectionDAG &DAG) const {
3408   SDLoc DL(Op);
3409   MVT VT = Op.getSimpleValueType();
3410   SDValue Mag = Op.getOperand(0);
3411   SDValue Sign = Op.getOperand(1);
3412   assert(Mag.getValueType() == Sign.getValueType() &&
3413          "Can only handle COPYSIGN with matching types.");
3414 
3415   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3416   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
3417   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
3418 
3419   SDValue Mask, VL;
3420   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3421 
3422   SDValue CopySign =
3423       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
3424 
3425   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
3426 }
3427 
3428 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
3429     SDValue Op, SelectionDAG &DAG) const {
3430   MVT VT = Op.getSimpleValueType();
3431   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3432 
3433   MVT I1ContainerVT =
3434       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3435 
3436   SDValue CC =
3437       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
3438   SDValue Op1 =
3439       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
3440   SDValue Op2 =
3441       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
3442 
3443   SDLoc DL(Op);
3444   SDValue Mask, VL;
3445   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3446 
3447   SDValue Select =
3448       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
3449 
3450   return convertFromScalableVector(VT, Select, DAG, Subtarget);
3451 }
3452 
3453 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
3454                                                unsigned NewOpc,
3455                                                bool HasMask) const {
3456   MVT VT = Op.getSimpleValueType();
3457   assert(useRVVForFixedLengthVectorVT(VT) &&
3458          "Only expected to lower fixed length vector operation!");
3459   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3460 
3461   // Create list of operands by converting existing ones to scalable types.
3462   SmallVector<SDValue, 6> Ops;
3463   for (const SDValue &V : Op->op_values()) {
3464     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
3465 
3466     // Pass through non-vector operands.
3467     if (!V.getValueType().isVector()) {
3468       Ops.push_back(V);
3469       continue;
3470     }
3471 
3472     // "cast" fixed length vector to a scalable vector.
3473     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
3474            "Only fixed length vectors are supported!");
3475     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
3476   }
3477 
3478   SDLoc DL(Op);
3479   SDValue Mask, VL;
3480   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3481   if (HasMask)
3482     Ops.push_back(Mask);
3483   Ops.push_back(VL);
3484 
3485   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
3486   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
3487 }
3488 
3489 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
3490 // a RVV indexed load. The RVV indexed load/store instructions only support the
3491 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
3492 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
3493 // indexing is extended to the XLEN value type and scaled accordingly.
3494 SDValue RISCVTargetLowering::lowerMGATHERMSCATTER(SDValue Op,
3495                                                   SelectionDAG &DAG) const {
3496   auto *N = cast<MaskedGatherScatterSDNode>(Op.getNode());
3497   SDLoc DL(Op);
3498   SDValue Index = N->getIndex();
3499   SDValue Mask = N->getMask();
3500 
3501   MVT XLenVT = Subtarget.getXLenVT();
3502   assert(N->getBasePtr().getSimpleValueType() == XLenVT &&
3503          "Unexpected pointer type");
3504   // Targets have to explicitly opt-in for extending vector loads and
3505   // truncating vector stores.
3506   const auto *MGN = dyn_cast<MaskedGatherSDNode>(N);
3507   const auto *MSN = dyn_cast<MaskedScatterSDNode>(N);
3508   assert((!MGN || MGN->getExtensionType() == ISD::NON_EXTLOAD) &&
3509          "Unexpected extending MGATHER");
3510   assert((!MSN || !MSN->isTruncatingStore()) &&
3511          "Unexpected extending MSCATTER");
3512 
3513   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
3514   // the selection of the masked intrinsics doesn't do this for us.
3515   unsigned IntID = 0;
3516   MVT IndexVT = Index.getSimpleValueType();
3517   SDValue VL = getDefaultVLOps(IndexVT, IndexVT, DL, DAG, Subtarget).second;
3518   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
3519 
3520   if (IsUnmasked)
3521     IntID = MGN ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vsoxei;
3522   else
3523     IntID = MGN ? Intrinsic::riscv_vloxei_mask : Intrinsic::riscv_vsoxei_mask;
3524   SmallVector<SDValue, 8> Ops{N->getChain(),
3525                               DAG.getTargetConstant(IntID, DL, XLenVT)};
3526   if (MSN)
3527     Ops.push_back(MSN->getValue());
3528   else if (!IsUnmasked)
3529     Ops.push_back(MGN->getPassThru());
3530   Ops.push_back(N->getBasePtr());
3531   Ops.push_back(Index);
3532   if (!IsUnmasked)
3533     Ops.push_back(Mask);
3534   Ops.push_back(VL);
3535   return DAG.getMemIntrinsicNode(
3536       MGN ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, N->getVTList(),
3537       Ops, N->getMemoryVT(), N->getMemOperand());
3538 }
3539 
3540 // Returns the opcode of the target-specific SDNode that implements the 32-bit
3541 // form of the given Opcode.
3542 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
3543   switch (Opcode) {
3544   default:
3545     llvm_unreachable("Unexpected opcode");
3546   case ISD::SHL:
3547     return RISCVISD::SLLW;
3548   case ISD::SRA:
3549     return RISCVISD::SRAW;
3550   case ISD::SRL:
3551     return RISCVISD::SRLW;
3552   case ISD::SDIV:
3553     return RISCVISD::DIVW;
3554   case ISD::UDIV:
3555     return RISCVISD::DIVUW;
3556   case ISD::UREM:
3557     return RISCVISD::REMUW;
3558   case ISD::ROTL:
3559     return RISCVISD::ROLW;
3560   case ISD::ROTR:
3561     return RISCVISD::RORW;
3562   case RISCVISD::GREVI:
3563     return RISCVISD::GREVIW;
3564   case RISCVISD::GORCI:
3565     return RISCVISD::GORCIW;
3566   }
3567 }
3568 
3569 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
3570 // Because i32 isn't a legal type for RV64, these operations would otherwise
3571 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
3572 // later one because the fact the operation was originally of type i32 is
3573 // lost.
3574 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
3575                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
3576   SDLoc DL(N);
3577   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
3578   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
3579   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
3580   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
3581   // ReplaceNodeResults requires we maintain the same type for the return value.
3582   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
3583 }
3584 
3585 // Converts the given 32-bit operation to a i64 operation with signed extension
3586 // semantic to reduce the signed extension instructions.
3587 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
3588   SDLoc DL(N);
3589   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3590   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
3591   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
3592   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
3593                                DAG.getValueType(MVT::i32));
3594   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
3595 }
3596 
3597 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
3598                                              SmallVectorImpl<SDValue> &Results,
3599                                              SelectionDAG &DAG) const {
3600   SDLoc DL(N);
3601   switch (N->getOpcode()) {
3602   default:
3603     llvm_unreachable("Don't know how to custom type legalize this operation!");
3604   case ISD::STRICT_FP_TO_SINT:
3605   case ISD::STRICT_FP_TO_UINT:
3606   case ISD::FP_TO_SINT:
3607   case ISD::FP_TO_UINT: {
3608     bool IsStrict = N->isStrictFPOpcode();
3609     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3610            "Unexpected custom legalisation");
3611     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
3612     // If the FP type needs to be softened, emit a library call using the 'si'
3613     // version. If we left it to default legalization we'd end up with 'di'. If
3614     // the FP type doesn't need to be softened just let generic type
3615     // legalization promote the result type.
3616     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
3617         TargetLowering::TypeSoftenFloat)
3618       return;
3619     RTLIB::Libcall LC;
3620     if (N->getOpcode() == ISD::FP_TO_SINT ||
3621         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
3622       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
3623     else
3624       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
3625     MakeLibCallOptions CallOptions;
3626     EVT OpVT = Op0.getValueType();
3627     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
3628     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
3629     SDValue Result;
3630     std::tie(Result, Chain) =
3631         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
3632     Results.push_back(Result);
3633     if (IsStrict)
3634       Results.push_back(Chain);
3635     break;
3636   }
3637   case ISD::READCYCLECOUNTER: {
3638     assert(!Subtarget.is64Bit() &&
3639            "READCYCLECOUNTER only has custom type legalization on riscv32");
3640 
3641     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
3642     SDValue RCW =
3643         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
3644 
3645     Results.push_back(
3646         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
3647     Results.push_back(RCW.getValue(2));
3648     break;
3649   }
3650   case ISD::ADD:
3651   case ISD::SUB:
3652   case ISD::MUL:
3653     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3654            "Unexpected custom legalisation");
3655     if (N->getOperand(1).getOpcode() == ISD::Constant)
3656       return;
3657     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
3658     break;
3659   case ISD::SHL:
3660   case ISD::SRA:
3661   case ISD::SRL:
3662     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3663            "Unexpected custom legalisation");
3664     if (N->getOperand(1).getOpcode() == ISD::Constant)
3665       return;
3666     Results.push_back(customLegalizeToWOp(N, DAG));
3667     break;
3668   case ISD::ROTL:
3669   case ISD::ROTR:
3670     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3671            "Unexpected custom legalisation");
3672     Results.push_back(customLegalizeToWOp(N, DAG));
3673     break;
3674   case ISD::SDIV:
3675   case ISD::UDIV:
3676   case ISD::UREM: {
3677     MVT VT = N->getSimpleValueType(0);
3678     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
3679            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
3680            "Unexpected custom legalisation");
3681     if (N->getOperand(0).getOpcode() == ISD::Constant ||
3682         N->getOperand(1).getOpcode() == ISD::Constant)
3683       return;
3684 
3685     // If the input is i32, use ANY_EXTEND since the W instructions don't read
3686     // the upper 32 bits. For other types we need to sign or zero extend
3687     // based on the opcode.
3688     unsigned ExtOpc = ISD::ANY_EXTEND;
3689     if (VT != MVT::i32)
3690       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
3691                                            : ISD::ZERO_EXTEND;
3692 
3693     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
3694     break;
3695   }
3696   case ISD::UADDO:
3697   case ISD::USUBO: {
3698     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3699            "Unexpected custom legalisation");
3700     bool IsAdd = N->getOpcode() == ISD::UADDO;
3701     SDLoc DL(N);
3702     // Create an ADDW or SUBW.
3703     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3704     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
3705     SDValue Res =
3706         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
3707     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
3708                       DAG.getValueType(MVT::i32));
3709 
3710     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
3711     // Since the inputs are sign extended from i32, this is equivalent to
3712     // comparing the lower 32 bits.
3713     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
3714     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
3715                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
3716 
3717     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
3718     Results.push_back(Overflow);
3719     return;
3720   }
3721   case ISD::UADDSAT:
3722   case ISD::USUBSAT: {
3723     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3724            "Unexpected custom legalisation");
3725     SDLoc DL(N);
3726     if (Subtarget.hasStdExtZbb()) {
3727       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
3728       // sign extend allows overflow of the lower 32 bits to be detected on
3729       // the promoted size.
3730       SDValue LHS =
3731           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
3732       SDValue RHS =
3733           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
3734       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
3735       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
3736       return;
3737     }
3738 
3739     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
3740     // promotion for UADDO/USUBO.
3741     Results.push_back(expandAddSubSat(N, DAG));
3742     return;
3743   }
3744   case ISD::BITCAST: {
3745     assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3746              Subtarget.hasStdExtF()) ||
3747             (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
3748            "Unexpected custom legalisation");
3749     SDValue Op0 = N->getOperand(0);
3750     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
3751       if (Op0.getValueType() != MVT::f16)
3752         return;
3753       SDValue FPConv =
3754           DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
3755       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
3756     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3757                Subtarget.hasStdExtF()) {
3758       if (Op0.getValueType() != MVT::f32)
3759         return;
3760       SDValue FPConv =
3761           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
3762       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
3763     }
3764     break;
3765   }
3766   case RISCVISD::GREVI:
3767   case RISCVISD::GORCI: {
3768     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3769            "Unexpected custom legalisation");
3770     // This is similar to customLegalizeToWOp, except that we pass the second
3771     // operand (a TargetConstant) straight through: it is already of type
3772     // XLenVT.
3773     SDLoc DL(N);
3774     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
3775     SDValue NewOp0 =
3776         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3777     SDValue NewRes =
3778         DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
3779     // ReplaceNodeResults requires we maintain the same type for the return
3780     // value.
3781     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
3782     break;
3783   }
3784   case RISCVISD::SHFLI: {
3785     // There is no SHFLIW instruction, but we can just promote the operation.
3786     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3787            "Unexpected custom legalisation");
3788     SDLoc DL(N);
3789     SDValue NewOp0 =
3790         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3791     SDValue NewRes =
3792         DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1));
3793     // ReplaceNodeResults requires we maintain the same type for the return
3794     // value.
3795     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
3796     break;
3797   }
3798   case ISD::BSWAP:
3799   case ISD::BITREVERSE: {
3800     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3801            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
3802     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
3803                                  N->getOperand(0));
3804     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
3805     SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
3806                                  DAG.getTargetConstant(Imm, DL,
3807                                                        Subtarget.getXLenVT()));
3808     // ReplaceNodeResults requires we maintain the same type for the return
3809     // value.
3810     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
3811     break;
3812   }
3813   case ISD::FSHL:
3814   case ISD::FSHR: {
3815     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
3816            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
3817     SDValue NewOp0 =
3818         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
3819     SDValue NewOp1 =
3820         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
3821     SDValue NewOp2 =
3822         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
3823     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
3824     // Mask the shift amount to 5 bits.
3825     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
3826                          DAG.getConstant(0x1f, DL, MVT::i64));
3827     unsigned Opc =
3828         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
3829     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
3830     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
3831     break;
3832   }
3833   case ISD::EXTRACT_VECTOR_ELT: {
3834     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
3835     // type is illegal (currently only vXi64 RV32).
3836     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
3837     // transferred to the destination register. We issue two of these from the
3838     // upper- and lower- halves of the SEW-bit vector element, slid down to the
3839     // first element.
3840     SDLoc DL(N);
3841     SDValue Vec = N->getOperand(0);
3842     SDValue Idx = N->getOperand(1);
3843 
3844     // The vector type hasn't been legalized yet so we can't issue target
3845     // specific nodes if it needs legalization.
3846     // FIXME: We would manually legalize if it's important.
3847     if (!isTypeLegal(Vec.getValueType()))
3848       return;
3849 
3850     MVT VecVT = Vec.getSimpleValueType();
3851 
3852     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
3853            VecVT.getVectorElementType() == MVT::i64 &&
3854            "Unexpected EXTRACT_VECTOR_ELT legalization");
3855 
3856     // If this is a fixed vector, we need to convert it to a scalable vector.
3857     MVT ContainerVT = VecVT;
3858     if (VecVT.isFixedLengthVector()) {
3859       ContainerVT = getContainerForFixedLengthVector(VecVT);
3860       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3861     }
3862 
3863     MVT XLenVT = Subtarget.getXLenVT();
3864 
3865     // Use a VL of 1 to avoid processing more elements than we need.
3866     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
3867     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3868     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3869 
3870     // Unless the index is known to be 0, we must slide the vector down to get
3871     // the desired element into index 0.
3872     if (!isNullConstant(Idx)) {
3873       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3874                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3875     }
3876 
3877     // Extract the lower XLEN bits of the correct vector element.
3878     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3879 
3880     // To extract the upper XLEN bits of the vector element, shift the first
3881     // element right by 32 bits and re-extract the lower XLEN bits.
3882     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
3883                                      DAG.getConstant(32, DL, XLenVT), VL);
3884     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
3885                                  ThirtyTwoV, Mask, VL);
3886 
3887     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
3888 
3889     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
3890     break;
3891   }
3892   case ISD::INTRINSIC_WO_CHAIN: {
3893     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3894     switch (IntNo) {
3895     default:
3896       llvm_unreachable(
3897           "Don't know how to custom type legalize this intrinsic!");
3898     case Intrinsic::riscv_vmv_x_s: {
3899       EVT VT = N->getValueType(0);
3900       MVT XLenVT = Subtarget.getXLenVT();
3901       if (VT.bitsLT(XLenVT)) {
3902         // Simple case just extract using vmv.x.s and truncate.
3903         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
3904                                       Subtarget.getXLenVT(), N->getOperand(1));
3905         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
3906         return;
3907       }
3908 
3909       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
3910              "Unexpected custom legalization");
3911 
3912       // We need to do the move in two steps.
3913       SDValue Vec = N->getOperand(1);
3914       MVT VecVT = Vec.getSimpleValueType();
3915 
3916       // First extract the lower XLEN bits of the element.
3917       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3918 
3919       // To extract the upper XLEN bits of the vector element, shift the first
3920       // element right by 32 bits and re-extract the lower XLEN bits.
3921       SDValue VL = DAG.getConstant(1, DL, XLenVT);
3922       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
3923       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3924       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
3925                                        DAG.getConstant(32, DL, XLenVT), VL);
3926       SDValue LShr32 =
3927           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
3928       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
3929 
3930       Results.push_back(
3931           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
3932       break;
3933     }
3934     }
3935     break;
3936   }
3937   case ISD::VECREDUCE_ADD:
3938   case ISD::VECREDUCE_AND:
3939   case ISD::VECREDUCE_OR:
3940   case ISD::VECREDUCE_XOR:
3941   case ISD::VECREDUCE_SMAX:
3942   case ISD::VECREDUCE_UMAX:
3943   case ISD::VECREDUCE_SMIN:
3944   case ISD::VECREDUCE_UMIN:
3945     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
3946       Results.push_back(V);
3947     break;
3948   }
3949 }
3950 
3951 // A structure to hold one of the bit-manipulation patterns below. Together, a
3952 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
3953 //   (or (and (shl x, 1), 0xAAAAAAAA),
3954 //       (and (srl x, 1), 0x55555555))
3955 struct RISCVBitmanipPat {
3956   SDValue Op;
3957   unsigned ShAmt;
3958   bool IsSHL;
3959 
3960   bool formsPairWith(const RISCVBitmanipPat &Other) const {
3961     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
3962   }
3963 };
3964 
3965 // Matches patterns of the form
3966 //   (and (shl x, C2), (C1 << C2))
3967 //   (and (srl x, C2), C1)
3968 //   (shl (and x, C1), C2)
3969 //   (srl (and x, (C1 << C2)), C2)
3970 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
3971 // The expected masks for each shift amount are specified in BitmanipMasks where
3972 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
3973 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
3974 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
3975 // XLen is 64.
3976 static Optional<RISCVBitmanipPat>
3977 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
3978   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
3979          "Unexpected number of masks");
3980   Optional<uint64_t> Mask;
3981   // Optionally consume a mask around the shift operation.
3982   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
3983     Mask = Op.getConstantOperandVal(1);
3984     Op = Op.getOperand(0);
3985   }
3986   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
3987     return None;
3988   bool IsSHL = Op.getOpcode() == ISD::SHL;
3989 
3990   if (!isa<ConstantSDNode>(Op.getOperand(1)))
3991     return None;
3992   uint64_t ShAmt = Op.getConstantOperandVal(1);
3993 
3994   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
3995   if (ShAmt >= Width && !isPowerOf2_64(ShAmt))
3996     return None;
3997   // If we don't have enough masks for 64 bit, then we must be trying to
3998   // match SHFL so we're only allowed to shift 1/4 of the width.
3999   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
4000     return None;
4001 
4002   SDValue Src = Op.getOperand(0);
4003 
4004   // The expected mask is shifted left when the AND is found around SHL
4005   // patterns.
4006   //   ((x >> 1) & 0x55555555)
4007   //   ((x << 1) & 0xAAAAAAAA)
4008   bool SHLExpMask = IsSHL;
4009 
4010   if (!Mask) {
4011     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
4012     // the mask is all ones: consume that now.
4013     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
4014       Mask = Src.getConstantOperandVal(1);
4015       Src = Src.getOperand(0);
4016       // The expected mask is now in fact shifted left for SRL, so reverse the
4017       // decision.
4018       //   ((x & 0xAAAAAAAA) >> 1)
4019       //   ((x & 0x55555555) << 1)
4020       SHLExpMask = !SHLExpMask;
4021     } else {
4022       // Use a default shifted mask of all-ones if there's no AND, truncated
4023       // down to the expected width. This simplifies the logic later on.
4024       Mask = maskTrailingOnes<uint64_t>(Width);
4025       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
4026     }
4027   }
4028 
4029   unsigned MaskIdx = Log2_32(ShAmt);
4030   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4031 
4032   if (SHLExpMask)
4033     ExpMask <<= ShAmt;
4034 
4035   if (Mask != ExpMask)
4036     return None;
4037 
4038   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
4039 }
4040 
4041 // Matches any of the following bit-manipulation patterns:
4042 //   (and (shl x, 1), (0x55555555 << 1))
4043 //   (and (srl x, 1), 0x55555555)
4044 //   (shl (and x, 0x55555555), 1)
4045 //   (srl (and x, (0x55555555 << 1)), 1)
4046 // where the shift amount and mask may vary thus:
4047 //   [1]  = 0x55555555 / 0xAAAAAAAA
4048 //   [2]  = 0x33333333 / 0xCCCCCCCC
4049 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
4050 //   [8]  = 0x00FF00FF / 0xFF00FF00
4051 //   [16] = 0x0000FFFF / 0xFFFFFFFF
4052 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
4053 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
4054   // These are the unshifted masks which we use to match bit-manipulation
4055   // patterns. They may be shifted left in certain circumstances.
4056   static const uint64_t BitmanipMasks[] = {
4057       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
4058       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
4059 
4060   return matchRISCVBitmanipPat(Op, BitmanipMasks);
4061 }
4062 
4063 // Match the following pattern as a GREVI(W) operation
4064 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
4065 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
4066                                const RISCVSubtarget &Subtarget) {
4067   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
4068   EVT VT = Op.getValueType();
4069 
4070   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4071     auto LHS = matchGREVIPat(Op.getOperand(0));
4072     auto RHS = matchGREVIPat(Op.getOperand(1));
4073     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
4074       SDLoc DL(Op);
4075       return DAG.getNode(
4076           RISCVISD::GREVI, DL, VT, LHS->Op,
4077           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
4078     }
4079   }
4080   return SDValue();
4081 }
4082 
4083 // Matches any the following pattern as a GORCI(W) operation
4084 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
4085 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
4086 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
4087 // Note that with the variant of 3.,
4088 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
4089 // the inner pattern will first be matched as GREVI and then the outer
4090 // pattern will be matched to GORC via the first rule above.
4091 // 4.  (or (rotl/rotr x, bitwidth/2), x)
4092 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
4093                                const RISCVSubtarget &Subtarget) {
4094   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
4095   EVT VT = Op.getValueType();
4096 
4097   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4098     SDLoc DL(Op);
4099     SDValue Op0 = Op.getOperand(0);
4100     SDValue Op1 = Op.getOperand(1);
4101 
4102     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
4103       if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
4104           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
4105         return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
4106       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
4107       if ((Reverse.getOpcode() == ISD::ROTL ||
4108            Reverse.getOpcode() == ISD::ROTR) &&
4109           Reverse.getOperand(0) == X &&
4110           isa<ConstantSDNode>(Reverse.getOperand(1))) {
4111         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
4112         if (RotAmt == (VT.getSizeInBits() / 2))
4113           return DAG.getNode(
4114               RISCVISD::GORCI, DL, VT, X,
4115               DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
4116       }
4117       return SDValue();
4118     };
4119 
4120     // Check for either commutable permutation of (or (GREVI x, shamt), x)
4121     if (SDValue V = MatchOROfReverse(Op0, Op1))
4122       return V;
4123     if (SDValue V = MatchOROfReverse(Op1, Op0))
4124       return V;
4125 
4126     // OR is commutable so canonicalize its OR operand to the left
4127     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
4128       std::swap(Op0, Op1);
4129     if (Op0.getOpcode() != ISD::OR)
4130       return SDValue();
4131     SDValue OrOp0 = Op0.getOperand(0);
4132     SDValue OrOp1 = Op0.getOperand(1);
4133     auto LHS = matchGREVIPat(OrOp0);
4134     // OR is commutable so swap the operands and try again: x might have been
4135     // on the left
4136     if (!LHS) {
4137       std::swap(OrOp0, OrOp1);
4138       LHS = matchGREVIPat(OrOp0);
4139     }
4140     auto RHS = matchGREVIPat(Op1);
4141     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
4142       return DAG.getNode(
4143           RISCVISD::GORCI, DL, VT, LHS->Op,
4144           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
4145     }
4146   }
4147   return SDValue();
4148 }
4149 
4150 // Matches any of the following bit-manipulation patterns:
4151 //   (and (shl x, 1), (0x22222222 << 1))
4152 //   (and (srl x, 1), 0x22222222)
4153 //   (shl (and x, 0x22222222), 1)
4154 //   (srl (and x, (0x22222222 << 1)), 1)
4155 // where the shift amount and mask may vary thus:
4156 //   [1]  = 0x22222222 / 0x44444444
4157 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
4158 //   [4]  = 0x00F000F0 / 0x0F000F00
4159 //   [8]  = 0x0000FF00 / 0x00FF0000
4160 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
4161 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
4162   // These are the unshifted masks which we use to match bit-manipulation
4163   // patterns. They may be shifted left in certain circumstances.
4164   static const uint64_t BitmanipMasks[] = {
4165       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
4166       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
4167 
4168   return matchRISCVBitmanipPat(Op, BitmanipMasks);
4169 }
4170 
4171 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
4172 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
4173                                const RISCVSubtarget &Subtarget) {
4174   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
4175   EVT VT = Op.getValueType();
4176 
4177   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
4178     return SDValue();
4179 
4180   SDValue Op0 = Op.getOperand(0);
4181   SDValue Op1 = Op.getOperand(1);
4182 
4183   // Or is commutable so canonicalize the second OR to the LHS.
4184   if (Op0.getOpcode() != ISD::OR)
4185     std::swap(Op0, Op1);
4186   if (Op0.getOpcode() != ISD::OR)
4187     return SDValue();
4188 
4189   // We found an inner OR, so our operands are the operands of the inner OR
4190   // and the other operand of the outer OR.
4191   SDValue A = Op0.getOperand(0);
4192   SDValue B = Op0.getOperand(1);
4193   SDValue C = Op1;
4194 
4195   auto Match1 = matchSHFLPat(A);
4196   auto Match2 = matchSHFLPat(B);
4197 
4198   // If neither matched, we failed.
4199   if (!Match1 && !Match2)
4200     return SDValue();
4201 
4202   // We had at least one match. if one failed, try the remaining C operand.
4203   if (!Match1) {
4204     std::swap(A, C);
4205     Match1 = matchSHFLPat(A);
4206     if (!Match1)
4207       return SDValue();
4208   } else if (!Match2) {
4209     std::swap(B, C);
4210     Match2 = matchSHFLPat(B);
4211     if (!Match2)
4212       return SDValue();
4213   }
4214   assert(Match1 && Match2);
4215 
4216   // Make sure our matches pair up.
4217   if (!Match1->formsPairWith(*Match2))
4218     return SDValue();
4219 
4220   // All the remains is to make sure C is an AND with the same input, that masks
4221   // out the bits that are being shuffled.
4222   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
4223       C.getOperand(0) != Match1->Op)
4224     return SDValue();
4225 
4226   uint64_t Mask = C.getConstantOperandVal(1);
4227 
4228   static const uint64_t BitmanipMasks[] = {
4229       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
4230       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
4231   };
4232 
4233   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
4234   unsigned MaskIdx = Log2_32(Match1->ShAmt);
4235   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4236 
4237   if (Mask != ExpMask)
4238     return SDValue();
4239 
4240   SDLoc DL(Op);
4241   return DAG.getNode(
4242       RISCVISD::SHFLI, DL, VT, Match1->Op,
4243       DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT()));
4244 }
4245 
4246 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
4247 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
4248 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
4249 // not undo itself, but they are redundant.
4250 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
4251   unsigned ShAmt1 = N->getConstantOperandVal(1);
4252   SDValue Src = N->getOperand(0);
4253 
4254   if (Src.getOpcode() != N->getOpcode())
4255     return SDValue();
4256 
4257   unsigned ShAmt2 = Src.getConstantOperandVal(1);
4258   Src = Src.getOperand(0);
4259 
4260   unsigned CombinedShAmt;
4261   if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
4262     CombinedShAmt = ShAmt1 | ShAmt2;
4263   else
4264     CombinedShAmt = ShAmt1 ^ ShAmt2;
4265 
4266   if (CombinedShAmt == 0)
4267     return Src;
4268 
4269   SDLoc DL(N);
4270   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
4271                      DAG.getTargetConstant(CombinedShAmt, DL,
4272                                            N->getOperand(1).getValueType()));
4273 }
4274 
4275 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
4276                                                DAGCombinerInfo &DCI) const {
4277   SelectionDAG &DAG = DCI.DAG;
4278 
4279   switch (N->getOpcode()) {
4280   default:
4281     break;
4282   case RISCVISD::SplitF64: {
4283     SDValue Op0 = N->getOperand(0);
4284     // If the input to SplitF64 is just BuildPairF64 then the operation is
4285     // redundant. Instead, use BuildPairF64's operands directly.
4286     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
4287       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
4288 
4289     SDLoc DL(N);
4290 
4291     // It's cheaper to materialise two 32-bit integers than to load a double
4292     // from the constant pool and transfer it to integer registers through the
4293     // stack.
4294     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
4295       APInt V = C->getValueAPF().bitcastToAPInt();
4296       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
4297       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
4298       return DCI.CombineTo(N, Lo, Hi);
4299     }
4300 
4301     // This is a target-specific version of a DAGCombine performed in
4302     // DAGCombiner::visitBITCAST. It performs the equivalent of:
4303     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
4304     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
4305     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
4306         !Op0.getNode()->hasOneUse())
4307       break;
4308     SDValue NewSplitF64 =
4309         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
4310                     Op0.getOperand(0));
4311     SDValue Lo = NewSplitF64.getValue(0);
4312     SDValue Hi = NewSplitF64.getValue(1);
4313     APInt SignBit = APInt::getSignMask(32);
4314     if (Op0.getOpcode() == ISD::FNEG) {
4315       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
4316                                   DAG.getConstant(SignBit, DL, MVT::i32));
4317       return DCI.CombineTo(N, Lo, NewHi);
4318     }
4319     assert(Op0.getOpcode() == ISD::FABS);
4320     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
4321                                 DAG.getConstant(~SignBit, DL, MVT::i32));
4322     return DCI.CombineTo(N, Lo, NewHi);
4323   }
4324   case RISCVISD::SLLW:
4325   case RISCVISD::SRAW:
4326   case RISCVISD::SRLW:
4327   case RISCVISD::ROLW:
4328   case RISCVISD::RORW: {
4329     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
4330     SDValue LHS = N->getOperand(0);
4331     SDValue RHS = N->getOperand(1);
4332     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
4333     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
4334     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
4335         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
4336       if (N->getOpcode() != ISD::DELETED_NODE)
4337         DCI.AddToWorklist(N);
4338       return SDValue(N, 0);
4339     }
4340     break;
4341   }
4342   case RISCVISD::FSL:
4343   case RISCVISD::FSR: {
4344     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
4345     SDValue ShAmt = N->getOperand(2);
4346     unsigned BitWidth = ShAmt.getValueSizeInBits();
4347     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
4348     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
4349     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
4350       if (N->getOpcode() != ISD::DELETED_NODE)
4351         DCI.AddToWorklist(N);
4352       return SDValue(N, 0);
4353     }
4354     break;
4355   }
4356   case RISCVISD::FSLW:
4357   case RISCVISD::FSRW: {
4358     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
4359     // read.
4360     SDValue Op0 = N->getOperand(0);
4361     SDValue Op1 = N->getOperand(1);
4362     SDValue ShAmt = N->getOperand(2);
4363     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
4364     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
4365     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
4366         SimplifyDemandedBits(Op1, OpMask, DCI) ||
4367         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
4368       if (N->getOpcode() != ISD::DELETED_NODE)
4369         DCI.AddToWorklist(N);
4370       return SDValue(N, 0);
4371     }
4372     break;
4373   }
4374   case RISCVISD::GREVIW:
4375   case RISCVISD::GORCIW: {
4376     // Only the lower 32 bits of the first operand are read
4377     SDValue Op0 = N->getOperand(0);
4378     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
4379     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
4380       if (N->getOpcode() != ISD::DELETED_NODE)
4381         DCI.AddToWorklist(N);
4382       return SDValue(N, 0);
4383     }
4384 
4385     return combineGREVI_GORCI(N, DCI.DAG);
4386   }
4387   case RISCVISD::FMV_X_ANYEXTW_RV64: {
4388     SDLoc DL(N);
4389     SDValue Op0 = N->getOperand(0);
4390     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
4391     // conversion is unnecessary and can be replaced with an ANY_EXTEND
4392     // of the FMV_W_X_RV64 operand.
4393     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
4394       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
4395              "Unexpected value type!");
4396       return Op0.getOperand(0);
4397     }
4398 
4399     // This is a target-specific version of a DAGCombine performed in
4400     // DAGCombiner::visitBITCAST. It performs the equivalent of:
4401     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
4402     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
4403     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
4404         !Op0.getNode()->hasOneUse())
4405       break;
4406     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
4407                                  Op0.getOperand(0));
4408     APInt SignBit = APInt::getSignMask(32).sext(64);
4409     if (Op0.getOpcode() == ISD::FNEG)
4410       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
4411                          DAG.getConstant(SignBit, DL, MVT::i64));
4412 
4413     assert(Op0.getOpcode() == ISD::FABS);
4414     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
4415                        DAG.getConstant(~SignBit, DL, MVT::i64));
4416   }
4417   case RISCVISD::GREVI:
4418   case RISCVISD::GORCI:
4419     return combineGREVI_GORCI(N, DCI.DAG);
4420   case ISD::OR:
4421     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
4422       return GREV;
4423     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
4424       return GORC;
4425     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget))
4426       return SHFL;
4427     break;
4428   case RISCVISD::SELECT_CC: {
4429     // Transform
4430     SDValue LHS = N->getOperand(0);
4431     SDValue RHS = N->getOperand(1);
4432     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
4433     if (!ISD::isIntEqualitySetCC(CCVal))
4434       break;
4435 
4436     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
4437     //      (select_cc X, Y, lt, trueV, falseV)
4438     // Sometimes the setcc is introduced after select_cc has been formed.
4439     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
4440         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
4441       // If we're looking for eq 0 instead of ne 0, we need to invert the
4442       // condition.
4443       bool Invert = CCVal == ISD::SETEQ;
4444       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
4445       if (Invert)
4446         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
4447 
4448       SDLoc DL(N);
4449       RHS = LHS.getOperand(1);
4450       LHS = LHS.getOperand(0);
4451       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4452 
4453       SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
4454       return DAG.getNode(
4455           RISCVISD::SELECT_CC, DL, N->getValueType(0),
4456           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
4457     }
4458 
4459     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
4460     //      (select_cc X, Y, eq/ne, trueV, falseV)
4461     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
4462       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
4463                          {LHS.getOperand(0), LHS.getOperand(1),
4464                           N->getOperand(2), N->getOperand(3),
4465                           N->getOperand(4)});
4466     // (select_cc X, 1, setne, trueV, falseV) ->
4467     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
4468     // This can occur when legalizing some floating point comparisons.
4469     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
4470     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
4471       SDLoc DL(N);
4472       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
4473       SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
4474       RHS = DAG.getConstant(0, DL, LHS.getValueType());
4475       return DAG.getNode(
4476           RISCVISD::SELECT_CC, DL, N->getValueType(0),
4477           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
4478     }
4479 
4480     break;
4481   }
4482   case RISCVISD::BR_CC: {
4483     SDValue LHS = N->getOperand(1);
4484     SDValue RHS = N->getOperand(2);
4485     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
4486     if (!ISD::isIntEqualitySetCC(CCVal))
4487       break;
4488 
4489     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
4490     //      (br_cc X, Y, lt, dest)
4491     // Sometimes the setcc is introduced after br_cc has been formed.
4492     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
4493         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
4494       // If we're looking for eq 0 instead of ne 0, we need to invert the
4495       // condition.
4496       bool Invert = CCVal == ISD::SETEQ;
4497       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
4498       if (Invert)
4499         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
4500 
4501       SDLoc DL(N);
4502       RHS = LHS.getOperand(1);
4503       LHS = LHS.getOperand(0);
4504       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4505 
4506       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
4507                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
4508                          N->getOperand(4));
4509     }
4510 
4511     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
4512     //      (br_cc X, Y, eq/ne, trueV, falseV)
4513     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
4514       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
4515                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
4516                          N->getOperand(3), N->getOperand(4));
4517 
4518     // (br_cc X, 1, setne, br_cc) ->
4519     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
4520     // This can occur when legalizing some floating point comparisons.
4521     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
4522     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
4523       SDLoc DL(N);
4524       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
4525       SDValue TargetCC = DAG.getCondCode(CCVal);
4526       RHS = DAG.getConstant(0, DL, LHS.getValueType());
4527       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
4528                          N->getOperand(0), LHS, RHS, TargetCC,
4529                          N->getOperand(4));
4530     }
4531     break;
4532   }
4533   case ISD::FCOPYSIGN: {
4534     EVT VT = N->getValueType(0);
4535     if (!VT.isVector())
4536       break;
4537     // There is a form of VFSGNJ which injects the negated sign of its second
4538     // operand. Try and bubble any FNEG up after the extend/round to produce
4539     // this optimized pattern. Avoid modifying cases where FP_ROUND and
4540     // TRUNC=1.
4541     SDValue In2 = N->getOperand(1);
4542     // Avoid cases where the extend/round has multiple uses, as duplicating
4543     // those is typically more expensive than removing a fneg.
4544     if (!In2.hasOneUse())
4545       break;
4546     if (In2.getOpcode() != ISD::FP_EXTEND &&
4547         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
4548       break;
4549     In2 = In2.getOperand(0);
4550     if (In2.getOpcode() != ISD::FNEG)
4551       break;
4552     SDLoc DL(N);
4553     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
4554     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
4555                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
4556   }
4557   case ISD::MGATHER:
4558   case ISD::MSCATTER: {
4559     if (!DCI.isBeforeLegalize())
4560       break;
4561     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
4562     SDValue Index = MGSN->getIndex();
4563     EVT IndexVT = Index.getValueType();
4564     MVT XLenVT = Subtarget.getXLenVT();
4565     // RISCV indexed loads only support the "unsigned unscaled" addressing
4566     // mode, so anything else must be manually legalized.
4567     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
4568                                 (MGSN->isIndexSigned() &&
4569                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
4570     if (!NeedsIdxLegalization)
4571       break;
4572 
4573     SDLoc DL(N);
4574 
4575     // Any index legalization should first promote to XLenVT, so we don't lose
4576     // bits when scaling. This may create an illegal index type so we let
4577     // LLVM's legalization take care of the splitting.
4578     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
4579       IndexVT = IndexVT.changeVectorElementType(XLenVT);
4580       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
4581                                                 : ISD::ZERO_EXTEND,
4582                           DL, IndexVT, Index);
4583     }
4584 
4585     unsigned Scale = N->getConstantOperandVal(5);
4586     if (MGSN->isIndexScaled() && Scale != 1) {
4587       // Manually scale the indices by the element size.
4588       // TODO: Sanitize the scale operand here?
4589       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
4590       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
4591       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
4592     }
4593 
4594     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
4595     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
4596       return DAG.getMaskedGather(
4597           N->getVTList(), MGSN->getMemoryVT(), DL,
4598           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
4599            MGSN->getBasePtr(), Index, MGN->getScale()},
4600           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
4601     }
4602     const auto *MSN = cast<MaskedScatterSDNode>(N);
4603     return DAG.getMaskedScatter(
4604         N->getVTList(), MGSN->getMemoryVT(), DL,
4605         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
4606          Index, MGSN->getScale()},
4607         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
4608   }
4609   }
4610 
4611   return SDValue();
4612 }
4613 
4614 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
4615     const SDNode *N, CombineLevel Level) const {
4616   // The following folds are only desirable if `(OP _, c1 << c2)` can be
4617   // materialised in fewer instructions than `(OP _, c1)`:
4618   //
4619   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
4620   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
4621   SDValue N0 = N->getOperand(0);
4622   EVT Ty = N0.getValueType();
4623   if (Ty.isScalarInteger() &&
4624       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
4625     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
4626     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
4627     if (C1 && C2) {
4628       const APInt &C1Int = C1->getAPIntValue();
4629       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
4630 
4631       // We can materialise `c1 << c2` into an add immediate, so it's "free",
4632       // and the combine should happen, to potentially allow further combines
4633       // later.
4634       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
4635           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
4636         return true;
4637 
4638       // We can materialise `c1` in an add immediate, so it's "free", and the
4639       // combine should be prevented.
4640       if (C1Int.getMinSignedBits() <= 64 &&
4641           isLegalAddImmediate(C1Int.getSExtValue()))
4642         return false;
4643 
4644       // Neither constant will fit into an immediate, so find materialisation
4645       // costs.
4646       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
4647                                               Subtarget.is64Bit());
4648       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
4649           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
4650 
4651       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
4652       // combine should be prevented.
4653       if (C1Cost < ShiftedC1Cost)
4654         return false;
4655     }
4656   }
4657   return true;
4658 }
4659 
4660 bool RISCVTargetLowering::targetShrinkDemandedConstant(
4661     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4662     TargetLoweringOpt &TLO) const {
4663   // Delay this optimization as late as possible.
4664   if (!TLO.LegalOps)
4665     return false;
4666 
4667   EVT VT = Op.getValueType();
4668   if (VT.isVector())
4669     return false;
4670 
4671   // Only handle AND for now.
4672   if (Op.getOpcode() != ISD::AND)
4673     return false;
4674 
4675   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
4676   if (!C)
4677     return false;
4678 
4679   const APInt &Mask = C->getAPIntValue();
4680 
4681   // Clear all non-demanded bits initially.
4682   APInt ShrunkMask = Mask & DemandedBits;
4683 
4684   // If the shrunk mask fits in sign extended 12 bits, let the target
4685   // independent code apply it.
4686   if (ShrunkMask.isSignedIntN(12))
4687     return false;
4688 
4689   // Try to make a smaller immediate by setting undemanded bits.
4690 
4691   // We need to be able to make a negative number through a combination of mask
4692   // and undemanded bits.
4693   APInt ExpandedMask = Mask | ~DemandedBits;
4694   if (!ExpandedMask.isNegative())
4695     return false;
4696 
4697   // What is the fewest number of bits we need to represent the negative number.
4698   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
4699 
4700   // Try to make a 12 bit negative immediate. If that fails try to make a 32
4701   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
4702   APInt NewMask = ShrunkMask;
4703   if (MinSignedBits <= 12)
4704     NewMask.setBitsFrom(11);
4705   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
4706     NewMask.setBitsFrom(31);
4707   else
4708     return false;
4709 
4710   // Sanity check that our new mask is a subset of the demanded mask.
4711   assert(NewMask.isSubsetOf(ExpandedMask));
4712 
4713   // If we aren't changing the mask, just return true to keep it and prevent
4714   // the caller from optimizing.
4715   if (NewMask == Mask)
4716     return true;
4717 
4718   // Replace the constant with the new mask.
4719   SDLoc DL(Op);
4720   SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
4721   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
4722   return TLO.CombineTo(Op, NewOp);
4723 }
4724 
4725 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
4726                                                         KnownBits &Known,
4727                                                         const APInt &DemandedElts,
4728                                                         const SelectionDAG &DAG,
4729                                                         unsigned Depth) const {
4730   unsigned BitWidth = Known.getBitWidth();
4731   unsigned Opc = Op.getOpcode();
4732   assert((Opc >= ISD::BUILTIN_OP_END ||
4733           Opc == ISD::INTRINSIC_WO_CHAIN ||
4734           Opc == ISD::INTRINSIC_W_CHAIN ||
4735           Opc == ISD::INTRINSIC_VOID) &&
4736          "Should use MaskedValueIsZero if you don't know whether Op"
4737          " is a target node!");
4738 
4739   Known.resetAll();
4740   switch (Opc) {
4741   default: break;
4742   case RISCVISD::SELECT_CC: {
4743     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
4744     // If we don't know any bits, early out.
4745     if (Known.isUnknown())
4746       break;
4747     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
4748 
4749     // Only known if known in both the LHS and RHS.
4750     Known = KnownBits::commonBits(Known, Known2);
4751     break;
4752   }
4753   case RISCVISD::REMUW: {
4754     KnownBits Known2;
4755     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4756     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4757     // We only care about the lower 32 bits.
4758     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
4759     // Restore the original width by sign extending.
4760     Known = Known.sext(BitWidth);
4761     break;
4762   }
4763   case RISCVISD::DIVUW: {
4764     KnownBits Known2;
4765     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4766     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4767     // We only care about the lower 32 bits.
4768     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
4769     // Restore the original width by sign extending.
4770     Known = Known.sext(BitWidth);
4771     break;
4772   }
4773   case RISCVISD::READ_VLENB:
4774     // We assume VLENB is at least 8 bytes.
4775     // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits.
4776     Known.Zero.setLowBits(3);
4777     break;
4778   }
4779 }
4780 
4781 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
4782     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4783     unsigned Depth) const {
4784   switch (Op.getOpcode()) {
4785   default:
4786     break;
4787   case RISCVISD::SLLW:
4788   case RISCVISD::SRAW:
4789   case RISCVISD::SRLW:
4790   case RISCVISD::DIVW:
4791   case RISCVISD::DIVUW:
4792   case RISCVISD::REMUW:
4793   case RISCVISD::ROLW:
4794   case RISCVISD::RORW:
4795   case RISCVISD::GREVIW:
4796   case RISCVISD::GORCIW:
4797   case RISCVISD::FSLW:
4798   case RISCVISD::FSRW:
4799     // TODO: As the result is sign-extended, this is conservatively correct. A
4800     // more precise answer could be calculated for SRAW depending on known
4801     // bits in the shift amount.
4802     return 33;
4803   case RISCVISD::SHFLI: {
4804     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
4805     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
4806     // will stay within the upper 32 bits. If there were more than 32 sign bits
4807     // before there will be at least 33 sign bits after.
4808     if (Op.getValueType() == MVT::i64 &&
4809         (Op.getConstantOperandVal(1) & 0x10) == 0) {
4810       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4811       if (Tmp > 32)
4812         return 33;
4813     }
4814     break;
4815   }
4816   case RISCVISD::VMV_X_S:
4817     // The number of sign bits of the scalar result is computed by obtaining the
4818     // element type of the input vector operand, subtracting its width from the
4819     // XLEN, and then adding one (sign bit within the element type). If the
4820     // element type is wider than XLen, the least-significant XLEN bits are
4821     // taken.
4822     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
4823       return 1;
4824     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
4825   }
4826 
4827   return 1;
4828 }
4829 
4830 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
4831                                                   MachineBasicBlock *BB) {
4832   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
4833 
4834   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
4835   // Should the count have wrapped while it was being read, we need to try
4836   // again.
4837   // ...
4838   // read:
4839   // rdcycleh x3 # load high word of cycle
4840   // rdcycle  x2 # load low word of cycle
4841   // rdcycleh x4 # load high word of cycle
4842   // bne x3, x4, read # check if high word reads match, otherwise try again
4843   // ...
4844 
4845   MachineFunction &MF = *BB->getParent();
4846   const BasicBlock *LLVM_BB = BB->getBasicBlock();
4847   MachineFunction::iterator It = ++BB->getIterator();
4848 
4849   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
4850   MF.insert(It, LoopMBB);
4851 
4852   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
4853   MF.insert(It, DoneMBB);
4854 
4855   // Transfer the remainder of BB and its successor edges to DoneMBB.
4856   DoneMBB->splice(DoneMBB->begin(), BB,
4857                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
4858   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
4859 
4860   BB->addSuccessor(LoopMBB);
4861 
4862   MachineRegisterInfo &RegInfo = MF.getRegInfo();
4863   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4864   Register LoReg = MI.getOperand(0).getReg();
4865   Register HiReg = MI.getOperand(1).getReg();
4866   DebugLoc DL = MI.getDebugLoc();
4867 
4868   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
4869   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
4870       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
4871       .addReg(RISCV::X0);
4872   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
4873       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
4874       .addReg(RISCV::X0);
4875   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
4876       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
4877       .addReg(RISCV::X0);
4878 
4879   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
4880       .addReg(HiReg)
4881       .addReg(ReadAgainReg)
4882       .addMBB(LoopMBB);
4883 
4884   LoopMBB->addSuccessor(LoopMBB);
4885   LoopMBB->addSuccessor(DoneMBB);
4886 
4887   MI.eraseFromParent();
4888 
4889   return DoneMBB;
4890 }
4891 
4892 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
4893                                              MachineBasicBlock *BB) {
4894   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
4895 
4896   MachineFunction &MF = *BB->getParent();
4897   DebugLoc DL = MI.getDebugLoc();
4898   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
4899   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
4900   Register LoReg = MI.getOperand(0).getReg();
4901   Register HiReg = MI.getOperand(1).getReg();
4902   Register SrcReg = MI.getOperand(2).getReg();
4903   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
4904   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
4905 
4906   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
4907                           RI);
4908   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
4909   MachineMemOperand *MMOLo =
4910       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
4911   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
4912       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
4913   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
4914       .addFrameIndex(FI)
4915       .addImm(0)
4916       .addMemOperand(MMOLo);
4917   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
4918       .addFrameIndex(FI)
4919       .addImm(4)
4920       .addMemOperand(MMOHi);
4921   MI.eraseFromParent(); // The pseudo instruction is gone now.
4922   return BB;
4923 }
4924 
4925 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
4926                                                  MachineBasicBlock *BB) {
4927   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
4928          "Unexpected instruction");
4929 
4930   MachineFunction &MF = *BB->getParent();
4931   DebugLoc DL = MI.getDebugLoc();
4932   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
4933   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
4934   Register DstReg = MI.getOperand(0).getReg();
4935   Register LoReg = MI.getOperand(1).getReg();
4936   Register HiReg = MI.getOperand(2).getReg();
4937   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
4938   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
4939 
4940   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
4941   MachineMemOperand *MMOLo =
4942       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
4943   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
4944       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
4945   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
4946       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
4947       .addFrameIndex(FI)
4948       .addImm(0)
4949       .addMemOperand(MMOLo);
4950   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
4951       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
4952       .addFrameIndex(FI)
4953       .addImm(4)
4954       .addMemOperand(MMOHi);
4955   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
4956   MI.eraseFromParent(); // The pseudo instruction is gone now.
4957   return BB;
4958 }
4959 
4960 static bool isSelectPseudo(MachineInstr &MI) {
4961   switch (MI.getOpcode()) {
4962   default:
4963     return false;
4964   case RISCV::Select_GPR_Using_CC_GPR:
4965   case RISCV::Select_FPR16_Using_CC_GPR:
4966   case RISCV::Select_FPR32_Using_CC_GPR:
4967   case RISCV::Select_FPR64_Using_CC_GPR:
4968     return true;
4969   }
4970 }
4971 
4972 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
4973                                            MachineBasicBlock *BB) {
4974   // To "insert" Select_* instructions, we actually have to insert the triangle
4975   // control-flow pattern.  The incoming instructions know the destination vreg
4976   // to set, the condition code register to branch on, the true/false values to
4977   // select between, and the condcode to use to select the appropriate branch.
4978   //
4979   // We produce the following control flow:
4980   //     HeadMBB
4981   //     |  \
4982   //     |  IfFalseMBB
4983   //     | /
4984   //    TailMBB
4985   //
4986   // When we find a sequence of selects we attempt to optimize their emission
4987   // by sharing the control flow. Currently we only handle cases where we have
4988   // multiple selects with the exact same condition (same LHS, RHS and CC).
4989   // The selects may be interleaved with other instructions if the other
4990   // instructions meet some requirements we deem safe:
4991   // - They are debug instructions. Otherwise,
4992   // - They do not have side-effects, do not access memory and their inputs do
4993   //   not depend on the results of the select pseudo-instructions.
4994   // The TrueV/FalseV operands of the selects cannot depend on the result of
4995   // previous selects in the sequence.
4996   // These conditions could be further relaxed. See the X86 target for a
4997   // related approach and more information.
4998   Register LHS = MI.getOperand(1).getReg();
4999   Register RHS = MI.getOperand(2).getReg();
5000   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
5001 
5002   SmallVector<MachineInstr *, 4> SelectDebugValues;
5003   SmallSet<Register, 4> SelectDests;
5004   SelectDests.insert(MI.getOperand(0).getReg());
5005 
5006   MachineInstr *LastSelectPseudo = &MI;
5007 
5008   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
5009        SequenceMBBI != E; ++SequenceMBBI) {
5010     if (SequenceMBBI->isDebugInstr())
5011       continue;
5012     else if (isSelectPseudo(*SequenceMBBI)) {
5013       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
5014           SequenceMBBI->getOperand(2).getReg() != RHS ||
5015           SequenceMBBI->getOperand(3).getImm() != CC ||
5016           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
5017           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
5018         break;
5019       LastSelectPseudo = &*SequenceMBBI;
5020       SequenceMBBI->collectDebugValues(SelectDebugValues);
5021       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
5022     } else {
5023       if (SequenceMBBI->hasUnmodeledSideEffects() ||
5024           SequenceMBBI->mayLoadOrStore())
5025         break;
5026       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
5027             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
5028           }))
5029         break;
5030     }
5031   }
5032 
5033   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
5034   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5035   DebugLoc DL = MI.getDebugLoc();
5036   MachineFunction::iterator I = ++BB->getIterator();
5037 
5038   MachineBasicBlock *HeadMBB = BB;
5039   MachineFunction *F = BB->getParent();
5040   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
5041   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
5042 
5043   F->insert(I, IfFalseMBB);
5044   F->insert(I, TailMBB);
5045 
5046   // Transfer debug instructions associated with the selects to TailMBB.
5047   for (MachineInstr *DebugInstr : SelectDebugValues) {
5048     TailMBB->push_back(DebugInstr->removeFromParent());
5049   }
5050 
5051   // Move all instructions after the sequence to TailMBB.
5052   TailMBB->splice(TailMBB->end(), HeadMBB,
5053                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
5054   // Update machine-CFG edges by transferring all successors of the current
5055   // block to the new block which will contain the Phi nodes for the selects.
5056   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
5057   // Set the successors for HeadMBB.
5058   HeadMBB->addSuccessor(IfFalseMBB);
5059   HeadMBB->addSuccessor(TailMBB);
5060 
5061   // Insert appropriate branch.
5062   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
5063 
5064   BuildMI(HeadMBB, DL, TII.get(Opcode))
5065     .addReg(LHS)
5066     .addReg(RHS)
5067     .addMBB(TailMBB);
5068 
5069   // IfFalseMBB just falls through to TailMBB.
5070   IfFalseMBB->addSuccessor(TailMBB);
5071 
5072   // Create PHIs for all of the select pseudo-instructions.
5073   auto SelectMBBI = MI.getIterator();
5074   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
5075   auto InsertionPoint = TailMBB->begin();
5076   while (SelectMBBI != SelectEnd) {
5077     auto Next = std::next(SelectMBBI);
5078     if (isSelectPseudo(*SelectMBBI)) {
5079       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
5080       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
5081               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
5082           .addReg(SelectMBBI->getOperand(4).getReg())
5083           .addMBB(HeadMBB)
5084           .addReg(SelectMBBI->getOperand(5).getReg())
5085           .addMBB(IfFalseMBB);
5086       SelectMBBI->eraseFromParent();
5087     }
5088     SelectMBBI = Next;
5089   }
5090 
5091   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
5092   return TailMBB;
5093 }
5094 
5095 static MachineInstr *elideCopies(MachineInstr *MI,
5096                                  const MachineRegisterInfo &MRI) {
5097   while (true) {
5098     if (!MI->isFullCopy())
5099       return MI;
5100     if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
5101       return nullptr;
5102     MI = MRI.getVRegDef(MI->getOperand(1).getReg());
5103     if (!MI)
5104       return nullptr;
5105   }
5106 }
5107 
5108 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
5109                                     int VLIndex, unsigned SEWIndex,
5110                                     RISCVVLMUL VLMul, bool ForceTailAgnostic) {
5111   MachineFunction &MF = *BB->getParent();
5112   DebugLoc DL = MI.getDebugLoc();
5113   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5114 
5115   unsigned SEW = MI.getOperand(SEWIndex).getImm();
5116   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
5117   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
5118 
5119   MachineRegisterInfo &MRI = MF.getRegInfo();
5120 
5121   auto BuildVSETVLI = [&]() {
5122     if (VLIndex >= 0) {
5123       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
5124       Register VLReg = MI.getOperand(VLIndex).getReg();
5125 
5126       // VL might be a compile time constant, but isel would have to put it
5127       // in a register. See if VL comes from an ADDI X0, imm.
5128       if (VLReg.isVirtual()) {
5129         MachineInstr *Def = MRI.getVRegDef(VLReg);
5130         if (Def && Def->getOpcode() == RISCV::ADDI &&
5131             Def->getOperand(1).getReg() == RISCV::X0 &&
5132             Def->getOperand(2).isImm()) {
5133           uint64_t Imm = Def->getOperand(2).getImm();
5134           // VSETIVLI allows a 5-bit zero extended immediate.
5135           if (isUInt<5>(Imm))
5136             return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
5137                 .addReg(DestReg, RegState::Define | RegState::Dead)
5138                 .addImm(Imm);
5139         }
5140       }
5141 
5142       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
5143           .addReg(DestReg, RegState::Define | RegState::Dead)
5144           .addReg(VLReg);
5145     }
5146 
5147     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
5148     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
5149         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
5150         .addReg(RISCV::X0, RegState::Kill);
5151   };
5152 
5153   MachineInstrBuilder MIB = BuildVSETVLI();
5154 
5155   // Default to tail agnostic unless the destination is tied to a source. In
5156   // that case the user would have some control over the tail values. The tail
5157   // policy is also ignored on instructions that only update element 0 like
5158   // vmv.s.x or reductions so use agnostic there to match the common case.
5159   // FIXME: This is conservatively correct, but we might want to detect that
5160   // the input is undefined.
5161   bool TailAgnostic = true;
5162   unsigned UseOpIdx;
5163   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
5164     TailAgnostic = false;
5165     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
5166     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
5167     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
5168     if (UseMI) {
5169       UseMI = elideCopies(UseMI, MRI);
5170       if (UseMI && UseMI->isImplicitDef())
5171         TailAgnostic = true;
5172     }
5173   }
5174 
5175   // For simplicity we reuse the vtype representation here.
5176   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
5177                                      /*TailAgnostic*/ TailAgnostic,
5178                                      /*MaskAgnostic*/ false));
5179 
5180   // Remove (now) redundant operands from pseudo
5181   if (VLIndex >= 0) {
5182     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
5183     MI.getOperand(VLIndex).setIsKill(false);
5184   }
5185 
5186   return BB;
5187 }
5188 
5189 MachineBasicBlock *
5190 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
5191                                                  MachineBasicBlock *BB) const {
5192   uint64_t TSFlags = MI.getDesc().TSFlags;
5193 
5194   if (TSFlags & RISCVII::HasSEWOpMask) {
5195     unsigned NumOperands = MI.getNumExplicitOperands();
5196     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
5197     unsigned SEWIndex = NumOperands - 1;
5198     bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
5199 
5200     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
5201                                                RISCVII::VLMulShift);
5202     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
5203   }
5204 
5205   switch (MI.getOpcode()) {
5206   default:
5207     llvm_unreachable("Unexpected instr type to insert");
5208   case RISCV::ReadCycleWide:
5209     assert(!Subtarget.is64Bit() &&
5210            "ReadCycleWrite is only to be used on riscv32");
5211     return emitReadCycleWidePseudo(MI, BB);
5212   case RISCV::Select_GPR_Using_CC_GPR:
5213   case RISCV::Select_FPR16_Using_CC_GPR:
5214   case RISCV::Select_FPR32_Using_CC_GPR:
5215   case RISCV::Select_FPR64_Using_CC_GPR:
5216     return emitSelectPseudo(MI, BB);
5217   case RISCV::BuildPairF64Pseudo:
5218     return emitBuildPairF64Pseudo(MI, BB);
5219   case RISCV::SplitF64Pseudo:
5220     return emitSplitF64Pseudo(MI, BB);
5221   }
5222 }
5223 
5224 // Calling Convention Implementation.
5225 // The expectations for frontend ABI lowering vary from target to target.
5226 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
5227 // details, but this is a longer term goal. For now, we simply try to keep the
5228 // role of the frontend as simple and well-defined as possible. The rules can
5229 // be summarised as:
5230 // * Never split up large scalar arguments. We handle them here.
5231 // * If a hardfloat calling convention is being used, and the struct may be
5232 // passed in a pair of registers (fp+fp, int+fp), and both registers are
5233 // available, then pass as two separate arguments. If either the GPRs or FPRs
5234 // are exhausted, then pass according to the rule below.
5235 // * If a struct could never be passed in registers or directly in a stack
5236 // slot (as it is larger than 2*XLEN and the floating point rules don't
5237 // apply), then pass it using a pointer with the byval attribute.
5238 // * If a struct is less than 2*XLEN, then coerce to either a two-element
5239 // word-sized array or a 2*XLEN scalar (depending on alignment).
5240 // * The frontend can determine whether a struct is returned by reference or
5241 // not based on its size and fields. If it will be returned by reference, the
5242 // frontend must modify the prototype so a pointer with the sret annotation is
5243 // passed as the first argument. This is not necessary for large scalar
5244 // returns.
5245 // * Struct return values and varargs should be coerced to structs containing
5246 // register-size fields in the same situations they would be for fixed
5247 // arguments.
5248 
5249 static const MCPhysReg ArgGPRs[] = {
5250   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
5251   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
5252 };
5253 static const MCPhysReg ArgFPR16s[] = {
5254   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
5255   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
5256 };
5257 static const MCPhysReg ArgFPR32s[] = {
5258   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
5259   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
5260 };
5261 static const MCPhysReg ArgFPR64s[] = {
5262   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
5263   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
5264 };
5265 // This is an interim calling convention and it may be changed in the future.
5266 static const MCPhysReg ArgVRs[] = {
5267     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
5268     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
5269     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
5270 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
5271                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
5272                                      RISCV::V20M2, RISCV::V22M2};
5273 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
5274                                      RISCV::V20M4};
5275 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
5276 
5277 // Pass a 2*XLEN argument that has been split into two XLEN values through
5278 // registers or the stack as necessary.
5279 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
5280                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
5281                                 MVT ValVT2, MVT LocVT2,
5282                                 ISD::ArgFlagsTy ArgFlags2) {
5283   unsigned XLenInBytes = XLen / 8;
5284   if (Register Reg = State.AllocateReg(ArgGPRs)) {
5285     // At least one half can be passed via register.
5286     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
5287                                      VA1.getLocVT(), CCValAssign::Full));
5288   } else {
5289     // Both halves must be passed on the stack, with proper alignment.
5290     Align StackAlign =
5291         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
5292     State.addLoc(
5293         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
5294                             State.AllocateStack(XLenInBytes, StackAlign),
5295                             VA1.getLocVT(), CCValAssign::Full));
5296     State.addLoc(CCValAssign::getMem(
5297         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
5298         LocVT2, CCValAssign::Full));
5299     return false;
5300   }
5301 
5302   if (Register Reg = State.AllocateReg(ArgGPRs)) {
5303     // The second half can also be passed via register.
5304     State.addLoc(
5305         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
5306   } else {
5307     // The second half is passed via the stack, without additional alignment.
5308     State.addLoc(CCValAssign::getMem(
5309         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
5310         LocVT2, CCValAssign::Full));
5311   }
5312 
5313   return false;
5314 }
5315 
5316 // Implements the RISC-V calling convention. Returns true upon failure.
5317 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
5318                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
5319                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
5320                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
5321                      Optional<unsigned> FirstMaskArgument) {
5322   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
5323   assert(XLen == 32 || XLen == 64);
5324   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
5325 
5326   // Any return value split in to more than two values can't be returned
5327   // directly. Vectors are returned via the available vector registers.
5328   if (!LocVT.isVector() && IsRet && ValNo > 1)
5329     return true;
5330 
5331   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
5332   // variadic argument, or if no F16/F32 argument registers are available.
5333   bool UseGPRForF16_F32 = true;
5334   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
5335   // variadic argument, or if no F64 argument registers are available.
5336   bool UseGPRForF64 = true;
5337 
5338   switch (ABI) {
5339   default:
5340     llvm_unreachable("Unexpected ABI");
5341   case RISCVABI::ABI_ILP32:
5342   case RISCVABI::ABI_LP64:
5343     break;
5344   case RISCVABI::ABI_ILP32F:
5345   case RISCVABI::ABI_LP64F:
5346     UseGPRForF16_F32 = !IsFixed;
5347     break;
5348   case RISCVABI::ABI_ILP32D:
5349   case RISCVABI::ABI_LP64D:
5350     UseGPRForF16_F32 = !IsFixed;
5351     UseGPRForF64 = !IsFixed;
5352     break;
5353   }
5354 
5355   // FPR16, FPR32, and FPR64 alias each other.
5356   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
5357     UseGPRForF16_F32 = true;
5358     UseGPRForF64 = true;
5359   }
5360 
5361   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
5362   // similar local variables rather than directly checking against the target
5363   // ABI.
5364 
5365   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
5366     LocVT = XLenVT;
5367     LocInfo = CCValAssign::BCvt;
5368   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
5369     LocVT = MVT::i64;
5370     LocInfo = CCValAssign::BCvt;
5371   }
5372 
5373   // If this is a variadic argument, the RISC-V calling convention requires
5374   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
5375   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
5376   // be used regardless of whether the original argument was split during
5377   // legalisation or not. The argument will not be passed by registers if the
5378   // original type is larger than 2*XLEN, so the register alignment rule does
5379   // not apply.
5380   unsigned TwoXLenInBytes = (2 * XLen) / 8;
5381   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
5382       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
5383     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
5384     // Skip 'odd' register if necessary.
5385     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
5386       State.AllocateReg(ArgGPRs);
5387   }
5388 
5389   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
5390   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
5391       State.getPendingArgFlags();
5392 
5393   assert(PendingLocs.size() == PendingArgFlags.size() &&
5394          "PendingLocs and PendingArgFlags out of sync");
5395 
5396   // Handle passing f64 on RV32D with a soft float ABI or when floating point
5397   // registers are exhausted.
5398   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
5399     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
5400            "Can't lower f64 if it is split");
5401     // Depending on available argument GPRS, f64 may be passed in a pair of
5402     // GPRs, split between a GPR and the stack, or passed completely on the
5403     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
5404     // cases.
5405     Register Reg = State.AllocateReg(ArgGPRs);
5406     LocVT = MVT::i32;
5407     if (!Reg) {
5408       unsigned StackOffset = State.AllocateStack(8, Align(8));
5409       State.addLoc(
5410           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
5411       return false;
5412     }
5413     if (!State.AllocateReg(ArgGPRs))
5414       State.AllocateStack(4, Align(4));
5415     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5416     return false;
5417   }
5418 
5419   // Fixed-length vectors are located in the corresponding scalable-vector
5420   // container types.
5421   if (ValVT.isFixedLengthVector())
5422     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
5423 
5424   // Split arguments might be passed indirectly, so keep track of the pending
5425   // values. Split vectors are passed via a mix of registers and indirectly, so
5426   // treat them as we would any other argument.
5427   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
5428     LocVT = XLenVT;
5429     LocInfo = CCValAssign::Indirect;
5430     PendingLocs.push_back(
5431         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
5432     PendingArgFlags.push_back(ArgFlags);
5433     if (!ArgFlags.isSplitEnd()) {
5434       return false;
5435     }
5436   }
5437 
5438   // If the split argument only had two elements, it should be passed directly
5439   // in registers or on the stack.
5440   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
5441     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
5442     // Apply the normal calling convention rules to the first half of the
5443     // split argument.
5444     CCValAssign VA = PendingLocs[0];
5445     ISD::ArgFlagsTy AF = PendingArgFlags[0];
5446     PendingLocs.clear();
5447     PendingArgFlags.clear();
5448     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
5449                                ArgFlags);
5450   }
5451 
5452   // Allocate to a register if possible, or else a stack slot.
5453   Register Reg;
5454   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
5455     Reg = State.AllocateReg(ArgFPR16s);
5456   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
5457     Reg = State.AllocateReg(ArgFPR32s);
5458   else if (ValVT == MVT::f64 && !UseGPRForF64)
5459     Reg = State.AllocateReg(ArgFPR64s);
5460   else if (ValVT.isVector()) {
5461     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
5462     if (RC == &RISCV::VRRegClass) {
5463       // Assign the first mask argument to V0.
5464       // This is an interim calling convention and it may be changed in the
5465       // future.
5466       if (FirstMaskArgument.hasValue() &&
5467           ValNo == FirstMaskArgument.getValue()) {
5468         Reg = State.AllocateReg(RISCV::V0);
5469       } else {
5470         Reg = State.AllocateReg(ArgVRs);
5471       }
5472     } else if (RC == &RISCV::VRM2RegClass) {
5473       Reg = State.AllocateReg(ArgVRM2s);
5474     } else if (RC == &RISCV::VRM4RegClass) {
5475       Reg = State.AllocateReg(ArgVRM4s);
5476     } else if (RC == &RISCV::VRM8RegClass) {
5477       Reg = State.AllocateReg(ArgVRM8s);
5478     } else {
5479       llvm_unreachable("Unhandled class register for ValueType");
5480     }
5481     if (!Reg) {
5482       // For return values, the vector must be passed fully via registers or
5483       // via the stack.
5484       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
5485       // but we're using all of them.
5486       if (IsRet)
5487         return true;
5488       LocInfo = CCValAssign::Indirect;
5489       // Try using a GPR to pass the address
5490       Reg = State.AllocateReg(ArgGPRs);
5491       LocVT = XLenVT;
5492     }
5493   } else
5494     Reg = State.AllocateReg(ArgGPRs);
5495   unsigned StackOffset =
5496       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
5497 
5498   // If we reach this point and PendingLocs is non-empty, we must be at the
5499   // end of a split argument that must be passed indirectly.
5500   if (!PendingLocs.empty()) {
5501     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
5502     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
5503 
5504     for (auto &It : PendingLocs) {
5505       if (Reg)
5506         It.convertToReg(Reg);
5507       else
5508         It.convertToMem(StackOffset);
5509       State.addLoc(It);
5510     }
5511     PendingLocs.clear();
5512     PendingArgFlags.clear();
5513     return false;
5514   }
5515 
5516   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
5517           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
5518          "Expected an XLenVT or vector types at this stage");
5519 
5520   if (Reg) {
5521     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5522     return false;
5523   }
5524 
5525   // When a floating-point value is passed on the stack, no bit-conversion is
5526   // needed.
5527   if (ValVT.isFloatingPoint()) {
5528     LocVT = ValVT;
5529     LocInfo = CCValAssign::Full;
5530   }
5531   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
5532   return false;
5533 }
5534 
5535 template <typename ArgTy>
5536 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
5537   for (const auto &ArgIdx : enumerate(Args)) {
5538     MVT ArgVT = ArgIdx.value().VT;
5539     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
5540       return ArgIdx.index();
5541   }
5542   return None;
5543 }
5544 
5545 void RISCVTargetLowering::analyzeInputArgs(
5546     MachineFunction &MF, CCState &CCInfo,
5547     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
5548   unsigned NumArgs = Ins.size();
5549   FunctionType *FType = MF.getFunction().getFunctionType();
5550 
5551   Optional<unsigned> FirstMaskArgument;
5552   if (Subtarget.hasStdExtV())
5553     FirstMaskArgument = preAssignMask(Ins);
5554 
5555   for (unsigned i = 0; i != NumArgs; ++i) {
5556     MVT ArgVT = Ins[i].VT;
5557     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
5558 
5559     Type *ArgTy = nullptr;
5560     if (IsRet)
5561       ArgTy = FType->getReturnType();
5562     else if (Ins[i].isOrigArg())
5563       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
5564 
5565     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
5566     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
5567                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
5568                  FirstMaskArgument)) {
5569       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
5570                         << EVT(ArgVT).getEVTString() << '\n');
5571       llvm_unreachable(nullptr);
5572     }
5573   }
5574 }
5575 
5576 void RISCVTargetLowering::analyzeOutputArgs(
5577     MachineFunction &MF, CCState &CCInfo,
5578     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
5579     CallLoweringInfo *CLI) const {
5580   unsigned NumArgs = Outs.size();
5581 
5582   Optional<unsigned> FirstMaskArgument;
5583   if (Subtarget.hasStdExtV())
5584     FirstMaskArgument = preAssignMask(Outs);
5585 
5586   for (unsigned i = 0; i != NumArgs; i++) {
5587     MVT ArgVT = Outs[i].VT;
5588     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5589     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
5590 
5591     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
5592     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
5593                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
5594                  FirstMaskArgument)) {
5595       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
5596                         << EVT(ArgVT).getEVTString() << "\n");
5597       llvm_unreachable(nullptr);
5598     }
5599   }
5600 }
5601 
5602 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
5603 // values.
5604 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
5605                                    const CCValAssign &VA, const SDLoc &DL,
5606                                    const RISCVSubtarget &Subtarget) {
5607   switch (VA.getLocInfo()) {
5608   default:
5609     llvm_unreachable("Unexpected CCValAssign::LocInfo");
5610   case CCValAssign::Full:
5611     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
5612       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
5613     break;
5614   case CCValAssign::BCvt:
5615     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
5616       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
5617     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
5618       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
5619     else
5620       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
5621     break;
5622   }
5623   return Val;
5624 }
5625 
5626 // The caller is responsible for loading the full value if the argument is
5627 // passed with CCValAssign::Indirect.
5628 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
5629                                 const CCValAssign &VA, const SDLoc &DL,
5630                                 const RISCVTargetLowering &TLI) {
5631   MachineFunction &MF = DAG.getMachineFunction();
5632   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5633   EVT LocVT = VA.getLocVT();
5634   SDValue Val;
5635   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
5636   Register VReg = RegInfo.createVirtualRegister(RC);
5637   RegInfo.addLiveIn(VA.getLocReg(), VReg);
5638   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
5639 
5640   if (VA.getLocInfo() == CCValAssign::Indirect)
5641     return Val;
5642 
5643   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
5644 }
5645 
5646 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
5647                                    const CCValAssign &VA, const SDLoc &DL,
5648                                    const RISCVSubtarget &Subtarget) {
5649   EVT LocVT = VA.getLocVT();
5650 
5651   switch (VA.getLocInfo()) {
5652   default:
5653     llvm_unreachable("Unexpected CCValAssign::LocInfo");
5654   case CCValAssign::Full:
5655     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
5656       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
5657     break;
5658   case CCValAssign::BCvt:
5659     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
5660       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
5661     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
5662       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
5663     else
5664       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
5665     break;
5666   }
5667   return Val;
5668 }
5669 
5670 // The caller is responsible for loading the full value if the argument is
5671 // passed with CCValAssign::Indirect.
5672 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
5673                                 const CCValAssign &VA, const SDLoc &DL) {
5674   MachineFunction &MF = DAG.getMachineFunction();
5675   MachineFrameInfo &MFI = MF.getFrameInfo();
5676   EVT LocVT = VA.getLocVT();
5677   EVT ValVT = VA.getValVT();
5678   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
5679   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
5680                                  VA.getLocMemOffset(), /*Immutable=*/true);
5681   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
5682   SDValue Val;
5683 
5684   ISD::LoadExtType ExtType;
5685   switch (VA.getLocInfo()) {
5686   default:
5687     llvm_unreachable("Unexpected CCValAssign::LocInfo");
5688   case CCValAssign::Full:
5689   case CCValAssign::Indirect:
5690   case CCValAssign::BCvt:
5691     ExtType = ISD::NON_EXTLOAD;
5692     break;
5693   }
5694   Val = DAG.getExtLoad(
5695       ExtType, DL, LocVT, Chain, FIN,
5696       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
5697   return Val;
5698 }
5699 
5700 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
5701                                        const CCValAssign &VA, const SDLoc &DL) {
5702   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
5703          "Unexpected VA");
5704   MachineFunction &MF = DAG.getMachineFunction();
5705   MachineFrameInfo &MFI = MF.getFrameInfo();
5706   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5707 
5708   if (VA.isMemLoc()) {
5709     // f64 is passed on the stack.
5710     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
5711     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
5712     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
5713                        MachinePointerInfo::getFixedStack(MF, FI));
5714   }
5715 
5716   assert(VA.isRegLoc() && "Expected register VA assignment");
5717 
5718   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5719   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
5720   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
5721   SDValue Hi;
5722   if (VA.getLocReg() == RISCV::X17) {
5723     // Second half of f64 is passed on the stack.
5724     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
5725     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
5726     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
5727                      MachinePointerInfo::getFixedStack(MF, FI));
5728   } else {
5729     // Second half of f64 is passed in another GPR.
5730     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5731     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
5732     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
5733   }
5734   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
5735 }
5736 
5737 // FastCC has less than 1% performance improvement for some particular
5738 // benchmark. But theoretically, it may has benenfit for some cases.
5739 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
5740                             CCValAssign::LocInfo LocInfo,
5741                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
5742 
5743   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
5744     // X5 and X6 might be used for save-restore libcall.
5745     static const MCPhysReg GPRList[] = {
5746         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
5747         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
5748         RISCV::X29, RISCV::X30, RISCV::X31};
5749     if (unsigned Reg = State.AllocateReg(GPRList)) {
5750       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5751       return false;
5752     }
5753   }
5754 
5755   if (LocVT == MVT::f16) {
5756     static const MCPhysReg FPR16List[] = {
5757         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
5758         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
5759         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
5760         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
5761     if (unsigned Reg = State.AllocateReg(FPR16List)) {
5762       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5763       return false;
5764     }
5765   }
5766 
5767   if (LocVT == MVT::f32) {
5768     static const MCPhysReg FPR32List[] = {
5769         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
5770         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
5771         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
5772         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
5773     if (unsigned Reg = State.AllocateReg(FPR32List)) {
5774       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5775       return false;
5776     }
5777   }
5778 
5779   if (LocVT == MVT::f64) {
5780     static const MCPhysReg FPR64List[] = {
5781         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
5782         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
5783         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
5784         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
5785     if (unsigned Reg = State.AllocateReg(FPR64List)) {
5786       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5787       return false;
5788     }
5789   }
5790 
5791   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
5792     unsigned Offset4 = State.AllocateStack(4, Align(4));
5793     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
5794     return false;
5795   }
5796 
5797   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
5798     unsigned Offset5 = State.AllocateStack(8, Align(8));
5799     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
5800     return false;
5801   }
5802 
5803   return true; // CC didn't match.
5804 }
5805 
5806 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
5807                          CCValAssign::LocInfo LocInfo,
5808                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
5809 
5810   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
5811     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
5812     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
5813     static const MCPhysReg GPRList[] = {
5814         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
5815         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
5816     if (unsigned Reg = State.AllocateReg(GPRList)) {
5817       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5818       return false;
5819     }
5820   }
5821 
5822   if (LocVT == MVT::f32) {
5823     // Pass in STG registers: F1, ..., F6
5824     //                        fs0 ... fs5
5825     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
5826                                           RISCV::F18_F, RISCV::F19_F,
5827                                           RISCV::F20_F, RISCV::F21_F};
5828     if (unsigned Reg = State.AllocateReg(FPR32List)) {
5829       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5830       return false;
5831     }
5832   }
5833 
5834   if (LocVT == MVT::f64) {
5835     // Pass in STG registers: D1, ..., D6
5836     //                        fs6 ... fs11
5837     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
5838                                           RISCV::F24_D, RISCV::F25_D,
5839                                           RISCV::F26_D, RISCV::F27_D};
5840     if (unsigned Reg = State.AllocateReg(FPR64List)) {
5841       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5842       return false;
5843     }
5844   }
5845 
5846   report_fatal_error("No registers left in GHC calling convention");
5847   return true;
5848 }
5849 
5850 // Transform physical registers into virtual registers.
5851 SDValue RISCVTargetLowering::LowerFormalArguments(
5852     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
5853     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5854     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5855 
5856   MachineFunction &MF = DAG.getMachineFunction();
5857 
5858   switch (CallConv) {
5859   default:
5860     report_fatal_error("Unsupported calling convention");
5861   case CallingConv::C:
5862   case CallingConv::Fast:
5863     break;
5864   case CallingConv::GHC:
5865     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
5866         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
5867       report_fatal_error(
5868         "GHC calling convention requires the F and D instruction set extensions");
5869   }
5870 
5871   const Function &Func = MF.getFunction();
5872   if (Func.hasFnAttribute("interrupt")) {
5873     if (!Func.arg_empty())
5874       report_fatal_error(
5875         "Functions with the interrupt attribute cannot have arguments!");
5876 
5877     StringRef Kind =
5878       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
5879 
5880     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
5881       report_fatal_error(
5882         "Function interrupt attribute argument not supported!");
5883   }
5884 
5885   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5886   MVT XLenVT = Subtarget.getXLenVT();
5887   unsigned XLenInBytes = Subtarget.getXLen() / 8;
5888   // Used with vargs to acumulate store chains.
5889   std::vector<SDValue> OutChains;
5890 
5891   // Assign locations to all of the incoming arguments.
5892   SmallVector<CCValAssign, 16> ArgLocs;
5893   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5894 
5895   if (CallConv == CallingConv::Fast)
5896     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
5897   else if (CallConv == CallingConv::GHC)
5898     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
5899   else
5900     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
5901 
5902   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5903     CCValAssign &VA = ArgLocs[i];
5904     SDValue ArgValue;
5905     // Passing f64 on RV32D with a soft float ABI must be handled as a special
5906     // case.
5907     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
5908       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
5909     else if (VA.isRegLoc())
5910       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
5911     else
5912       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
5913 
5914     if (VA.getLocInfo() == CCValAssign::Indirect) {
5915       // If the original argument was split and passed by reference (e.g. i128
5916       // on RV32), we need to load all parts of it here (using the same
5917       // address). Vectors may be partly split to registers and partly to the
5918       // stack, in which case the base address is partly offset and subsequent
5919       // stores are relative to that.
5920       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
5921                                    MachinePointerInfo()));
5922       unsigned ArgIndex = Ins[i].OrigArgIndex;
5923       unsigned ArgPartOffset = Ins[i].PartOffset;
5924       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
5925       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
5926         CCValAssign &PartVA = ArgLocs[i + 1];
5927         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
5928         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
5929                                       DAG.getIntPtrConstant(PartOffset, DL));
5930         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
5931                                      MachinePointerInfo()));
5932         ++i;
5933       }
5934       continue;
5935     }
5936     InVals.push_back(ArgValue);
5937   }
5938 
5939   if (IsVarArg) {
5940     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
5941     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
5942     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
5943     MachineFrameInfo &MFI = MF.getFrameInfo();
5944     MachineRegisterInfo &RegInfo = MF.getRegInfo();
5945     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
5946 
5947     // Offset of the first variable argument from stack pointer, and size of
5948     // the vararg save area. For now, the varargs save area is either zero or
5949     // large enough to hold a0-a7.
5950     int VaArgOffset, VarArgsSaveSize;
5951 
5952     // If all registers are allocated, then all varargs must be passed on the
5953     // stack and we don't need to save any argregs.
5954     if (ArgRegs.size() == Idx) {
5955       VaArgOffset = CCInfo.getNextStackOffset();
5956       VarArgsSaveSize = 0;
5957     } else {
5958       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
5959       VaArgOffset = -VarArgsSaveSize;
5960     }
5961 
5962     // Record the frame index of the first variable argument
5963     // which is a value necessary to VASTART.
5964     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5965     RVFI->setVarArgsFrameIndex(FI);
5966 
5967     // If saving an odd number of registers then create an extra stack slot to
5968     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
5969     // offsets to even-numbered registered remain 2*XLEN-aligned.
5970     if (Idx % 2) {
5971       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
5972       VarArgsSaveSize += XLenInBytes;
5973     }
5974 
5975     // Copy the integer registers that may have been used for passing varargs
5976     // to the vararg save area.
5977     for (unsigned I = Idx; I < ArgRegs.size();
5978          ++I, VaArgOffset += XLenInBytes) {
5979       const Register Reg = RegInfo.createVirtualRegister(RC);
5980       RegInfo.addLiveIn(ArgRegs[I], Reg);
5981       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
5982       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5983       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5984       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
5985                                    MachinePointerInfo::getFixedStack(MF, FI));
5986       cast<StoreSDNode>(Store.getNode())
5987           ->getMemOperand()
5988           ->setValue((Value *)nullptr);
5989       OutChains.push_back(Store);
5990     }
5991     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
5992   }
5993 
5994   // All stores are grouped in one node to allow the matching between
5995   // the size of Ins and InVals. This only happens for vararg functions.
5996   if (!OutChains.empty()) {
5997     OutChains.push_back(Chain);
5998     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
5999   }
6000 
6001   return Chain;
6002 }
6003 
6004 /// isEligibleForTailCallOptimization - Check whether the call is eligible
6005 /// for tail call optimization.
6006 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
6007 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
6008     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
6009     const SmallVector<CCValAssign, 16> &ArgLocs) const {
6010 
6011   auto &Callee = CLI.Callee;
6012   auto CalleeCC = CLI.CallConv;
6013   auto &Outs = CLI.Outs;
6014   auto &Caller = MF.getFunction();
6015   auto CallerCC = Caller.getCallingConv();
6016 
6017   // Exception-handling functions need a special set of instructions to
6018   // indicate a return to the hardware. Tail-calling another function would
6019   // probably break this.
6020   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
6021   // should be expanded as new function attributes are introduced.
6022   if (Caller.hasFnAttribute("interrupt"))
6023     return false;
6024 
6025   // Do not tail call opt if the stack is used to pass parameters.
6026   if (CCInfo.getNextStackOffset() != 0)
6027     return false;
6028 
6029   // Do not tail call opt if any parameters need to be passed indirectly.
6030   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
6031   // passed indirectly. So the address of the value will be passed in a
6032   // register, or if not available, then the address is put on the stack. In
6033   // order to pass indirectly, space on the stack often needs to be allocated
6034   // in order to store the value. In this case the CCInfo.getNextStackOffset()
6035   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
6036   // are passed CCValAssign::Indirect.
6037   for (auto &VA : ArgLocs)
6038     if (VA.getLocInfo() == CCValAssign::Indirect)
6039       return false;
6040 
6041   // Do not tail call opt if either caller or callee uses struct return
6042   // semantics.
6043   auto IsCallerStructRet = Caller.hasStructRetAttr();
6044   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
6045   if (IsCallerStructRet || IsCalleeStructRet)
6046     return false;
6047 
6048   // Externally-defined functions with weak linkage should not be
6049   // tail-called. The behaviour of branch instructions in this situation (as
6050   // used for tail calls) is implementation-defined, so we cannot rely on the
6051   // linker replacing the tail call with a return.
6052   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6053     const GlobalValue *GV = G->getGlobal();
6054     if (GV->hasExternalWeakLinkage())
6055       return false;
6056   }
6057 
6058   // The callee has to preserve all registers the caller needs to preserve.
6059   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
6060   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6061   if (CalleeCC != CallerCC) {
6062     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6063     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6064       return false;
6065   }
6066 
6067   // Byval parameters hand the function a pointer directly into the stack area
6068   // we want to reuse during a tail call. Working around this *is* possible
6069   // but less efficient and uglier in LowerCall.
6070   for (auto &Arg : Outs)
6071     if (Arg.Flags.isByVal())
6072       return false;
6073 
6074   return true;
6075 }
6076 
6077 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
6078 // and output parameter nodes.
6079 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
6080                                        SmallVectorImpl<SDValue> &InVals) const {
6081   SelectionDAG &DAG = CLI.DAG;
6082   SDLoc &DL = CLI.DL;
6083   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
6084   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
6085   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
6086   SDValue Chain = CLI.Chain;
6087   SDValue Callee = CLI.Callee;
6088   bool &IsTailCall = CLI.IsTailCall;
6089   CallingConv::ID CallConv = CLI.CallConv;
6090   bool IsVarArg = CLI.IsVarArg;
6091   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6092   MVT XLenVT = Subtarget.getXLenVT();
6093 
6094   MachineFunction &MF = DAG.getMachineFunction();
6095 
6096   // Analyze the operands of the call, assigning locations to each operand.
6097   SmallVector<CCValAssign, 16> ArgLocs;
6098   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6099 
6100   if (CallConv == CallingConv::Fast)
6101     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
6102   else if (CallConv == CallingConv::GHC)
6103     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
6104   else
6105     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
6106 
6107   // Check if it's really possible to do a tail call.
6108   if (IsTailCall)
6109     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
6110 
6111   if (IsTailCall)
6112     ++NumTailCalls;
6113   else if (CLI.CB && CLI.CB->isMustTailCall())
6114     report_fatal_error("failed to perform tail call elimination on a call "
6115                        "site marked musttail");
6116 
6117   // Get a count of how many bytes are to be pushed on the stack.
6118   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
6119 
6120   // Create local copies for byval args
6121   SmallVector<SDValue, 8> ByValArgs;
6122   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6123     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6124     if (!Flags.isByVal())
6125       continue;
6126 
6127     SDValue Arg = OutVals[i];
6128     unsigned Size = Flags.getByValSize();
6129     Align Alignment = Flags.getNonZeroByValAlign();
6130 
6131     int FI =
6132         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
6133     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
6134     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
6135 
6136     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
6137                           /*IsVolatile=*/false,
6138                           /*AlwaysInline=*/false, IsTailCall,
6139                           MachinePointerInfo(), MachinePointerInfo());
6140     ByValArgs.push_back(FIPtr);
6141   }
6142 
6143   if (!IsTailCall)
6144     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
6145 
6146   // Copy argument values to their designated locations.
6147   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
6148   SmallVector<SDValue, 8> MemOpChains;
6149   SDValue StackPtr;
6150   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
6151     CCValAssign &VA = ArgLocs[i];
6152     SDValue ArgValue = OutVals[i];
6153     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6154 
6155     // Handle passing f64 on RV32D with a soft float ABI as a special case.
6156     bool IsF64OnRV32DSoftABI =
6157         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
6158     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
6159       SDValue SplitF64 = DAG.getNode(
6160           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
6161       SDValue Lo = SplitF64.getValue(0);
6162       SDValue Hi = SplitF64.getValue(1);
6163 
6164       Register RegLo = VA.getLocReg();
6165       RegsToPass.push_back(std::make_pair(RegLo, Lo));
6166 
6167       if (RegLo == RISCV::X17) {
6168         // Second half of f64 is passed on the stack.
6169         // Work out the address of the stack slot.
6170         if (!StackPtr.getNode())
6171           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
6172         // Emit the store.
6173         MemOpChains.push_back(
6174             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
6175       } else {
6176         // Second half of f64 is passed in another GPR.
6177         assert(RegLo < RISCV::X31 && "Invalid register pair");
6178         Register RegHigh = RegLo + 1;
6179         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
6180       }
6181       continue;
6182     }
6183 
6184     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
6185     // as any other MemLoc.
6186 
6187     // Promote the value if needed.
6188     // For now, only handle fully promoted and indirect arguments.
6189     if (VA.getLocInfo() == CCValAssign::Indirect) {
6190       // Store the argument in a stack slot and pass its address.
6191       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
6192       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
6193       MemOpChains.push_back(
6194           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
6195                        MachinePointerInfo::getFixedStack(MF, FI)));
6196       // If the original argument was split (e.g. i128), we need
6197       // to store the required parts of it here (and pass just one address).
6198       // Vectors may be partly split to registers and partly to the stack, in
6199       // which case the base address is partly offset and subsequent stores are
6200       // relative to that.
6201       unsigned ArgIndex = Outs[i].OrigArgIndex;
6202       unsigned ArgPartOffset = Outs[i].PartOffset;
6203       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
6204       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
6205         SDValue PartValue = OutVals[i + 1];
6206         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
6207         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
6208                                       DAG.getIntPtrConstant(PartOffset, DL));
6209         MemOpChains.push_back(
6210             DAG.getStore(Chain, DL, PartValue, Address,
6211                          MachinePointerInfo::getFixedStack(MF, FI)));
6212         ++i;
6213       }
6214       ArgValue = SpillSlot;
6215     } else {
6216       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
6217     }
6218 
6219     // Use local copy if it is a byval arg.
6220     if (Flags.isByVal())
6221       ArgValue = ByValArgs[j++];
6222 
6223     if (VA.isRegLoc()) {
6224       // Queue up the argument copies and emit them at the end.
6225       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
6226     } else {
6227       assert(VA.isMemLoc() && "Argument not register or memory");
6228       assert(!IsTailCall && "Tail call not allowed if stack is used "
6229                             "for passing parameters");
6230 
6231       // Work out the address of the stack slot.
6232       if (!StackPtr.getNode())
6233         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
6234       SDValue Address =
6235           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
6236                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
6237 
6238       // Emit the store.
6239       MemOpChains.push_back(
6240           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
6241     }
6242   }
6243 
6244   // Join the stores, which are independent of one another.
6245   if (!MemOpChains.empty())
6246     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
6247 
6248   SDValue Glue;
6249 
6250   // Build a sequence of copy-to-reg nodes, chained and glued together.
6251   for (auto &Reg : RegsToPass) {
6252     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
6253     Glue = Chain.getValue(1);
6254   }
6255 
6256   // Validate that none of the argument registers have been marked as
6257   // reserved, if so report an error. Do the same for the return address if this
6258   // is not a tailcall.
6259   validateCCReservedRegs(RegsToPass, MF);
6260   if (!IsTailCall &&
6261       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
6262     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
6263         MF.getFunction(),
6264         "Return address register required, but has been reserved."});
6265 
6266   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
6267   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
6268   // split it and then direct call can be matched by PseudoCALL.
6269   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
6270     const GlobalValue *GV = S->getGlobal();
6271 
6272     unsigned OpFlags = RISCVII::MO_CALL;
6273     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
6274       OpFlags = RISCVII::MO_PLT;
6275 
6276     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
6277   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
6278     unsigned OpFlags = RISCVII::MO_CALL;
6279 
6280     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
6281                                                  nullptr))
6282       OpFlags = RISCVII::MO_PLT;
6283 
6284     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
6285   }
6286 
6287   // The first call operand is the chain and the second is the target address.
6288   SmallVector<SDValue, 8> Ops;
6289   Ops.push_back(Chain);
6290   Ops.push_back(Callee);
6291 
6292   // Add argument registers to the end of the list so that they are
6293   // known live into the call.
6294   for (auto &Reg : RegsToPass)
6295     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
6296 
6297   if (!IsTailCall) {
6298     // Add a register mask operand representing the call-preserved registers.
6299     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
6300     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
6301     assert(Mask && "Missing call preserved mask for calling convention");
6302     Ops.push_back(DAG.getRegisterMask(Mask));
6303   }
6304 
6305   // Glue the call to the argument copies, if any.
6306   if (Glue.getNode())
6307     Ops.push_back(Glue);
6308 
6309   // Emit the call.
6310   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6311 
6312   if (IsTailCall) {
6313     MF.getFrameInfo().setHasTailCall();
6314     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
6315   }
6316 
6317   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
6318   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
6319   Glue = Chain.getValue(1);
6320 
6321   // Mark the end of the call, which is glued to the call itself.
6322   Chain = DAG.getCALLSEQ_END(Chain,
6323                              DAG.getConstant(NumBytes, DL, PtrVT, true),
6324                              DAG.getConstant(0, DL, PtrVT, true),
6325                              Glue, DL);
6326   Glue = Chain.getValue(1);
6327 
6328   // Assign locations to each value returned by this call.
6329   SmallVector<CCValAssign, 16> RVLocs;
6330   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
6331   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
6332 
6333   // Copy all of the result registers out of their specified physreg.
6334   for (auto &VA : RVLocs) {
6335     // Copy the value out
6336     SDValue RetValue =
6337         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
6338     // Glue the RetValue to the end of the call sequence
6339     Chain = RetValue.getValue(1);
6340     Glue = RetValue.getValue(2);
6341 
6342     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
6343       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
6344       SDValue RetValue2 =
6345           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
6346       Chain = RetValue2.getValue(1);
6347       Glue = RetValue2.getValue(2);
6348       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
6349                              RetValue2);
6350     }
6351 
6352     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
6353 
6354     InVals.push_back(RetValue);
6355   }
6356 
6357   return Chain;
6358 }
6359 
6360 bool RISCVTargetLowering::CanLowerReturn(
6361     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
6362     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
6363   SmallVector<CCValAssign, 16> RVLocs;
6364   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
6365 
6366   Optional<unsigned> FirstMaskArgument;
6367   if (Subtarget.hasStdExtV())
6368     FirstMaskArgument = preAssignMask(Outs);
6369 
6370   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6371     MVT VT = Outs[i].VT;
6372     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6373     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6374     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
6375                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
6376                  *this, FirstMaskArgument))
6377       return false;
6378   }
6379   return true;
6380 }
6381 
6382 SDValue
6383 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
6384                                  bool IsVarArg,
6385                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
6386                                  const SmallVectorImpl<SDValue> &OutVals,
6387                                  const SDLoc &DL, SelectionDAG &DAG) const {
6388   const MachineFunction &MF = DAG.getMachineFunction();
6389   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
6390 
6391   // Stores the assignment of the return value to a location.
6392   SmallVector<CCValAssign, 16> RVLocs;
6393 
6394   // Info about the registers and stack slot.
6395   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
6396                  *DAG.getContext());
6397 
6398   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
6399                     nullptr);
6400 
6401   if (CallConv == CallingConv::GHC && !RVLocs.empty())
6402     report_fatal_error("GHC functions return void only");
6403 
6404   SDValue Glue;
6405   SmallVector<SDValue, 4> RetOps(1, Chain);
6406 
6407   // Copy the result values into the output registers.
6408   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
6409     SDValue Val = OutVals[i];
6410     CCValAssign &VA = RVLocs[i];
6411     assert(VA.isRegLoc() && "Can only return in registers!");
6412 
6413     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
6414       // Handle returning f64 on RV32D with a soft float ABI.
6415       assert(VA.isRegLoc() && "Expected return via registers");
6416       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
6417                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
6418       SDValue Lo = SplitF64.getValue(0);
6419       SDValue Hi = SplitF64.getValue(1);
6420       Register RegLo = VA.getLocReg();
6421       assert(RegLo < RISCV::X31 && "Invalid register pair");
6422       Register RegHi = RegLo + 1;
6423 
6424       if (STI.isRegisterReservedByUser(RegLo) ||
6425           STI.isRegisterReservedByUser(RegHi))
6426         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
6427             MF.getFunction(),
6428             "Return value register required, but has been reserved."});
6429 
6430       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
6431       Glue = Chain.getValue(1);
6432       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
6433       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
6434       Glue = Chain.getValue(1);
6435       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
6436     } else {
6437       // Handle a 'normal' return.
6438       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
6439       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
6440 
6441       if (STI.isRegisterReservedByUser(VA.getLocReg()))
6442         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
6443             MF.getFunction(),
6444             "Return value register required, but has been reserved."});
6445 
6446       // Guarantee that all emitted copies are stuck together.
6447       Glue = Chain.getValue(1);
6448       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
6449     }
6450   }
6451 
6452   RetOps[0] = Chain; // Update chain.
6453 
6454   // Add the glue node if we have it.
6455   if (Glue.getNode()) {
6456     RetOps.push_back(Glue);
6457   }
6458 
6459   // Interrupt service routines use different return instructions.
6460   const Function &Func = DAG.getMachineFunction().getFunction();
6461   if (Func.hasFnAttribute("interrupt")) {
6462     if (!Func.getReturnType()->isVoidTy())
6463       report_fatal_error(
6464           "Functions with the interrupt attribute must have void return type!");
6465 
6466     MachineFunction &MF = DAG.getMachineFunction();
6467     StringRef Kind =
6468       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
6469 
6470     unsigned RetOpc;
6471     if (Kind == "user")
6472       RetOpc = RISCVISD::URET_FLAG;
6473     else if (Kind == "supervisor")
6474       RetOpc = RISCVISD::SRET_FLAG;
6475     else
6476       RetOpc = RISCVISD::MRET_FLAG;
6477 
6478     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
6479   }
6480 
6481   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
6482 }
6483 
6484 void RISCVTargetLowering::validateCCReservedRegs(
6485     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
6486     MachineFunction &MF) const {
6487   const Function &F = MF.getFunction();
6488   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
6489 
6490   if (llvm::any_of(Regs, [&STI](auto Reg) {
6491         return STI.isRegisterReservedByUser(Reg.first);
6492       }))
6493     F.getContext().diagnose(DiagnosticInfoUnsupported{
6494         F, "Argument register required, but has been reserved."});
6495 }
6496 
6497 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
6498   return CI->isTailCall();
6499 }
6500 
6501 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
6502 #define NODE_NAME_CASE(NODE)                                                   \
6503   case RISCVISD::NODE:                                                         \
6504     return "RISCVISD::" #NODE;
6505   // clang-format off
6506   switch ((RISCVISD::NodeType)Opcode) {
6507   case RISCVISD::FIRST_NUMBER:
6508     break;
6509   NODE_NAME_CASE(RET_FLAG)
6510   NODE_NAME_CASE(URET_FLAG)
6511   NODE_NAME_CASE(SRET_FLAG)
6512   NODE_NAME_CASE(MRET_FLAG)
6513   NODE_NAME_CASE(CALL)
6514   NODE_NAME_CASE(SELECT_CC)
6515   NODE_NAME_CASE(BR_CC)
6516   NODE_NAME_CASE(BuildPairF64)
6517   NODE_NAME_CASE(SplitF64)
6518   NODE_NAME_CASE(TAIL)
6519   NODE_NAME_CASE(SLLW)
6520   NODE_NAME_CASE(SRAW)
6521   NODE_NAME_CASE(SRLW)
6522   NODE_NAME_CASE(DIVW)
6523   NODE_NAME_CASE(DIVUW)
6524   NODE_NAME_CASE(REMUW)
6525   NODE_NAME_CASE(ROLW)
6526   NODE_NAME_CASE(RORW)
6527   NODE_NAME_CASE(FSLW)
6528   NODE_NAME_CASE(FSRW)
6529   NODE_NAME_CASE(FSL)
6530   NODE_NAME_CASE(FSR)
6531   NODE_NAME_CASE(FMV_H_X)
6532   NODE_NAME_CASE(FMV_X_ANYEXTH)
6533   NODE_NAME_CASE(FMV_W_X_RV64)
6534   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
6535   NODE_NAME_CASE(READ_CYCLE_WIDE)
6536   NODE_NAME_CASE(GREVI)
6537   NODE_NAME_CASE(GREVIW)
6538   NODE_NAME_CASE(GORCI)
6539   NODE_NAME_CASE(GORCIW)
6540   NODE_NAME_CASE(SHFLI)
6541   NODE_NAME_CASE(VMV_V_X_VL)
6542   NODE_NAME_CASE(VFMV_V_F_VL)
6543   NODE_NAME_CASE(VMV_X_S)
6544   NODE_NAME_CASE(VMV_S_XF_VL)
6545   NODE_NAME_CASE(SPLAT_VECTOR_I64)
6546   NODE_NAME_CASE(READ_VLENB)
6547   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
6548   NODE_NAME_CASE(VLEFF)
6549   NODE_NAME_CASE(VLEFF_MASK)
6550   NODE_NAME_CASE(VSLIDEUP_VL)
6551   NODE_NAME_CASE(VSLIDE1UP_VL)
6552   NODE_NAME_CASE(VSLIDEDOWN_VL)
6553   NODE_NAME_CASE(VID_VL)
6554   NODE_NAME_CASE(VFNCVT_ROD_VL)
6555   NODE_NAME_CASE(VECREDUCE_ADD_VL)
6556   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
6557   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
6558   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
6559   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
6560   NODE_NAME_CASE(VECREDUCE_AND_VL)
6561   NODE_NAME_CASE(VECREDUCE_OR_VL)
6562   NODE_NAME_CASE(VECREDUCE_XOR_VL)
6563   NODE_NAME_CASE(VECREDUCE_FADD_VL)
6564   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
6565   NODE_NAME_CASE(ADD_VL)
6566   NODE_NAME_CASE(AND_VL)
6567   NODE_NAME_CASE(MUL_VL)
6568   NODE_NAME_CASE(OR_VL)
6569   NODE_NAME_CASE(SDIV_VL)
6570   NODE_NAME_CASE(SHL_VL)
6571   NODE_NAME_CASE(SREM_VL)
6572   NODE_NAME_CASE(SRA_VL)
6573   NODE_NAME_CASE(SRL_VL)
6574   NODE_NAME_CASE(SUB_VL)
6575   NODE_NAME_CASE(UDIV_VL)
6576   NODE_NAME_CASE(UREM_VL)
6577   NODE_NAME_CASE(XOR_VL)
6578   NODE_NAME_CASE(FADD_VL)
6579   NODE_NAME_CASE(FSUB_VL)
6580   NODE_NAME_CASE(FMUL_VL)
6581   NODE_NAME_CASE(FDIV_VL)
6582   NODE_NAME_CASE(FNEG_VL)
6583   NODE_NAME_CASE(FABS_VL)
6584   NODE_NAME_CASE(FSQRT_VL)
6585   NODE_NAME_CASE(FMA_VL)
6586   NODE_NAME_CASE(FCOPYSIGN_VL)
6587   NODE_NAME_CASE(SMIN_VL)
6588   NODE_NAME_CASE(SMAX_VL)
6589   NODE_NAME_CASE(UMIN_VL)
6590   NODE_NAME_CASE(UMAX_VL)
6591   NODE_NAME_CASE(MULHS_VL)
6592   NODE_NAME_CASE(MULHU_VL)
6593   NODE_NAME_CASE(FP_TO_SINT_VL)
6594   NODE_NAME_CASE(FP_TO_UINT_VL)
6595   NODE_NAME_CASE(SINT_TO_FP_VL)
6596   NODE_NAME_CASE(UINT_TO_FP_VL)
6597   NODE_NAME_CASE(FP_EXTEND_VL)
6598   NODE_NAME_CASE(FP_ROUND_VL)
6599   NODE_NAME_CASE(SETCC_VL)
6600   NODE_NAME_CASE(VSELECT_VL)
6601   NODE_NAME_CASE(VMAND_VL)
6602   NODE_NAME_CASE(VMOR_VL)
6603   NODE_NAME_CASE(VMXOR_VL)
6604   NODE_NAME_CASE(VMCLR_VL)
6605   NODE_NAME_CASE(VMSET_VL)
6606   NODE_NAME_CASE(VRGATHER_VX_VL)
6607   NODE_NAME_CASE(VRGATHER_VV_VL)
6608   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
6609   NODE_NAME_CASE(VSEXT_VL)
6610   NODE_NAME_CASE(VZEXT_VL)
6611   NODE_NAME_CASE(VLE_VL)
6612   NODE_NAME_CASE(VSE_VL)
6613   }
6614   // clang-format on
6615   return nullptr;
6616 #undef NODE_NAME_CASE
6617 }
6618 
6619 /// getConstraintType - Given a constraint letter, return the type of
6620 /// constraint it is for this target.
6621 RISCVTargetLowering::ConstraintType
6622 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
6623   if (Constraint.size() == 1) {
6624     switch (Constraint[0]) {
6625     default:
6626       break;
6627     case 'f':
6628     case 'v':
6629       return C_RegisterClass;
6630     case 'I':
6631     case 'J':
6632     case 'K':
6633       return C_Immediate;
6634     case 'A':
6635       return C_Memory;
6636     }
6637   }
6638   return TargetLowering::getConstraintType(Constraint);
6639 }
6640 
6641 std::pair<unsigned, const TargetRegisterClass *>
6642 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
6643                                                   StringRef Constraint,
6644                                                   MVT VT) const {
6645   // First, see if this is a constraint that directly corresponds to a
6646   // RISCV register class.
6647   if (Constraint.size() == 1) {
6648     switch (Constraint[0]) {
6649     case 'r':
6650       return std::make_pair(0U, &RISCV::GPRRegClass);
6651     case 'f':
6652       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
6653         return std::make_pair(0U, &RISCV::FPR16RegClass);
6654       if (Subtarget.hasStdExtF() && VT == MVT::f32)
6655         return std::make_pair(0U, &RISCV::FPR32RegClass);
6656       if (Subtarget.hasStdExtD() && VT == MVT::f64)
6657         return std::make_pair(0U, &RISCV::FPR64RegClass);
6658       break;
6659     case 'v':
6660       for (const auto *RC :
6661            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
6662             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
6663         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
6664           return std::make_pair(0U, RC);
6665       }
6666       break;
6667     default:
6668       break;
6669     }
6670   }
6671 
6672   // Clang will correctly decode the usage of register name aliases into their
6673   // official names. However, other frontends like `rustc` do not. This allows
6674   // users of these frontends to use the ABI names for registers in LLVM-style
6675   // register constraints.
6676   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
6677                                .Case("{zero}", RISCV::X0)
6678                                .Case("{ra}", RISCV::X1)
6679                                .Case("{sp}", RISCV::X2)
6680                                .Case("{gp}", RISCV::X3)
6681                                .Case("{tp}", RISCV::X4)
6682                                .Case("{t0}", RISCV::X5)
6683                                .Case("{t1}", RISCV::X6)
6684                                .Case("{t2}", RISCV::X7)
6685                                .Cases("{s0}", "{fp}", RISCV::X8)
6686                                .Case("{s1}", RISCV::X9)
6687                                .Case("{a0}", RISCV::X10)
6688                                .Case("{a1}", RISCV::X11)
6689                                .Case("{a2}", RISCV::X12)
6690                                .Case("{a3}", RISCV::X13)
6691                                .Case("{a4}", RISCV::X14)
6692                                .Case("{a5}", RISCV::X15)
6693                                .Case("{a6}", RISCV::X16)
6694                                .Case("{a7}", RISCV::X17)
6695                                .Case("{s2}", RISCV::X18)
6696                                .Case("{s3}", RISCV::X19)
6697                                .Case("{s4}", RISCV::X20)
6698                                .Case("{s5}", RISCV::X21)
6699                                .Case("{s6}", RISCV::X22)
6700                                .Case("{s7}", RISCV::X23)
6701                                .Case("{s8}", RISCV::X24)
6702                                .Case("{s9}", RISCV::X25)
6703                                .Case("{s10}", RISCV::X26)
6704                                .Case("{s11}", RISCV::X27)
6705                                .Case("{t3}", RISCV::X28)
6706                                .Case("{t4}", RISCV::X29)
6707                                .Case("{t5}", RISCV::X30)
6708                                .Case("{t6}", RISCV::X31)
6709                                .Default(RISCV::NoRegister);
6710   if (XRegFromAlias != RISCV::NoRegister)
6711     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
6712 
6713   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
6714   // TableGen record rather than the AsmName to choose registers for InlineAsm
6715   // constraints, plus we want to match those names to the widest floating point
6716   // register type available, manually select floating point registers here.
6717   //
6718   // The second case is the ABI name of the register, so that frontends can also
6719   // use the ABI names in register constraint lists.
6720   if (Subtarget.hasStdExtF()) {
6721     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
6722                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
6723                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
6724                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
6725                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
6726                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
6727                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
6728                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
6729                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
6730                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
6731                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
6732                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
6733                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
6734                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
6735                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
6736                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
6737                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
6738                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
6739                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
6740                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
6741                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
6742                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
6743                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
6744                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
6745                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
6746                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
6747                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
6748                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
6749                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
6750                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
6751                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
6752                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
6753                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
6754                         .Default(RISCV::NoRegister);
6755     if (FReg != RISCV::NoRegister) {
6756       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
6757       if (Subtarget.hasStdExtD()) {
6758         unsigned RegNo = FReg - RISCV::F0_F;
6759         unsigned DReg = RISCV::F0_D + RegNo;
6760         return std::make_pair(DReg, &RISCV::FPR64RegClass);
6761       }
6762       return std::make_pair(FReg, &RISCV::FPR32RegClass);
6763     }
6764   }
6765 
6766   if (Subtarget.hasStdExtV()) {
6767     Register VReg = StringSwitch<Register>(Constraint.lower())
6768                         .Case("{v0}", RISCV::V0)
6769                         .Case("{v1}", RISCV::V1)
6770                         .Case("{v2}", RISCV::V2)
6771                         .Case("{v3}", RISCV::V3)
6772                         .Case("{v4}", RISCV::V4)
6773                         .Case("{v5}", RISCV::V5)
6774                         .Case("{v6}", RISCV::V6)
6775                         .Case("{v7}", RISCV::V7)
6776                         .Case("{v8}", RISCV::V8)
6777                         .Case("{v9}", RISCV::V9)
6778                         .Case("{v10}", RISCV::V10)
6779                         .Case("{v11}", RISCV::V11)
6780                         .Case("{v12}", RISCV::V12)
6781                         .Case("{v13}", RISCV::V13)
6782                         .Case("{v14}", RISCV::V14)
6783                         .Case("{v15}", RISCV::V15)
6784                         .Case("{v16}", RISCV::V16)
6785                         .Case("{v17}", RISCV::V17)
6786                         .Case("{v18}", RISCV::V18)
6787                         .Case("{v19}", RISCV::V19)
6788                         .Case("{v20}", RISCV::V20)
6789                         .Case("{v21}", RISCV::V21)
6790                         .Case("{v22}", RISCV::V22)
6791                         .Case("{v23}", RISCV::V23)
6792                         .Case("{v24}", RISCV::V24)
6793                         .Case("{v25}", RISCV::V25)
6794                         .Case("{v26}", RISCV::V26)
6795                         .Case("{v27}", RISCV::V27)
6796                         .Case("{v28}", RISCV::V28)
6797                         .Case("{v29}", RISCV::V29)
6798                         .Case("{v30}", RISCV::V30)
6799                         .Case("{v31}", RISCV::V31)
6800                         .Default(RISCV::NoRegister);
6801     if (VReg != RISCV::NoRegister) {
6802       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
6803         return std::make_pair(VReg, &RISCV::VMRegClass);
6804       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
6805         return std::make_pair(VReg, &RISCV::VRRegClass);
6806       for (const auto *RC :
6807            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
6808         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
6809           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
6810           return std::make_pair(VReg, RC);
6811         }
6812       }
6813     }
6814   }
6815 
6816   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
6817 }
6818 
6819 unsigned
6820 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
6821   // Currently only support length 1 constraints.
6822   if (ConstraintCode.size() == 1) {
6823     switch (ConstraintCode[0]) {
6824     case 'A':
6825       return InlineAsm::Constraint_A;
6826     default:
6827       break;
6828     }
6829   }
6830 
6831   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
6832 }
6833 
6834 void RISCVTargetLowering::LowerAsmOperandForConstraint(
6835     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
6836     SelectionDAG &DAG) const {
6837   // Currently only support length 1 constraints.
6838   if (Constraint.length() == 1) {
6839     switch (Constraint[0]) {
6840     case 'I':
6841       // Validate & create a 12-bit signed immediate operand.
6842       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6843         uint64_t CVal = C->getSExtValue();
6844         if (isInt<12>(CVal))
6845           Ops.push_back(
6846               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
6847       }
6848       return;
6849     case 'J':
6850       // Validate & create an integer zero operand.
6851       if (auto *C = dyn_cast<ConstantSDNode>(Op))
6852         if (C->getZExtValue() == 0)
6853           Ops.push_back(
6854               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
6855       return;
6856     case 'K':
6857       // Validate & create a 5-bit unsigned immediate operand.
6858       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6859         uint64_t CVal = C->getZExtValue();
6860         if (isUInt<5>(CVal))
6861           Ops.push_back(
6862               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
6863       }
6864       return;
6865     default:
6866       break;
6867     }
6868   }
6869   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
6870 }
6871 
6872 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
6873                                                    Instruction *Inst,
6874                                                    AtomicOrdering Ord) const {
6875   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
6876     return Builder.CreateFence(Ord);
6877   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
6878     return Builder.CreateFence(AtomicOrdering::Release);
6879   return nullptr;
6880 }
6881 
6882 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
6883                                                     Instruction *Inst,
6884                                                     AtomicOrdering Ord) const {
6885   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
6886     return Builder.CreateFence(AtomicOrdering::Acquire);
6887   return nullptr;
6888 }
6889 
6890 TargetLowering::AtomicExpansionKind
6891 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
6892   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
6893   // point operations can't be used in an lr/sc sequence without breaking the
6894   // forward-progress guarantee.
6895   if (AI->isFloatingPointOperation())
6896     return AtomicExpansionKind::CmpXChg;
6897 
6898   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
6899   if (Size == 8 || Size == 16)
6900     return AtomicExpansionKind::MaskedIntrinsic;
6901   return AtomicExpansionKind::None;
6902 }
6903 
6904 static Intrinsic::ID
6905 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
6906   if (XLen == 32) {
6907     switch (BinOp) {
6908     default:
6909       llvm_unreachable("Unexpected AtomicRMW BinOp");
6910     case AtomicRMWInst::Xchg:
6911       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
6912     case AtomicRMWInst::Add:
6913       return Intrinsic::riscv_masked_atomicrmw_add_i32;
6914     case AtomicRMWInst::Sub:
6915       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
6916     case AtomicRMWInst::Nand:
6917       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
6918     case AtomicRMWInst::Max:
6919       return Intrinsic::riscv_masked_atomicrmw_max_i32;
6920     case AtomicRMWInst::Min:
6921       return Intrinsic::riscv_masked_atomicrmw_min_i32;
6922     case AtomicRMWInst::UMax:
6923       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
6924     case AtomicRMWInst::UMin:
6925       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
6926     }
6927   }
6928 
6929   if (XLen == 64) {
6930     switch (BinOp) {
6931     default:
6932       llvm_unreachable("Unexpected AtomicRMW BinOp");
6933     case AtomicRMWInst::Xchg:
6934       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
6935     case AtomicRMWInst::Add:
6936       return Intrinsic::riscv_masked_atomicrmw_add_i64;
6937     case AtomicRMWInst::Sub:
6938       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
6939     case AtomicRMWInst::Nand:
6940       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
6941     case AtomicRMWInst::Max:
6942       return Intrinsic::riscv_masked_atomicrmw_max_i64;
6943     case AtomicRMWInst::Min:
6944       return Intrinsic::riscv_masked_atomicrmw_min_i64;
6945     case AtomicRMWInst::UMax:
6946       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
6947     case AtomicRMWInst::UMin:
6948       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
6949     }
6950   }
6951 
6952   llvm_unreachable("Unexpected XLen\n");
6953 }
6954 
6955 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
6956     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
6957     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
6958   unsigned XLen = Subtarget.getXLen();
6959   Value *Ordering =
6960       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
6961   Type *Tys[] = {AlignedAddr->getType()};
6962   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
6963       AI->getModule(),
6964       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
6965 
6966   if (XLen == 64) {
6967     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
6968     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
6969     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
6970   }
6971 
6972   Value *Result;
6973 
6974   // Must pass the shift amount needed to sign extend the loaded value prior
6975   // to performing a signed comparison for min/max. ShiftAmt is the number of
6976   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
6977   // is the number of bits to left+right shift the value in order to
6978   // sign-extend.
6979   if (AI->getOperation() == AtomicRMWInst::Min ||
6980       AI->getOperation() == AtomicRMWInst::Max) {
6981     const DataLayout &DL = AI->getModule()->getDataLayout();
6982     unsigned ValWidth =
6983         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
6984     Value *SextShamt =
6985         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
6986     Result = Builder.CreateCall(LrwOpScwLoop,
6987                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
6988   } else {
6989     Result =
6990         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
6991   }
6992 
6993   if (XLen == 64)
6994     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
6995   return Result;
6996 }
6997 
6998 TargetLowering::AtomicExpansionKind
6999 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
7000     AtomicCmpXchgInst *CI) const {
7001   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
7002   if (Size == 8 || Size == 16)
7003     return AtomicExpansionKind::MaskedIntrinsic;
7004   return AtomicExpansionKind::None;
7005 }
7006 
7007 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
7008     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
7009     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
7010   unsigned XLen = Subtarget.getXLen();
7011   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
7012   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
7013   if (XLen == 64) {
7014     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
7015     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
7016     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
7017     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
7018   }
7019   Type *Tys[] = {AlignedAddr->getType()};
7020   Function *MaskedCmpXchg =
7021       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
7022   Value *Result = Builder.CreateCall(
7023       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
7024   if (XLen == 64)
7025     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
7026   return Result;
7027 }
7028 
7029 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
7030   return false;
7031 }
7032 
7033 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
7034                                                      EVT VT) const {
7035   VT = VT.getScalarType();
7036 
7037   if (!VT.isSimple())
7038     return false;
7039 
7040   switch (VT.getSimpleVT().SimpleTy) {
7041   case MVT::f16:
7042     return Subtarget.hasStdExtZfh();
7043   case MVT::f32:
7044     return Subtarget.hasStdExtF();
7045   case MVT::f64:
7046     return Subtarget.hasStdExtD();
7047   default:
7048     break;
7049   }
7050 
7051   return false;
7052 }
7053 
7054 Register RISCVTargetLowering::getExceptionPointerRegister(
7055     const Constant *PersonalityFn) const {
7056   return RISCV::X10;
7057 }
7058 
7059 Register RISCVTargetLowering::getExceptionSelectorRegister(
7060     const Constant *PersonalityFn) const {
7061   return RISCV::X11;
7062 }
7063 
7064 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
7065   // Return false to suppress the unnecessary extensions if the LibCall
7066   // arguments or return value is f32 type for LP64 ABI.
7067   RISCVABI::ABI ABI = Subtarget.getTargetABI();
7068   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
7069     return false;
7070 
7071   return true;
7072 }
7073 
7074 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
7075   if (Subtarget.is64Bit() && Type == MVT::i32)
7076     return true;
7077 
7078   return IsSigned;
7079 }
7080 
7081 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
7082                                                  SDValue C) const {
7083   // Check integral scalar types.
7084   if (VT.isScalarInteger()) {
7085     // Omit the optimization if the sub target has the M extension and the data
7086     // size exceeds XLen.
7087     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
7088       return false;
7089     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
7090       // Break the MUL to a SLLI and an ADD/SUB.
7091       const APInt &Imm = ConstNode->getAPIntValue();
7092       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
7093           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
7094         return true;
7095       // Omit the following optimization if the sub target has the M extension
7096       // and the data size >= XLen.
7097       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
7098         return false;
7099       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
7100       // a pair of LUI/ADDI.
7101       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
7102         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
7103         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
7104             (1 - ImmS).isPowerOf2())
7105         return true;
7106       }
7107     }
7108   }
7109 
7110   return false;
7111 }
7112 
7113 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
7114   if (!Subtarget.useRVVForFixedLengthVectors())
7115     return false;
7116 
7117   if (!VT.isFixedLengthVector())
7118     return false;
7119 
7120   // Don't use RVV for vectors we cannot scalarize if required.
7121   switch (VT.getVectorElementType().SimpleTy) {
7122   // i1 is supported but has different rules.
7123   default:
7124     return false;
7125   case MVT::i1:
7126     // Masks can only use a single register.
7127     if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
7128       return false;
7129     break;
7130   case MVT::i8:
7131   case MVT::i16:
7132   case MVT::i32:
7133   case MVT::i64:
7134     break;
7135   case MVT::f16:
7136     if (!Subtarget.hasStdExtZfh())
7137       return false;
7138     break;
7139   case MVT::f32:
7140     if (!Subtarget.hasStdExtF())
7141       return false;
7142     break;
7143   case MVT::f64:
7144     if (!Subtarget.hasStdExtD())
7145       return false;
7146     break;
7147   }
7148 
7149   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
7150   // Don't use RVV for types that don't fit.
7151   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
7152     return false;
7153 
7154   // TODO: Perhaps an artificial restriction, but worth having whilst getting
7155   // the base fixed length RVV support in place.
7156   if (!VT.isPow2VectorType())
7157     return false;
7158 
7159   return true;
7160 }
7161 
7162 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
7163     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
7164     bool *Fast) const {
7165   if (!VT.isScalableVector())
7166     return false;
7167 
7168   EVT ElemVT = VT.getVectorElementType();
7169   if (Alignment >= ElemVT.getStoreSize()) {
7170     if (Fast)
7171       *Fast = true;
7172     return true;
7173   }
7174 
7175   return false;
7176 }
7177 
7178 bool RISCVTargetLowering::splitValueIntoRegisterParts(
7179     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
7180     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
7181   EVT ValueVT = Val.getValueType();
7182   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
7183     LLVMContext &Context = *DAG.getContext();
7184     EVT ValueEltVT = ValueVT.getVectorElementType();
7185     EVT PartEltVT = PartVT.getVectorElementType();
7186     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
7187     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
7188     if (PartVTBitSize % ValueVTBitSize == 0) {
7189       // If the element types are different, bitcast to the same element type of
7190       // PartVT first.
7191       if (ValueEltVT != PartEltVT) {
7192         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
7193         assert(Count != 0 && "The number of element should not be zero.");
7194         EVT SameEltTypeVT =
7195             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
7196         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
7197       }
7198       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
7199                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
7200       Parts[0] = Val;
7201       return true;
7202     }
7203   }
7204   return false;
7205 }
7206 
7207 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
7208     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
7209     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
7210   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
7211     LLVMContext &Context = *DAG.getContext();
7212     SDValue Val = Parts[0];
7213     EVT ValueEltVT = ValueVT.getVectorElementType();
7214     EVT PartEltVT = PartVT.getVectorElementType();
7215     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
7216     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
7217     if (PartVTBitSize % ValueVTBitSize == 0) {
7218       EVT SameEltTypeVT = ValueVT;
7219       // If the element types are different, convert it to the same element type
7220       // of PartVT.
7221       if (ValueEltVT != PartEltVT) {
7222         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
7223         assert(Count != 0 && "The number of element should not be zero.");
7224         SameEltTypeVT =
7225             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
7226       }
7227       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
7228                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
7229       if (ValueEltVT != PartEltVT)
7230         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
7231       return Val;
7232     }
7233   }
7234   return SDValue();
7235 }
7236 
7237 #define GET_REGISTER_MATCHER
7238 #include "RISCVGenAsmMatcher.inc"
7239 
7240 Register
7241 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
7242                                        const MachineFunction &MF) const {
7243   Register Reg = MatchRegisterAltName(RegName);
7244   if (Reg == RISCV::NoRegister)
7245     Reg = MatchRegisterName(RegName);
7246   if (Reg == RISCV::NoRegister)
7247     report_fatal_error(
7248         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
7249   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
7250   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
7251     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
7252                              StringRef(RegName) + "\"."));
7253   return Reg;
7254 }
7255 
7256 namespace llvm {
7257 namespace RISCVVIntrinsicsTable {
7258 
7259 #define GET_RISCVVIntrinsicsTable_IMPL
7260 #include "RISCVGenSearchableTables.inc"
7261 
7262 } // namespace RISCVVIntrinsicsTable
7263 
7264 } // namespace llvm
7265