1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVRegisterInfo.h"
18 #include "RISCVSubtarget.h"
19 #include "RISCVTargetMachine.h"
20 #include "Utils/RISCVMatInt.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "riscv-lower"
41 
42 STATISTIC(NumTailCalls, "Number of tail calls");
43 
44 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
45                                          const RISCVSubtarget &STI)
46     : TargetLowering(TM), Subtarget(STI) {
47 
48   if (Subtarget.isRV32E())
49     report_fatal_error("Codegen not yet implemented for RV32E");
50 
51   RISCVABI::ABI ABI = Subtarget.getTargetABI();
52   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
53 
54   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
55       !Subtarget.hasStdExtF()) {
56     errs() << "Hard-float 'f' ABI can't be used for a target that "
57                 "doesn't support the F instruction set extension (ignoring "
58                           "target-abi)\n";
59     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
60   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
61              !Subtarget.hasStdExtD()) {
62     errs() << "Hard-float 'd' ABI can't be used for a target that "
63               "doesn't support the D instruction set extension (ignoring "
64               "target-abi)\n";
65     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
66   }
67 
68   switch (ABI) {
69   default:
70     report_fatal_error("Don't know how to lower this ABI");
71   case RISCVABI::ABI_ILP32:
72   case RISCVABI::ABI_ILP32F:
73   case RISCVABI::ABI_ILP32D:
74   case RISCVABI::ABI_LP64:
75   case RISCVABI::ABI_LP64F:
76   case RISCVABI::ABI_LP64D:
77     break;
78   }
79 
80   MVT XLenVT = Subtarget.getXLenVT();
81 
82   // Set up the register classes.
83   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
84 
85   if (Subtarget.hasStdExtZfh())
86     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
87   if (Subtarget.hasStdExtF())
88     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
89   if (Subtarget.hasStdExtD())
90     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
91 
92   if (Subtarget.hasStdExtV()) {
93     addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
94     addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
95     addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
96     addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
97     addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
98     addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
99     addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);
100 
101     addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
102     addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
103     addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
104     addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
105     addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
106     addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
107     addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);
108 
109     addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
110     addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
111     addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
112     addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
113     addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
114     addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);
115 
116     addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
117     addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
118     addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
119     addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
120     addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);
121 
122     addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
123     addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
124     addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
125     addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);
126 
127     if (Subtarget.hasStdExtZfh()) {
128       addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass);
129       addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass);
130       addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass);
131       addRegisterClass(RISCVVMVTs::vfloat16m2_t, &RISCV::VRM2RegClass);
132       addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass);
133       addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass);
134     }
135 
136     if (Subtarget.hasStdExtF()) {
137       addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
138       addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
139       addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
140       addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
141       addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);
142     }
143 
144     if (Subtarget.hasStdExtD()) {
145       addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
146       addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
147       addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
148       addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
149     }
150   }
151 
152   // Compute derived properties from the register classes.
153   computeRegisterProperties(STI.getRegisterInfo());
154 
155   setStackPointerRegisterToSaveRestore(RISCV::X2);
156 
157   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
158     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
159 
160   // TODO: add all necessary setOperationAction calls.
161   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
162 
163   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
164   setOperationAction(ISD::BR_CC, XLenVT, Expand);
165   setOperationAction(ISD::SELECT, XLenVT, Custom);
166   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
167 
168   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
169   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
170 
171   setOperationAction(ISD::VASTART, MVT::Other, Custom);
172   setOperationAction(ISD::VAARG, MVT::Other, Expand);
173   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
174   setOperationAction(ISD::VAEND, MVT::Other, Expand);
175 
176   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
177   if (!Subtarget.hasStdExtZbb()) {
178     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
179     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
180   }
181 
182   if (Subtarget.is64Bit()) {
183     setOperationAction(ISD::ADD, MVT::i32, Custom);
184     setOperationAction(ISD::SUB, MVT::i32, Custom);
185     setOperationAction(ISD::SHL, MVT::i32, Custom);
186     setOperationAction(ISD::SRA, MVT::i32, Custom);
187     setOperationAction(ISD::SRL, MVT::i32, Custom);
188   }
189 
190   if (!Subtarget.hasStdExtM()) {
191     setOperationAction(ISD::MUL, XLenVT, Expand);
192     setOperationAction(ISD::MULHS, XLenVT, Expand);
193     setOperationAction(ISD::MULHU, XLenVT, Expand);
194     setOperationAction(ISD::SDIV, XLenVT, Expand);
195     setOperationAction(ISD::UDIV, XLenVT, Expand);
196     setOperationAction(ISD::SREM, XLenVT, Expand);
197     setOperationAction(ISD::UREM, XLenVT, Expand);
198   }
199 
200   if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
201     setOperationAction(ISD::MUL, MVT::i32, Custom);
202     setOperationAction(ISD::SDIV, MVT::i32, Custom);
203     setOperationAction(ISD::UDIV, MVT::i32, Custom);
204     setOperationAction(ISD::UREM, MVT::i32, Custom);
205   }
206 
207   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
208   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
209   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
210   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
211 
212   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
213   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
214   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
215 
216   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
217     if (Subtarget.is64Bit()) {
218       setOperationAction(ISD::ROTL, MVT::i32, Custom);
219       setOperationAction(ISD::ROTR, MVT::i32, Custom);
220     }
221   } else {
222     setOperationAction(ISD::ROTL, XLenVT, Expand);
223     setOperationAction(ISD::ROTR, XLenVT, Expand);
224   }
225 
226   if (Subtarget.hasStdExtZbp()) {
227     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
228     setOperationAction(ISD::BSWAP, XLenVT, Custom);
229 
230     if (Subtarget.is64Bit()) {
231       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
232       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
233     }
234   } else {
235     setOperationAction(ISD::BSWAP, XLenVT, Expand);
236   }
237 
238   if (Subtarget.hasStdExtZbb()) {
239     setOperationAction(ISD::SMIN, XLenVT, Legal);
240     setOperationAction(ISD::SMAX, XLenVT, Legal);
241     setOperationAction(ISD::UMIN, XLenVT, Legal);
242     setOperationAction(ISD::UMAX, XLenVT, Legal);
243   } else {
244     setOperationAction(ISD::CTTZ, XLenVT, Expand);
245     setOperationAction(ISD::CTLZ, XLenVT, Expand);
246     setOperationAction(ISD::CTPOP, XLenVT, Expand);
247   }
248 
249   if (Subtarget.hasStdExtZbt()) {
250     setOperationAction(ISD::FSHL, XLenVT, Legal);
251     setOperationAction(ISD::FSHR, XLenVT, Legal);
252 
253     if (Subtarget.is64Bit()) {
254       setOperationAction(ISD::FSHL, MVT::i32, Custom);
255       setOperationAction(ISD::FSHR, MVT::i32, Custom);
256     }
257   }
258 
259   ISD::CondCode FPCCToExpand[] = {
260       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
261       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
262       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
263 
264   ISD::NodeType FPOpToExpand[] = {
265       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
266       ISD::FP_TO_FP16};
267 
268   if (Subtarget.hasStdExtZfh())
269     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
270 
271   if (Subtarget.hasStdExtZfh()) {
272     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
273     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
274     for (auto CC : FPCCToExpand)
275       setCondCodeAction(CC, MVT::f16, Expand);
276     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
277     setOperationAction(ISD::SELECT, MVT::f16, Custom);
278     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
279     for (auto Op : FPOpToExpand)
280       setOperationAction(Op, MVT::f16, Expand);
281   }
282 
283   if (Subtarget.hasStdExtF()) {
284     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
285     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
286     for (auto CC : FPCCToExpand)
287       setCondCodeAction(CC, MVT::f32, Expand);
288     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
289     setOperationAction(ISD::SELECT, MVT::f32, Custom);
290     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
291     for (auto Op : FPOpToExpand)
292       setOperationAction(Op, MVT::f32, Expand);
293     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
294     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
295   }
296 
297   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
298     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
299 
300   if (Subtarget.hasStdExtD()) {
301     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
302     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
303     for (auto CC : FPCCToExpand)
304       setCondCodeAction(CC, MVT::f64, Expand);
305     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
306     setOperationAction(ISD::SELECT, MVT::f64, Custom);
307     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
308     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
309     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
310     for (auto Op : FPOpToExpand)
311       setOperationAction(Op, MVT::f64, Expand);
312     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
313     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
314   }
315 
316   if (Subtarget.is64Bit()) {
317     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
318     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
319     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
320     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
321   }
322 
323   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
324   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
325   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
326   setOperationAction(ISD::JumpTable, XLenVT, Custom);
327 
328   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
329 
330   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
331   // Unfortunately this can't be determined just from the ISA naming string.
332   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
333                      Subtarget.is64Bit() ? Legal : Custom);
334 
335   setOperationAction(ISD::TRAP, MVT::Other, Legal);
336   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
337   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
338 
339   if (Subtarget.hasStdExtA()) {
340     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
341     setMinCmpXchgSizeInBits(32);
342   } else {
343     setMaxAtomicSizeInBitsSupported(0);
344   }
345 
346   setBooleanContents(ZeroOrOneBooleanContent);
347 
348   if (Subtarget.hasStdExtV()) {
349     setBooleanVectorContents(ZeroOrOneBooleanContent);
350 
351     // RVV intrinsics may have illegal operands.
352     // We also need to custom legalize vmv.x.s.
353     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
354     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
355     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
356     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
357     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
358     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
359 
360     if (Subtarget.is64Bit()) {
361       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
362       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
363     }
364 
365     for (auto VT : MVT::integer_scalable_vector_valuetypes()) {
366       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
367 
368       setOperationAction(ISD::SMIN, VT, Legal);
369       setOperationAction(ISD::SMAX, VT, Legal);
370       setOperationAction(ISD::UMIN, VT, Legal);
371       setOperationAction(ISD::UMAX, VT, Legal);
372     }
373 
374     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
375     if (!Subtarget.is64Bit())
376       setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
377 
378     // Expand various CCs to best match the RVV ISA, which natively supports UNE
379     // but no other unordered comparisons, and supports all ordered comparisons
380     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
381     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
382     // and we pattern-match those back to the "original", swapping operands once
383     // more. This way we catch both operations and both "vf" and "fv" forms with
384     // fewer patterns.
385     ISD::CondCode VFPCCToExpand[] = {
386         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
387         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
388         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
389     };
390 
391     if (Subtarget.hasStdExtZfh()) {
392       for (auto VT : {RISCVVMVTs::vfloat16mf4_t, RISCVVMVTs::vfloat16mf2_t,
393                       RISCVVMVTs::vfloat16m1_t, RISCVVMVTs::vfloat16m2_t,
394                       RISCVVMVTs::vfloat16m4_t, RISCVVMVTs::vfloat16m8_t}) {
395         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
396         for (auto CC : VFPCCToExpand)
397           setCondCodeAction(CC, VT, Expand);
398       }
399     }
400 
401     if (Subtarget.hasStdExtF()) {
402       for (auto VT : {RISCVVMVTs::vfloat32mf2_t, RISCVVMVTs::vfloat32m1_t,
403                       RISCVVMVTs::vfloat32m2_t, RISCVVMVTs::vfloat32m4_t,
404                       RISCVVMVTs::vfloat32m8_t}) {
405         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
406         for (auto CC : VFPCCToExpand)
407           setCondCodeAction(CC, VT, Expand);
408       }
409     }
410 
411     if (Subtarget.hasStdExtD()) {
412       for (auto VT : {RISCVVMVTs::vfloat64m1_t, RISCVVMVTs::vfloat64m2_t,
413                       RISCVVMVTs::vfloat64m4_t, RISCVVMVTs::vfloat64m8_t}) {
414         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
415         for (auto CC : VFPCCToExpand)
416           setCondCodeAction(CC, VT, Expand);
417       }
418     }
419   }
420 
421   // Function alignments.
422   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
423   setMinFunctionAlignment(FunctionAlignment);
424   setPrefFunctionAlignment(FunctionAlignment);
425 
426   setMinimumJumpTableEntries(5);
427 
428   // Jumps are expensive, compared to logic
429   setJumpIsExpensive();
430 
431   // We can use any register for comparisons
432   setHasMultipleConditionRegisters();
433 
434   if (Subtarget.hasStdExtZbp()) {
435     setTargetDAGCombine(ISD::OR);
436   }
437 }
438 
439 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
440                                             EVT VT) const {
441   if (!VT.isVector())
442     return getPointerTy(DL);
443   if (Subtarget.hasStdExtV())
444     return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
445   return VT.changeVectorElementTypeToInteger();
446 }
447 
448 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
449                                              const CallInst &I,
450                                              MachineFunction &MF,
451                                              unsigned Intrinsic) const {
452   switch (Intrinsic) {
453   default:
454     return false;
455   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
456   case Intrinsic::riscv_masked_atomicrmw_add_i32:
457   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
458   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
459   case Intrinsic::riscv_masked_atomicrmw_max_i32:
460   case Intrinsic::riscv_masked_atomicrmw_min_i32:
461   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
462   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
463   case Intrinsic::riscv_masked_cmpxchg_i32:
464     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
465     Info.opc = ISD::INTRINSIC_W_CHAIN;
466     Info.memVT = MVT::getVT(PtrTy->getElementType());
467     Info.ptrVal = I.getArgOperand(0);
468     Info.offset = 0;
469     Info.align = Align(4);
470     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
471                  MachineMemOperand::MOVolatile;
472     return true;
473   }
474 }
475 
476 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
477                                                 const AddrMode &AM, Type *Ty,
478                                                 unsigned AS,
479                                                 Instruction *I) const {
480   // No global is ever allowed as a base.
481   if (AM.BaseGV)
482     return false;
483 
484   // Require a 12-bit signed offset.
485   if (!isInt<12>(AM.BaseOffs))
486     return false;
487 
488   switch (AM.Scale) {
489   case 0: // "r+i" or just "i", depending on HasBaseReg.
490     break;
491   case 1:
492     if (!AM.HasBaseReg) // allow "r+i".
493       break;
494     return false; // disallow "r+r" or "r+r+i".
495   default:
496     return false;
497   }
498 
499   return true;
500 }
501 
502 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
503   return isInt<12>(Imm);
504 }
505 
506 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
507   return isInt<12>(Imm);
508 }
509 
510 // On RV32, 64-bit integers are split into their high and low parts and held
511 // in two different registers, so the trunc is free since the low register can
512 // just be used.
513 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
514   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
515     return false;
516   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
517   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
518   return (SrcBits == 64 && DestBits == 32);
519 }
520 
521 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
522   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
523       !SrcVT.isInteger() || !DstVT.isInteger())
524     return false;
525   unsigned SrcBits = SrcVT.getSizeInBits();
526   unsigned DestBits = DstVT.getSizeInBits();
527   return (SrcBits == 64 && DestBits == 32);
528 }
529 
530 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
531   // Zexts are free if they can be combined with a load.
532   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
533     EVT MemVT = LD->getMemoryVT();
534     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
535          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
536         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
537          LD->getExtensionType() == ISD::ZEXTLOAD))
538       return true;
539   }
540 
541   return TargetLowering::isZExtFree(Val, VT2);
542 }
543 
544 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
545   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
546 }
547 
548 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
549   return Subtarget.hasStdExtZbb();
550 }
551 
552 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
553   return Subtarget.hasStdExtZbb();
554 }
555 
556 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
557                                        bool ForCodeSize) const {
558   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
559     return false;
560   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
561     return false;
562   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
563     return false;
564   if (Imm.isNegZero())
565     return false;
566   return Imm.isZero();
567 }
568 
569 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
570   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
571          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
572          (VT == MVT::f64 && Subtarget.hasStdExtD());
573 }
574 
575 // Changes the condition code and swaps operands if necessary, so the SetCC
576 // operation matches one of the comparisons supported directly in the RISC-V
577 // ISA.
578 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
579   switch (CC) {
580   default:
581     break;
582   case ISD::SETGT:
583   case ISD::SETLE:
584   case ISD::SETUGT:
585   case ISD::SETULE:
586     CC = ISD::getSetCCSwappedOperands(CC);
587     std::swap(LHS, RHS);
588     break;
589   }
590 }
591 
592 // Return the RISC-V branch opcode that matches the given DAG integer
593 // condition code. The CondCode must be one of those supported by the RISC-V
594 // ISA (see normaliseSetCC).
595 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
596   switch (CC) {
597   default:
598     llvm_unreachable("Unsupported CondCode");
599   case ISD::SETEQ:
600     return RISCV::BEQ;
601   case ISD::SETNE:
602     return RISCV::BNE;
603   case ISD::SETLT:
604     return RISCV::BLT;
605   case ISD::SETGE:
606     return RISCV::BGE;
607   case ISD::SETULT:
608     return RISCV::BLTU;
609   case ISD::SETUGE:
610     return RISCV::BGEU;
611   }
612 }
613 
614 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
615                                             SelectionDAG &DAG) const {
616   switch (Op.getOpcode()) {
617   default:
618     report_fatal_error("unimplemented operand");
619   case ISD::GlobalAddress:
620     return lowerGlobalAddress(Op, DAG);
621   case ISD::BlockAddress:
622     return lowerBlockAddress(Op, DAG);
623   case ISD::ConstantPool:
624     return lowerConstantPool(Op, DAG);
625   case ISD::JumpTable:
626     return lowerJumpTable(Op, DAG);
627   case ISD::GlobalTLSAddress:
628     return lowerGlobalTLSAddress(Op, DAG);
629   case ISD::SELECT:
630     return lowerSELECT(Op, DAG);
631   case ISD::VASTART:
632     return lowerVASTART(Op, DAG);
633   case ISD::FRAMEADDR:
634     return lowerFRAMEADDR(Op, DAG);
635   case ISD::RETURNADDR:
636     return lowerRETURNADDR(Op, DAG);
637   case ISD::SHL_PARTS:
638     return lowerShiftLeftParts(Op, DAG);
639   case ISD::SRA_PARTS:
640     return lowerShiftRightParts(Op, DAG, true);
641   case ISD::SRL_PARTS:
642     return lowerShiftRightParts(Op, DAG, false);
643   case ISD::BITCAST: {
644     assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
645             Subtarget.hasStdExtZfh()) &&
646            "Unexpected custom legalisation");
647     SDLoc DL(Op);
648     SDValue Op0 = Op.getOperand(0);
649     if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
650       if (Op0.getValueType() != MVT::i16)
651         return SDValue();
652       SDValue NewOp0 =
653           DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
654       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
655       return FPConv;
656     } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
657                Subtarget.hasStdExtF()) {
658       if (Op0.getValueType() != MVT::i32)
659         return SDValue();
660       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
661       SDValue FPConv =
662           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
663       return FPConv;
664     }
665     return SDValue();
666   }
667   case ISD::INTRINSIC_WO_CHAIN:
668     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
669   case ISD::INTRINSIC_W_CHAIN:
670     return LowerINTRINSIC_W_CHAIN(Op, DAG);
671   case ISD::BSWAP:
672   case ISD::BITREVERSE: {
673     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
674     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
675     MVT VT = Op.getSimpleValueType();
676     SDLoc DL(Op);
677     // Start with the maximum immediate value which is the bitwidth - 1.
678     unsigned Imm = VT.getSizeInBits() - 1;
679     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
680     if (Op.getOpcode() == ISD::BSWAP)
681       Imm &= ~0x7U;
682     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
683                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
684   }
685   case ISD::SPLAT_VECTOR:
686     return lowerSPLATVECTOR(Op, DAG);
687   }
688 }
689 
690 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
691                              SelectionDAG &DAG, unsigned Flags) {
692   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
693 }
694 
695 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
696                              SelectionDAG &DAG, unsigned Flags) {
697   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
698                                    Flags);
699 }
700 
701 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
702                              SelectionDAG &DAG, unsigned Flags) {
703   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
704                                    N->getOffset(), Flags);
705 }
706 
707 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
708                              SelectionDAG &DAG, unsigned Flags) {
709   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
710 }
711 
712 template <class NodeTy>
713 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
714                                      bool IsLocal) const {
715   SDLoc DL(N);
716   EVT Ty = getPointerTy(DAG.getDataLayout());
717 
718   if (isPositionIndependent()) {
719     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
720     if (IsLocal)
721       // Use PC-relative addressing to access the symbol. This generates the
722       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
723       // %pcrel_lo(auipc)).
724       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
725 
726     // Use PC-relative addressing to access the GOT for this symbol, then load
727     // the address from the GOT. This generates the pattern (PseudoLA sym),
728     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
729     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
730   }
731 
732   switch (getTargetMachine().getCodeModel()) {
733   default:
734     report_fatal_error("Unsupported code model for lowering");
735   case CodeModel::Small: {
736     // Generate a sequence for accessing addresses within the first 2 GiB of
737     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
738     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
739     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
740     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
741     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
742   }
743   case CodeModel::Medium: {
744     // Generate a sequence for accessing addresses within any 2GiB range within
745     // the address space. This generates the pattern (PseudoLLA sym), which
746     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
747     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
748     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
749   }
750   }
751 }
752 
753 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
754                                                 SelectionDAG &DAG) const {
755   SDLoc DL(Op);
756   EVT Ty = Op.getValueType();
757   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
758   int64_t Offset = N->getOffset();
759   MVT XLenVT = Subtarget.getXLenVT();
760 
761   const GlobalValue *GV = N->getGlobal();
762   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
763   SDValue Addr = getAddr(N, DAG, IsLocal);
764 
765   // In order to maximise the opportunity for common subexpression elimination,
766   // emit a separate ADD node for the global address offset instead of folding
767   // it in the global address node. Later peephole optimisations may choose to
768   // fold it back in when profitable.
769   if (Offset != 0)
770     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
771                        DAG.getConstant(Offset, DL, XLenVT));
772   return Addr;
773 }
774 
775 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
776                                                SelectionDAG &DAG) const {
777   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
778 
779   return getAddr(N, DAG);
780 }
781 
782 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
783                                                SelectionDAG &DAG) const {
784   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
785 
786   return getAddr(N, DAG);
787 }
788 
789 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
790                                             SelectionDAG &DAG) const {
791   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
792 
793   return getAddr(N, DAG);
794 }
795 
796 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
797                                               SelectionDAG &DAG,
798                                               bool UseGOT) const {
799   SDLoc DL(N);
800   EVT Ty = getPointerTy(DAG.getDataLayout());
801   const GlobalValue *GV = N->getGlobal();
802   MVT XLenVT = Subtarget.getXLenVT();
803 
804   if (UseGOT) {
805     // Use PC-relative addressing to access the GOT for this TLS symbol, then
806     // load the address from the GOT and add the thread pointer. This generates
807     // the pattern (PseudoLA_TLS_IE sym), which expands to
808     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
809     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
810     SDValue Load =
811         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
812 
813     // Add the thread pointer.
814     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
815     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
816   }
817 
818   // Generate a sequence for accessing the address relative to the thread
819   // pointer, with the appropriate adjustment for the thread pointer offset.
820   // This generates the pattern
821   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
822   SDValue AddrHi =
823       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
824   SDValue AddrAdd =
825       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
826   SDValue AddrLo =
827       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
828 
829   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
830   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
831   SDValue MNAdd = SDValue(
832       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
833       0);
834   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
835 }
836 
837 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
838                                                SelectionDAG &DAG) const {
839   SDLoc DL(N);
840   EVT Ty = getPointerTy(DAG.getDataLayout());
841   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
842   const GlobalValue *GV = N->getGlobal();
843 
844   // Use a PC-relative addressing mode to access the global dynamic GOT address.
845   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
846   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
847   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
848   SDValue Load =
849       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
850 
851   // Prepare argument list to generate call.
852   ArgListTy Args;
853   ArgListEntry Entry;
854   Entry.Node = Load;
855   Entry.Ty = CallTy;
856   Args.push_back(Entry);
857 
858   // Setup call to __tls_get_addr.
859   TargetLowering::CallLoweringInfo CLI(DAG);
860   CLI.setDebugLoc(DL)
861       .setChain(DAG.getEntryNode())
862       .setLibCallee(CallingConv::C, CallTy,
863                     DAG.getExternalSymbol("__tls_get_addr", Ty),
864                     std::move(Args));
865 
866   return LowerCallTo(CLI).first;
867 }
868 
869 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
870                                                    SelectionDAG &DAG) const {
871   SDLoc DL(Op);
872   EVT Ty = Op.getValueType();
873   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
874   int64_t Offset = N->getOffset();
875   MVT XLenVT = Subtarget.getXLenVT();
876 
877   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
878 
879   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
880       CallingConv::GHC)
881     report_fatal_error("In GHC calling convention TLS is not supported");
882 
883   SDValue Addr;
884   switch (Model) {
885   case TLSModel::LocalExec:
886     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
887     break;
888   case TLSModel::InitialExec:
889     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
890     break;
891   case TLSModel::LocalDynamic:
892   case TLSModel::GeneralDynamic:
893     Addr = getDynamicTLSAddr(N, DAG);
894     break;
895   }
896 
897   // In order to maximise the opportunity for common subexpression elimination,
898   // emit a separate ADD node for the global address offset instead of folding
899   // it in the global address node. Later peephole optimisations may choose to
900   // fold it back in when profitable.
901   if (Offset != 0)
902     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
903                        DAG.getConstant(Offset, DL, XLenVT));
904   return Addr;
905 }
906 
907 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
908   SDValue CondV = Op.getOperand(0);
909   SDValue TrueV = Op.getOperand(1);
910   SDValue FalseV = Op.getOperand(2);
911   SDLoc DL(Op);
912   MVT XLenVT = Subtarget.getXLenVT();
913 
914   // If the result type is XLenVT and CondV is the output of a SETCC node
915   // which also operated on XLenVT inputs, then merge the SETCC node into the
916   // lowered RISCVISD::SELECT_CC to take advantage of the integer
917   // compare+branch instructions. i.e.:
918   // (select (setcc lhs, rhs, cc), truev, falsev)
919   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
920   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
921       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
922     SDValue LHS = CondV.getOperand(0);
923     SDValue RHS = CondV.getOperand(1);
924     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
925     ISD::CondCode CCVal = CC->get();
926 
927     normaliseSetCC(LHS, RHS, CCVal);
928 
929     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
930     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
931     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
932   }
933 
934   // Otherwise:
935   // (select condv, truev, falsev)
936   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
937   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
938   SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
939 
940   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
941 
942   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
943 }
944 
945 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
946   MachineFunction &MF = DAG.getMachineFunction();
947   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
948 
949   SDLoc DL(Op);
950   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
951                                  getPointerTy(MF.getDataLayout()));
952 
953   // vastart just stores the address of the VarArgsFrameIndex slot into the
954   // memory location argument.
955   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
956   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
957                       MachinePointerInfo(SV));
958 }
959 
960 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
961                                             SelectionDAG &DAG) const {
962   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
963   MachineFunction &MF = DAG.getMachineFunction();
964   MachineFrameInfo &MFI = MF.getFrameInfo();
965   MFI.setFrameAddressIsTaken(true);
966   Register FrameReg = RI.getFrameRegister(MF);
967   int XLenInBytes = Subtarget.getXLen() / 8;
968 
969   EVT VT = Op.getValueType();
970   SDLoc DL(Op);
971   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
972   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
973   while (Depth--) {
974     int Offset = -(XLenInBytes * 2);
975     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
976                               DAG.getIntPtrConstant(Offset, DL));
977     FrameAddr =
978         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
979   }
980   return FrameAddr;
981 }
982 
983 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
984                                              SelectionDAG &DAG) const {
985   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
986   MachineFunction &MF = DAG.getMachineFunction();
987   MachineFrameInfo &MFI = MF.getFrameInfo();
988   MFI.setReturnAddressIsTaken(true);
989   MVT XLenVT = Subtarget.getXLenVT();
990   int XLenInBytes = Subtarget.getXLen() / 8;
991 
992   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
993     return SDValue();
994 
995   EVT VT = Op.getValueType();
996   SDLoc DL(Op);
997   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
998   if (Depth) {
999     int Off = -XLenInBytes;
1000     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
1001     SDValue Offset = DAG.getConstant(Off, DL, VT);
1002     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
1003                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
1004                        MachinePointerInfo());
1005   }
1006 
1007   // Return the value of the return address register, marking it an implicit
1008   // live-in.
1009   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
1010   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
1011 }
1012 
1013 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
1014                                                  SelectionDAG &DAG) const {
1015   SDLoc DL(Op);
1016   SDValue Lo = Op.getOperand(0);
1017   SDValue Hi = Op.getOperand(1);
1018   SDValue Shamt = Op.getOperand(2);
1019   EVT VT = Lo.getValueType();
1020 
1021   // if Shamt-XLEN < 0: // Shamt < XLEN
1022   //   Lo = Lo << Shamt
1023   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
1024   // else:
1025   //   Lo = 0
1026   //   Hi = Lo << (Shamt-XLEN)
1027 
1028   SDValue Zero = DAG.getConstant(0, DL, VT);
1029   SDValue One = DAG.getConstant(1, DL, VT);
1030   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1031   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1032   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1033   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1034 
1035   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
1036   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
1037   SDValue ShiftRightLo =
1038       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
1039   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
1040   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
1041   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
1042 
1043   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1044 
1045   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
1046   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1047 
1048   SDValue Parts[2] = {Lo, Hi};
1049   return DAG.getMergeValues(Parts, DL);
1050 }
1051 
1052 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
1053                                                   bool IsSRA) const {
1054   SDLoc DL(Op);
1055   SDValue Lo = Op.getOperand(0);
1056   SDValue Hi = Op.getOperand(1);
1057   SDValue Shamt = Op.getOperand(2);
1058   EVT VT = Lo.getValueType();
1059 
1060   // SRA expansion:
1061   //   if Shamt-XLEN < 0: // Shamt < XLEN
1062   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1063   //     Hi = Hi >>s Shamt
1064   //   else:
1065   //     Lo = Hi >>s (Shamt-XLEN);
1066   //     Hi = Hi >>s (XLEN-1)
1067   //
1068   // SRL expansion:
1069   //   if Shamt-XLEN < 0: // Shamt < XLEN
1070   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1071   //     Hi = Hi >>u Shamt
1072   //   else:
1073   //     Lo = Hi >>u (Shamt-XLEN);
1074   //     Hi = 0;
1075 
1076   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
1077 
1078   SDValue Zero = DAG.getConstant(0, DL, VT);
1079   SDValue One = DAG.getConstant(1, DL, VT);
1080   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1081   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1082   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1083   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1084 
1085   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
1086   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
1087   SDValue ShiftLeftHi =
1088       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
1089   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
1090   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
1091   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
1092   SDValue HiFalse =
1093       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
1094 
1095   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1096 
1097   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
1098   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1099 
1100   SDValue Parts[2] = {Lo, Hi};
1101   return DAG.getMergeValues(Parts, DL);
1102 }
1103 
1104 // Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
1105 // illegal (currently only vXi64 RV32).
1106 // FIXME: We could also catch non-constant sign-extended i32 values and lower
1107 // them to SPLAT_VECTOR_I64
1108 SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
1109                                               SelectionDAG &DAG) const {
1110   SDLoc DL(Op);
1111   EVT VecVT = Op.getValueType();
1112   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
1113          "Unexpected SPLAT_VECTOR lowering");
1114   SDValue SplatVal = Op.getOperand(0);
1115 
1116   // If we can prove that the value is a sign-extended 32-bit value, lower this
1117   // as a custom node in order to try and match RVV vector/scalar instructions.
1118   if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
1119     if (isInt<32>(CVal->getSExtValue()))
1120       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
1121                          DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
1122   }
1123 
1124   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
1125   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
1126   // vmv.v.x vX, hi
1127   // vsll.vx vX, vX, /*32*/
1128   // vmv.v.x vY, lo
1129   // vsll.vx vY, vY, /*32*/
1130   // vsrl.vx vY, vY, /*32*/
1131   // vor.vv vX, vX, vY
1132   SDValue One = DAG.getConstant(1, DL, MVT::i32);
1133   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
1134   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
1135   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
1136   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);
1137 
1138   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
1139   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
1140   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
1141 
1142   if (isNullConstant(Hi))
1143     return Lo;
1144 
1145   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
1146   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
1147 
1148   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
1149 }
1150 
1151 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
1152                                                      SelectionDAG &DAG) const {
1153   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1154   SDLoc DL(Op);
1155 
1156   if (Subtarget.hasStdExtV()) {
1157     // Some RVV intrinsics may claim that they want an integer operand to be
1158     // extended.
1159     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1160             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
1161       if (II->ExtendedOperand) {
1162         assert(II->ExtendedOperand < Op.getNumOperands());
1163         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
1164         SDValue &ScalarOp = Operands[II->ExtendedOperand];
1165         EVT OpVT = ScalarOp.getValueType();
1166         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
1167             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
1168           // If the operand is a constant, sign extend to increase our chances
1169           // of being able to use a .vi instruction. ANY_EXTEND would become a
1170           // a zero extend and the simm5 check in isel would fail.
1171           // FIXME: Should we ignore the upper bits in isel instead?
1172           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
1173                                                           : ISD::ANY_EXTEND;
1174           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
1175           return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
1176                              Operands);
1177         }
1178       }
1179     }
1180   }
1181 
1182   switch (IntNo) {
1183   default:
1184     return SDValue();    // Don't custom lower most intrinsics.
1185   case Intrinsic::thread_pointer: {
1186     EVT PtrVT = getPointerTy(DAG.getDataLayout());
1187     return DAG.getRegister(RISCV::X4, PtrVT);
1188   }
1189   case Intrinsic::riscv_vmv_x_s:
1190     assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
1191     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
1192                        Op.getOperand(1));
1193   }
1194 }
1195 
1196 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1197                                                     SelectionDAG &DAG) const {
1198   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1199   SDLoc DL(Op);
1200 
1201   if (Subtarget.hasStdExtV()) {
1202     // Some RVV intrinsics may claim that they want an integer operand to be
1203     // extended.
1204     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1205             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
1206       if (II->ExtendedOperand) {
1207         // The operands start from the second argument in INTRINSIC_W_CHAIN.
1208         unsigned ExtendOp = II->ExtendedOperand + 1;
1209         assert(ExtendOp < Op.getNumOperands());
1210         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
1211         SDValue &ScalarOp = Operands[ExtendOp];
1212         EVT OpVT = ScalarOp.getValueType();
1213         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
1214             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
1215           // If the operand is a constant, sign extend to increase our chances
1216           // of being able to use a .vi instruction. ANY_EXTEND would become a
1217           // a zero extend and the simm5 check in isel would fail.
1218           // FIXME: Should we ignore the upper bits in isel instead?
1219           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
1220                                                           : ISD::ANY_EXTEND;
1221           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
1222           return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
1223                              Operands);
1224         }
1225       }
1226     }
1227   }
1228 
1229   return SDValue();
1230 }
1231 
1232 // Returns the opcode of the target-specific SDNode that implements the 32-bit
1233 // form of the given Opcode.
1234 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
1235   switch (Opcode) {
1236   default:
1237     llvm_unreachable("Unexpected opcode");
1238   case ISD::SHL:
1239     return RISCVISD::SLLW;
1240   case ISD::SRA:
1241     return RISCVISD::SRAW;
1242   case ISD::SRL:
1243     return RISCVISD::SRLW;
1244   case ISD::SDIV:
1245     return RISCVISD::DIVW;
1246   case ISD::UDIV:
1247     return RISCVISD::DIVUW;
1248   case ISD::UREM:
1249     return RISCVISD::REMUW;
1250   case ISD::ROTL:
1251     return RISCVISD::ROLW;
1252   case ISD::ROTR:
1253     return RISCVISD::RORW;
1254   case RISCVISD::GREVI:
1255     return RISCVISD::GREVIW;
1256   case RISCVISD::GORCI:
1257     return RISCVISD::GORCIW;
1258   }
1259 }
1260 
1261 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
1262 // Because i32 isn't a legal type for RV64, these operations would otherwise
1263 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
1264 // later one because the fact the operation was originally of type i32 is
1265 // lost.
1266 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
1267   SDLoc DL(N);
1268   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
1269   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1270   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1271   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
1272   // ReplaceNodeResults requires we maintain the same type for the return value.
1273   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
1274 }
1275 
1276 // Converts the given 32-bit operation to a i64 operation with signed extension
1277 // semantic to reduce the signed extension instructions.
1278 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
1279   SDLoc DL(N);
1280   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1281   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1282   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
1283   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
1284                                DAG.getValueType(MVT::i32));
1285   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
1286 }
1287 
1288 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1289                                              SmallVectorImpl<SDValue> &Results,
1290                                              SelectionDAG &DAG) const {
1291   SDLoc DL(N);
1292   switch (N->getOpcode()) {
1293   default:
1294     llvm_unreachable("Don't know how to custom type legalize this operation!");
1295   case ISD::STRICT_FP_TO_SINT:
1296   case ISD::STRICT_FP_TO_UINT:
1297   case ISD::FP_TO_SINT:
1298   case ISD::FP_TO_UINT: {
1299     bool IsStrict = N->isStrictFPOpcode();
1300     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1301            "Unexpected custom legalisation");
1302     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
1303     // If the FP type needs to be softened, emit a library call using the 'si'
1304     // version. If we left it to default legalization we'd end up with 'di'. If
1305     // the FP type doesn't need to be softened just let generic type
1306     // legalization promote the result type.
1307     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
1308         TargetLowering::TypeSoftenFloat)
1309       return;
1310     RTLIB::Libcall LC;
1311     if (N->getOpcode() == ISD::FP_TO_SINT ||
1312         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
1313       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
1314     else
1315       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
1316     MakeLibCallOptions CallOptions;
1317     EVT OpVT = Op0.getValueType();
1318     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
1319     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
1320     SDValue Result;
1321     std::tie(Result, Chain) =
1322         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
1323     Results.push_back(Result);
1324     if (IsStrict)
1325       Results.push_back(Chain);
1326     break;
1327   }
1328   case ISD::READCYCLECOUNTER: {
1329     assert(!Subtarget.is64Bit() &&
1330            "READCYCLECOUNTER only has custom type legalization on riscv32");
1331 
1332     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
1333     SDValue RCW =
1334         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
1335 
1336     Results.push_back(
1337         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
1338     Results.push_back(RCW.getValue(2));
1339     break;
1340   }
1341   case ISD::ADD:
1342   case ISD::SUB:
1343   case ISD::MUL:
1344     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1345            "Unexpected custom legalisation");
1346     if (N->getOperand(1).getOpcode() == ISD::Constant)
1347       return;
1348     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
1349     break;
1350   case ISD::SHL:
1351   case ISD::SRA:
1352   case ISD::SRL:
1353     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1354            "Unexpected custom legalisation");
1355     if (N->getOperand(1).getOpcode() == ISD::Constant)
1356       return;
1357     Results.push_back(customLegalizeToWOp(N, DAG));
1358     break;
1359   case ISD::ROTL:
1360   case ISD::ROTR:
1361     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1362            "Unexpected custom legalisation");
1363     Results.push_back(customLegalizeToWOp(N, DAG));
1364     break;
1365   case ISD::SDIV:
1366   case ISD::UDIV:
1367   case ISD::UREM:
1368     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1369            Subtarget.hasStdExtM() && "Unexpected custom legalisation");
1370     if (N->getOperand(0).getOpcode() == ISD::Constant ||
1371         N->getOperand(1).getOpcode() == ISD::Constant)
1372       return;
1373     Results.push_back(customLegalizeToWOp(N, DAG));
1374     break;
1375   case ISD::BITCAST: {
1376     assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1377              Subtarget.hasStdExtF()) ||
1378             (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
1379            "Unexpected custom legalisation");
1380     SDValue Op0 = N->getOperand(0);
1381     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
1382       if (Op0.getValueType() != MVT::f16)
1383         return;
1384       SDValue FPConv =
1385           DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
1386       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
1387     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1388                Subtarget.hasStdExtF()) {
1389       if (Op0.getValueType() != MVT::f32)
1390         return;
1391       SDValue FPConv =
1392           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
1393       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
1394     }
1395     break;
1396   }
1397   case RISCVISD::GREVI:
1398   case RISCVISD::GORCI: {
1399     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1400            "Unexpected custom legalisation");
1401     // This is similar to customLegalizeToWOp, except that we pass the second
1402     // operand (a TargetConstant) straight through: it is already of type
1403     // XLenVT.
1404     SDLoc DL(N);
1405     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
1406     SDValue NewOp0 =
1407         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1408     SDValue NewRes =
1409         DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
1410     // ReplaceNodeResults requires we maintain the same type for the return
1411     // value.
1412     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
1413     break;
1414   }
1415   case ISD::BSWAP:
1416   case ISD::BITREVERSE: {
1417     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1418            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1419     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
1420                                  N->getOperand(0));
1421     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
1422     SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
1423                                  DAG.getTargetConstant(Imm, DL,
1424                                                        Subtarget.getXLenVT()));
1425     // ReplaceNodeResults requires we maintain the same type for the return
1426     // value.
1427     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
1428     break;
1429   }
1430   case ISD::FSHL:
1431   case ISD::FSHR: {
1432     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1433            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
1434     SDValue NewOp0 =
1435         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1436     SDValue NewOp1 =
1437         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1438     SDValue NewOp2 =
1439         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
1440     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
1441     // Mask the shift amount to 5 bits.
1442     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
1443                          DAG.getConstant(0x1f, DL, MVT::i64));
1444     unsigned Opc =
1445         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
1446     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
1447     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
1448     break;
1449   }
1450   case ISD::INTRINSIC_WO_CHAIN: {
1451     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
1452     switch (IntNo) {
1453     default:
1454       llvm_unreachable(
1455           "Don't know how to custom type legalize this intrinsic!");
1456     case Intrinsic::riscv_vmv_x_s: {
1457       EVT VT = N->getValueType(0);
1458       assert((VT == MVT::i8 || VT == MVT::i16 ||
1459               (Subtarget.is64Bit() && VT == MVT::i32)) &&
1460              "Unexpected custom legalisation!");
1461       SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
1462                                     Subtarget.getXLenVT(), N->getOperand(1));
1463       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
1464       break;
1465     }
1466     }
1467     break;
1468   }
1469   }
1470 }
1471 
1472 // A structure to hold one of the bit-manipulation patterns below. Together, a
1473 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
1474 //   (or (and (shl x, 1), 0xAAAAAAAA),
1475 //       (and (srl x, 1), 0x55555555))
1476 struct RISCVBitmanipPat {
1477   SDValue Op;
1478   unsigned ShAmt;
1479   bool IsSHL;
1480 
1481   bool formsPairWith(const RISCVBitmanipPat &Other) const {
1482     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
1483   }
1484 };
1485 
1486 // Matches any of the following bit-manipulation patterns:
1487 //   (and (shl x, 1), (0x55555555 << 1))
1488 //   (and (srl x, 1), 0x55555555)
1489 //   (shl (and x, 0x55555555), 1)
1490 //   (srl (and x, (0x55555555 << 1)), 1)
1491 // where the shift amount and mask may vary thus:
1492 //   [1]  = 0x55555555 / 0xAAAAAAAA
1493 //   [2]  = 0x33333333 / 0xCCCCCCCC
1494 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
1495 //   [8]  = 0x00FF00FF / 0xFF00FF00
1496 //   [16] = 0x0000FFFF / 0xFFFFFFFF
1497 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
1498 static Optional<RISCVBitmanipPat> matchRISCVBitmanipPat(SDValue Op) {
1499   Optional<uint64_t> Mask;
1500   // Optionally consume a mask around the shift operation.
1501   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
1502     Mask = Op.getConstantOperandVal(1);
1503     Op = Op.getOperand(0);
1504   }
1505   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
1506     return None;
1507   bool IsSHL = Op.getOpcode() == ISD::SHL;
1508 
1509   if (!isa<ConstantSDNode>(Op.getOperand(1)))
1510     return None;
1511   auto ShAmt = Op.getConstantOperandVal(1);
1512 
1513   if (!isPowerOf2_64(ShAmt))
1514     return None;
1515 
1516   // These are the unshifted masks which we use to match bit-manipulation
1517   // patterns. They may be shifted left in certain circumstances.
1518   static const uint64_t BitmanipMasks[] = {
1519       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
1520       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
1521   };
1522 
1523   unsigned MaskIdx = Log2_64(ShAmt);
1524   if (MaskIdx >= array_lengthof(BitmanipMasks))
1525     return None;
1526 
1527   auto Src = Op.getOperand(0);
1528 
1529   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
1530   auto ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
1531 
1532   // The expected mask is shifted left when the AND is found around SHL
1533   // patterns.
1534   //   ((x >> 1) & 0x55555555)
1535   //   ((x << 1) & 0xAAAAAAAA)
1536   bool SHLExpMask = IsSHL;
1537 
1538   if (!Mask) {
1539     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
1540     // the mask is all ones: consume that now.
1541     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
1542       Mask = Src.getConstantOperandVal(1);
1543       Src = Src.getOperand(0);
1544       // The expected mask is now in fact shifted left for SRL, so reverse the
1545       // decision.
1546       //   ((x & 0xAAAAAAAA) >> 1)
1547       //   ((x & 0x55555555) << 1)
1548       SHLExpMask = !SHLExpMask;
1549     } else {
1550       // Use a default shifted mask of all-ones if there's no AND, truncated
1551       // down to the expected width. This simplifies the logic later on.
1552       Mask = maskTrailingOnes<uint64_t>(Width);
1553       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
1554     }
1555   }
1556 
1557   if (SHLExpMask)
1558     ExpMask <<= ShAmt;
1559 
1560   if (Mask != ExpMask)
1561     return None;
1562 
1563   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
1564 }
1565 
1566 // Match the following pattern as a GREVI(W) operation
1567 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
1568 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
1569                                const RISCVSubtarget &Subtarget) {
1570   EVT VT = Op.getValueType();
1571 
1572   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1573     auto LHS = matchRISCVBitmanipPat(Op.getOperand(0));
1574     auto RHS = matchRISCVBitmanipPat(Op.getOperand(1));
1575     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
1576       SDLoc DL(Op);
1577       return DAG.getNode(
1578           RISCVISD::GREVI, DL, VT, LHS->Op,
1579           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1580     }
1581   }
1582   return SDValue();
1583 }
1584 
1585 // Matches any the following pattern as a GORCI(W) operation
1586 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
1587 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
1588 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
1589 // Note that with the variant of 3.,
1590 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
1591 // the inner pattern will first be matched as GREVI and then the outer
1592 // pattern will be matched to GORC via the first rule above.
1593 // 4.  (or (rotl/rotr x, bitwidth/2), x)
1594 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
1595                                const RISCVSubtarget &Subtarget) {
1596   EVT VT = Op.getValueType();
1597 
1598   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1599     SDLoc DL(Op);
1600     SDValue Op0 = Op.getOperand(0);
1601     SDValue Op1 = Op.getOperand(1);
1602 
1603     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
1604       if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
1605           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
1606         return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
1607       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
1608       if ((Reverse.getOpcode() == ISD::ROTL ||
1609            Reverse.getOpcode() == ISD::ROTR) &&
1610           Reverse.getOperand(0) == X &&
1611           isa<ConstantSDNode>(Reverse.getOperand(1))) {
1612         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
1613         if (RotAmt == (VT.getSizeInBits() / 2))
1614           return DAG.getNode(
1615               RISCVISD::GORCI, DL, VT, X,
1616               DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
1617       }
1618       return SDValue();
1619     };
1620 
1621     // Check for either commutable permutation of (or (GREVI x, shamt), x)
1622     if (SDValue V = MatchOROfReverse(Op0, Op1))
1623       return V;
1624     if (SDValue V = MatchOROfReverse(Op1, Op0))
1625       return V;
1626 
1627     // OR is commutable so canonicalize its OR operand to the left
1628     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
1629       std::swap(Op0, Op1);
1630     if (Op0.getOpcode() != ISD::OR)
1631       return SDValue();
1632     SDValue OrOp0 = Op0.getOperand(0);
1633     SDValue OrOp1 = Op0.getOperand(1);
1634     auto LHS = matchRISCVBitmanipPat(OrOp0);
1635     // OR is commutable so swap the operands and try again: x might have been
1636     // on the left
1637     if (!LHS) {
1638       std::swap(OrOp0, OrOp1);
1639       LHS = matchRISCVBitmanipPat(OrOp0);
1640     }
1641     auto RHS = matchRISCVBitmanipPat(Op1);
1642     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
1643       return DAG.getNode(
1644           RISCVISD::GORCI, DL, VT, LHS->Op,
1645           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1646     }
1647   }
1648   return SDValue();
1649 }
1650 
1651 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
1652 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
1653 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
1654 // not undo itself, but they are redundant.
1655 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
1656   unsigned ShAmt1 = N->getConstantOperandVal(1);
1657   SDValue Src = N->getOperand(0);
1658 
1659   if (Src.getOpcode() != N->getOpcode())
1660     return SDValue();
1661 
1662   unsigned ShAmt2 = Src.getConstantOperandVal(1);
1663   Src = Src.getOperand(0);
1664 
1665   unsigned CombinedShAmt;
1666   if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
1667     CombinedShAmt = ShAmt1 | ShAmt2;
1668   else
1669     CombinedShAmt = ShAmt1 ^ ShAmt2;
1670 
1671   if (CombinedShAmt == 0)
1672     return Src;
1673 
1674   SDLoc DL(N);
1675   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
1676                      DAG.getTargetConstant(CombinedShAmt, DL,
1677                                            N->getOperand(1).getValueType()));
1678 }
1679 
1680 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1681                                                DAGCombinerInfo &DCI) const {
1682   SelectionDAG &DAG = DCI.DAG;
1683 
1684   switch (N->getOpcode()) {
1685   default:
1686     break;
1687   case RISCVISD::SplitF64: {
1688     SDValue Op0 = N->getOperand(0);
1689     // If the input to SplitF64 is just BuildPairF64 then the operation is
1690     // redundant. Instead, use BuildPairF64's operands directly.
1691     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
1692       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
1693 
1694     SDLoc DL(N);
1695 
1696     // It's cheaper to materialise two 32-bit integers than to load a double
1697     // from the constant pool and transfer it to integer registers through the
1698     // stack.
1699     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
1700       APInt V = C->getValueAPF().bitcastToAPInt();
1701       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
1702       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
1703       return DCI.CombineTo(N, Lo, Hi);
1704     }
1705 
1706     // This is a target-specific version of a DAGCombine performed in
1707     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1708     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1709     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1710     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1711         !Op0.getNode()->hasOneUse())
1712       break;
1713     SDValue NewSplitF64 =
1714         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
1715                     Op0.getOperand(0));
1716     SDValue Lo = NewSplitF64.getValue(0);
1717     SDValue Hi = NewSplitF64.getValue(1);
1718     APInt SignBit = APInt::getSignMask(32);
1719     if (Op0.getOpcode() == ISD::FNEG) {
1720       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
1721                                   DAG.getConstant(SignBit, DL, MVT::i32));
1722       return DCI.CombineTo(N, Lo, NewHi);
1723     }
1724     assert(Op0.getOpcode() == ISD::FABS);
1725     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
1726                                 DAG.getConstant(~SignBit, DL, MVT::i32));
1727     return DCI.CombineTo(N, Lo, NewHi);
1728   }
1729   case RISCVISD::SLLW:
1730   case RISCVISD::SRAW:
1731   case RISCVISD::SRLW:
1732   case RISCVISD::ROLW:
1733   case RISCVISD::RORW: {
1734     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
1735     SDValue LHS = N->getOperand(0);
1736     SDValue RHS = N->getOperand(1);
1737     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
1738     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
1739     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
1740         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
1741       if (N->getOpcode() != ISD::DELETED_NODE)
1742         DCI.AddToWorklist(N);
1743       return SDValue(N, 0);
1744     }
1745     break;
1746   }
1747   case RISCVISD::FSLW:
1748   case RISCVISD::FSRW: {
1749     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
1750     // read.
1751     SDValue Op0 = N->getOperand(0);
1752     SDValue Op1 = N->getOperand(1);
1753     SDValue ShAmt = N->getOperand(2);
1754     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1755     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
1756     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
1757         SimplifyDemandedBits(Op1, OpMask, DCI) ||
1758         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
1759       if (N->getOpcode() != ISD::DELETED_NODE)
1760         DCI.AddToWorklist(N);
1761       return SDValue(N, 0);
1762     }
1763     break;
1764   }
1765   case RISCVISD::GREVIW:
1766   case RISCVISD::GORCIW: {
1767     // Only the lower 32 bits of the first operand are read
1768     SDValue Op0 = N->getOperand(0);
1769     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1770     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
1771       if (N->getOpcode() != ISD::DELETED_NODE)
1772         DCI.AddToWorklist(N);
1773       return SDValue(N, 0);
1774     }
1775 
1776     return combineGREVI_GORCI(N, DCI.DAG);
1777   }
1778   case RISCVISD::FMV_X_ANYEXTW_RV64: {
1779     SDLoc DL(N);
1780     SDValue Op0 = N->getOperand(0);
1781     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1782     // conversion is unnecessary and can be replaced with an ANY_EXTEND
1783     // of the FMV_W_X_RV64 operand.
1784     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
1785       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
1786              "Unexpected value type!");
1787       return Op0.getOperand(0);
1788     }
1789 
1790     // This is a target-specific version of a DAGCombine performed in
1791     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1792     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1793     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1794     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1795         !Op0.getNode()->hasOneUse())
1796       break;
1797     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1798                                  Op0.getOperand(0));
1799     APInt SignBit = APInt::getSignMask(32).sext(64);
1800     if (Op0.getOpcode() == ISD::FNEG)
1801       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1802                          DAG.getConstant(SignBit, DL, MVT::i64));
1803 
1804     assert(Op0.getOpcode() == ISD::FABS);
1805     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1806                        DAG.getConstant(~SignBit, DL, MVT::i64));
1807   }
1808   case RISCVISD::GREVI:
1809   case RISCVISD::GORCI:
1810     return combineGREVI_GORCI(N, DCI.DAG);
1811   case ISD::OR:
1812     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
1813       return GREV;
1814     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
1815       return GORC;
1816     break;
1817   }
1818 
1819   return SDValue();
1820 }
1821 
1822 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1823     const SDNode *N, CombineLevel Level) const {
1824   // The following folds are only desirable if `(OP _, c1 << c2)` can be
1825   // materialised in fewer instructions than `(OP _, c1)`:
1826   //
1827   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1828   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1829   SDValue N0 = N->getOperand(0);
1830   EVT Ty = N0.getValueType();
1831   if (Ty.isScalarInteger() &&
1832       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1833     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1834     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1835     if (C1 && C2) {
1836       APInt C1Int = C1->getAPIntValue();
1837       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1838 
1839       // We can materialise `c1 << c2` into an add immediate, so it's "free",
1840       // and the combine should happen, to potentially allow further combines
1841       // later.
1842       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1843           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1844         return true;
1845 
1846       // We can materialise `c1` in an add immediate, so it's "free", and the
1847       // combine should be prevented.
1848       if (C1Int.getMinSignedBits() <= 64 &&
1849           isLegalAddImmediate(C1Int.getSExtValue()))
1850         return false;
1851 
1852       // Neither constant will fit into an immediate, so find materialisation
1853       // costs.
1854       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1855                                               Subtarget.is64Bit());
1856       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1857           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1858 
1859       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1860       // combine should be prevented.
1861       if (C1Cost < ShiftedC1Cost)
1862         return false;
1863     }
1864   }
1865   return true;
1866 }
1867 
1868 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1869     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1870     unsigned Depth) const {
1871   switch (Op.getOpcode()) {
1872   default:
1873     break;
1874   case RISCVISD::SLLW:
1875   case RISCVISD::SRAW:
1876   case RISCVISD::SRLW:
1877   case RISCVISD::DIVW:
1878   case RISCVISD::DIVUW:
1879   case RISCVISD::REMUW:
1880   case RISCVISD::ROLW:
1881   case RISCVISD::RORW:
1882   case RISCVISD::GREVIW:
1883   case RISCVISD::GORCIW:
1884   case RISCVISD::FSLW:
1885   case RISCVISD::FSRW:
1886     // TODO: As the result is sign-extended, this is conservatively correct. A
1887     // more precise answer could be calculated for SRAW depending on known
1888     // bits in the shift amount.
1889     return 33;
1890   case RISCVISD::VMV_X_S:
1891     // The number of sign bits of the scalar result is computed by obtaining the
1892     // element type of the input vector operand, substracting its width from the
1893     // XLEN, and then adding one (sign bit within the element type).
1894     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
1895   }
1896 
1897   return 1;
1898 }
1899 
1900 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1901                                                   MachineBasicBlock *BB) {
1902   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1903 
1904   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1905   // Should the count have wrapped while it was being read, we need to try
1906   // again.
1907   // ...
1908   // read:
1909   // rdcycleh x3 # load high word of cycle
1910   // rdcycle  x2 # load low word of cycle
1911   // rdcycleh x4 # load high word of cycle
1912   // bne x3, x4, read # check if high word reads match, otherwise try again
1913   // ...
1914 
1915   MachineFunction &MF = *BB->getParent();
1916   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1917   MachineFunction::iterator It = ++BB->getIterator();
1918 
1919   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1920   MF.insert(It, LoopMBB);
1921 
1922   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1923   MF.insert(It, DoneMBB);
1924 
1925   // Transfer the remainder of BB and its successor edges to DoneMBB.
1926   DoneMBB->splice(DoneMBB->begin(), BB,
1927                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
1928   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1929 
1930   BB->addSuccessor(LoopMBB);
1931 
1932   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1933   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1934   Register LoReg = MI.getOperand(0).getReg();
1935   Register HiReg = MI.getOperand(1).getReg();
1936   DebugLoc DL = MI.getDebugLoc();
1937 
1938   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1939   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1940       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1941       .addReg(RISCV::X0);
1942   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1943       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1944       .addReg(RISCV::X0);
1945   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1946       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1947       .addReg(RISCV::X0);
1948 
1949   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1950       .addReg(HiReg)
1951       .addReg(ReadAgainReg)
1952       .addMBB(LoopMBB);
1953 
1954   LoopMBB->addSuccessor(LoopMBB);
1955   LoopMBB->addSuccessor(DoneMBB);
1956 
1957   MI.eraseFromParent();
1958 
1959   return DoneMBB;
1960 }
1961 
1962 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1963                                              MachineBasicBlock *BB) {
1964   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1965 
1966   MachineFunction &MF = *BB->getParent();
1967   DebugLoc DL = MI.getDebugLoc();
1968   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1969   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1970   Register LoReg = MI.getOperand(0).getReg();
1971   Register HiReg = MI.getOperand(1).getReg();
1972   Register SrcReg = MI.getOperand(2).getReg();
1973   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1974   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1975 
1976   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1977                           RI);
1978   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1979   MachineMemOperand *MMOLo =
1980       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
1981   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
1982       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
1983   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1984       .addFrameIndex(FI)
1985       .addImm(0)
1986       .addMemOperand(MMOLo);
1987   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1988       .addFrameIndex(FI)
1989       .addImm(4)
1990       .addMemOperand(MMOHi);
1991   MI.eraseFromParent(); // The pseudo instruction is gone now.
1992   return BB;
1993 }
1994 
1995 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1996                                                  MachineBasicBlock *BB) {
1997   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1998          "Unexpected instruction");
1999 
2000   MachineFunction &MF = *BB->getParent();
2001   DebugLoc DL = MI.getDebugLoc();
2002   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2003   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
2004   Register DstReg = MI.getOperand(0).getReg();
2005   Register LoReg = MI.getOperand(1).getReg();
2006   Register HiReg = MI.getOperand(2).getReg();
2007   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
2008   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
2009 
2010   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
2011   MachineMemOperand *MMOLo =
2012       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
2013   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
2014       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
2015   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
2016       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
2017       .addFrameIndex(FI)
2018       .addImm(0)
2019       .addMemOperand(MMOLo);
2020   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
2021       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
2022       .addFrameIndex(FI)
2023       .addImm(4)
2024       .addMemOperand(MMOHi);
2025   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
2026   MI.eraseFromParent(); // The pseudo instruction is gone now.
2027   return BB;
2028 }
2029 
2030 static bool isSelectPseudo(MachineInstr &MI) {
2031   switch (MI.getOpcode()) {
2032   default:
2033     return false;
2034   case RISCV::Select_GPR_Using_CC_GPR:
2035   case RISCV::Select_FPR16_Using_CC_GPR:
2036   case RISCV::Select_FPR32_Using_CC_GPR:
2037   case RISCV::Select_FPR64_Using_CC_GPR:
2038     return true;
2039   }
2040 }
2041 
2042 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
2043                                            MachineBasicBlock *BB) {
2044   // To "insert" Select_* instructions, we actually have to insert the triangle
2045   // control-flow pattern.  The incoming instructions know the destination vreg
2046   // to set, the condition code register to branch on, the true/false values to
2047   // select between, and the condcode to use to select the appropriate branch.
2048   //
2049   // We produce the following control flow:
2050   //     HeadMBB
2051   //     |  \
2052   //     |  IfFalseMBB
2053   //     | /
2054   //    TailMBB
2055   //
2056   // When we find a sequence of selects we attempt to optimize their emission
2057   // by sharing the control flow. Currently we only handle cases where we have
2058   // multiple selects with the exact same condition (same LHS, RHS and CC).
2059   // The selects may be interleaved with other instructions if the other
2060   // instructions meet some requirements we deem safe:
2061   // - They are debug instructions. Otherwise,
2062   // - They do not have side-effects, do not access memory and their inputs do
2063   //   not depend on the results of the select pseudo-instructions.
2064   // The TrueV/FalseV operands of the selects cannot depend on the result of
2065   // previous selects in the sequence.
2066   // These conditions could be further relaxed. See the X86 target for a
2067   // related approach and more information.
2068   Register LHS = MI.getOperand(1).getReg();
2069   Register RHS = MI.getOperand(2).getReg();
2070   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
2071 
2072   SmallVector<MachineInstr *, 4> SelectDebugValues;
2073   SmallSet<Register, 4> SelectDests;
2074   SelectDests.insert(MI.getOperand(0).getReg());
2075 
2076   MachineInstr *LastSelectPseudo = &MI;
2077 
2078   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
2079        SequenceMBBI != E; ++SequenceMBBI) {
2080     if (SequenceMBBI->isDebugInstr())
2081       continue;
2082     else if (isSelectPseudo(*SequenceMBBI)) {
2083       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
2084           SequenceMBBI->getOperand(2).getReg() != RHS ||
2085           SequenceMBBI->getOperand(3).getImm() != CC ||
2086           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
2087           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
2088         break;
2089       LastSelectPseudo = &*SequenceMBBI;
2090       SequenceMBBI->collectDebugValues(SelectDebugValues);
2091       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
2092     } else {
2093       if (SequenceMBBI->hasUnmodeledSideEffects() ||
2094           SequenceMBBI->mayLoadOrStore())
2095         break;
2096       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
2097             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
2098           }))
2099         break;
2100     }
2101   }
2102 
2103   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
2104   const BasicBlock *LLVM_BB = BB->getBasicBlock();
2105   DebugLoc DL = MI.getDebugLoc();
2106   MachineFunction::iterator I = ++BB->getIterator();
2107 
2108   MachineBasicBlock *HeadMBB = BB;
2109   MachineFunction *F = BB->getParent();
2110   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
2111   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
2112 
2113   F->insert(I, IfFalseMBB);
2114   F->insert(I, TailMBB);
2115 
2116   // Transfer debug instructions associated with the selects to TailMBB.
2117   for (MachineInstr *DebugInstr : SelectDebugValues) {
2118     TailMBB->push_back(DebugInstr->removeFromParent());
2119   }
2120 
2121   // Move all instructions after the sequence to TailMBB.
2122   TailMBB->splice(TailMBB->end(), HeadMBB,
2123                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
2124   // Update machine-CFG edges by transferring all successors of the current
2125   // block to the new block which will contain the Phi nodes for the selects.
2126   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
2127   // Set the successors for HeadMBB.
2128   HeadMBB->addSuccessor(IfFalseMBB);
2129   HeadMBB->addSuccessor(TailMBB);
2130 
2131   // Insert appropriate branch.
2132   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
2133 
2134   BuildMI(HeadMBB, DL, TII.get(Opcode))
2135     .addReg(LHS)
2136     .addReg(RHS)
2137     .addMBB(TailMBB);
2138 
2139   // IfFalseMBB just falls through to TailMBB.
2140   IfFalseMBB->addSuccessor(TailMBB);
2141 
2142   // Create PHIs for all of the select pseudo-instructions.
2143   auto SelectMBBI = MI.getIterator();
2144   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
2145   auto InsertionPoint = TailMBB->begin();
2146   while (SelectMBBI != SelectEnd) {
2147     auto Next = std::next(SelectMBBI);
2148     if (isSelectPseudo(*SelectMBBI)) {
2149       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
2150       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
2151               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
2152           .addReg(SelectMBBI->getOperand(4).getReg())
2153           .addMBB(HeadMBB)
2154           .addReg(SelectMBBI->getOperand(5).getReg())
2155           .addMBB(IfFalseMBB);
2156       SelectMBBI->eraseFromParent();
2157     }
2158     SelectMBBI = Next;
2159   }
2160 
2161   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
2162   return TailMBB;
2163 }
2164 
2165 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
2166                                     int VLIndex, unsigned SEWIndex,
2167                                     RISCVVLMUL VLMul, bool WritesElement0) {
2168   MachineFunction &MF = *BB->getParent();
2169   DebugLoc DL = MI.getDebugLoc();
2170   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2171 
2172   unsigned SEW = MI.getOperand(SEWIndex).getImm();
2173   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2174   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
2175 
2176   MachineRegisterInfo &MRI = MF.getRegInfo();
2177 
2178   // VL and VTYPE are alive here.
2179   MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
2180 
2181   if (VLIndex >= 0) {
2182     // Set VL (rs1 != X0).
2183     Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2184     MIB.addReg(DestReg, RegState::Define | RegState::Dead)
2185         .addReg(MI.getOperand(VLIndex).getReg());
2186   } else
2187     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
2188     MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
2189         .addReg(RISCV::X0, RegState::Kill);
2190 
2191   // Default to tail agnostic unless the destination is tied to a source. In
2192   // that case the user would have some control over the tail values. The tail
2193   // policy is also ignored on instructions that only update element 0 like
2194   // vmv.s.x or reductions so use agnostic there to match the common case.
2195   // FIXME: This is conservatively correct, but we might want to detect that
2196   // the input is undefined.
2197   bool TailAgnostic = true;
2198   if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
2199     TailAgnostic = false;
2200 
2201   // For simplicity we reuse the vtype representation here.
2202   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
2203                                      /*TailAgnostic*/ TailAgnostic,
2204                                      /*MaskAgnostic*/ false));
2205 
2206   // Remove (now) redundant operands from pseudo
2207   MI.getOperand(SEWIndex).setImm(-1);
2208   if (VLIndex >= 0) {
2209     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
2210     MI.getOperand(VLIndex).setIsKill(false);
2211   }
2212 
2213   return BB;
2214 }
2215 
2216 MachineBasicBlock *
2217 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2218                                                  MachineBasicBlock *BB) const {
2219   uint64_t TSFlags = MI.getDesc().TSFlags;
2220 
2221   if (TSFlags & RISCVII::HasSEWOpMask) {
2222     unsigned NumOperands = MI.getNumExplicitOperands();
2223     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
2224     unsigned SEWIndex = NumOperands - 1;
2225     bool WritesElement0 = TSFlags & RISCVII::WritesElement0Mask;
2226 
2227     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
2228                                                RISCVII::VLMulShift);
2229     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, WritesElement0);
2230   }
2231 
2232   switch (MI.getOpcode()) {
2233   default:
2234     llvm_unreachable("Unexpected instr type to insert");
2235   case RISCV::ReadCycleWide:
2236     assert(!Subtarget.is64Bit() &&
2237            "ReadCycleWrite is only to be used on riscv32");
2238     return emitReadCycleWidePseudo(MI, BB);
2239   case RISCV::Select_GPR_Using_CC_GPR:
2240   case RISCV::Select_FPR16_Using_CC_GPR:
2241   case RISCV::Select_FPR32_Using_CC_GPR:
2242   case RISCV::Select_FPR64_Using_CC_GPR:
2243     return emitSelectPseudo(MI, BB);
2244   case RISCV::BuildPairF64Pseudo:
2245     return emitBuildPairF64Pseudo(MI, BB);
2246   case RISCV::SplitF64Pseudo:
2247     return emitSplitF64Pseudo(MI, BB);
2248   }
2249 }
2250 
2251 // Calling Convention Implementation.
2252 // The expectations for frontend ABI lowering vary from target to target.
2253 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
2254 // details, but this is a longer term goal. For now, we simply try to keep the
2255 // role of the frontend as simple and well-defined as possible. The rules can
2256 // be summarised as:
2257 // * Never split up large scalar arguments. We handle them here.
2258 // * If a hardfloat calling convention is being used, and the struct may be
2259 // passed in a pair of registers (fp+fp, int+fp), and both registers are
2260 // available, then pass as two separate arguments. If either the GPRs or FPRs
2261 // are exhausted, then pass according to the rule below.
2262 // * If a struct could never be passed in registers or directly in a stack
2263 // slot (as it is larger than 2*XLEN and the floating point rules don't
2264 // apply), then pass it using a pointer with the byval attribute.
2265 // * If a struct is less than 2*XLEN, then coerce to either a two-element
2266 // word-sized array or a 2*XLEN scalar (depending on alignment).
2267 // * The frontend can determine whether a struct is returned by reference or
2268 // not based on its size and fields. If it will be returned by reference, the
2269 // frontend must modify the prototype so a pointer with the sret annotation is
2270 // passed as the first argument. This is not necessary for large scalar
2271 // returns.
2272 // * Struct return values and varargs should be coerced to structs containing
2273 // register-size fields in the same situations they would be for fixed
2274 // arguments.
2275 
2276 static const MCPhysReg ArgGPRs[] = {
2277   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
2278   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
2279 };
2280 static const MCPhysReg ArgFPR16s[] = {
2281   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
2282   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
2283 };
2284 static const MCPhysReg ArgFPR32s[] = {
2285   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
2286   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
2287 };
2288 static const MCPhysReg ArgFPR64s[] = {
2289   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
2290   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
2291 };
2292 // This is an interim calling convention and it may be changed in the future.
2293 static const MCPhysReg ArgVRs[] = {
2294   RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
2295   RISCV::V21, RISCV::V22, RISCV::V23
2296 };
2297 static const MCPhysReg ArgVRM2s[] = {
2298   RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
2299 };
2300 static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
2301 static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
2302 
2303 // Pass a 2*XLEN argument that has been split into two XLEN values through
2304 // registers or the stack as necessary.
2305 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
2306                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
2307                                 MVT ValVT2, MVT LocVT2,
2308                                 ISD::ArgFlagsTy ArgFlags2) {
2309   unsigned XLenInBytes = XLen / 8;
2310   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2311     // At least one half can be passed via register.
2312     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
2313                                      VA1.getLocVT(), CCValAssign::Full));
2314   } else {
2315     // Both halves must be passed on the stack, with proper alignment.
2316     Align StackAlign =
2317         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
2318     State.addLoc(
2319         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
2320                             State.AllocateStack(XLenInBytes, StackAlign),
2321                             VA1.getLocVT(), CCValAssign::Full));
2322     State.addLoc(CCValAssign::getMem(
2323         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2324         LocVT2, CCValAssign::Full));
2325     return false;
2326   }
2327 
2328   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2329     // The second half can also be passed via register.
2330     State.addLoc(
2331         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
2332   } else {
2333     // The second half is passed via the stack, without additional alignment.
2334     State.addLoc(CCValAssign::getMem(
2335         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2336         LocVT2, CCValAssign::Full));
2337   }
2338 
2339   return false;
2340 }
2341 
2342 // Implements the RISC-V calling convention. Returns true upon failure.
2343 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
2344                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
2345                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
2346                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
2347                      Optional<unsigned> FirstMaskArgument) {
2348   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
2349   assert(XLen == 32 || XLen == 64);
2350   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
2351 
2352   // Any return value split in to more than two values can't be returned
2353   // directly.
2354   if (IsRet && ValNo > 1)
2355     return true;
2356 
2357   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
2358   // variadic argument, or if no F16/F32 argument registers are available.
2359   bool UseGPRForF16_F32 = true;
2360   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
2361   // variadic argument, or if no F64 argument registers are available.
2362   bool UseGPRForF64 = true;
2363 
2364   switch (ABI) {
2365   default:
2366     llvm_unreachable("Unexpected ABI");
2367   case RISCVABI::ABI_ILP32:
2368   case RISCVABI::ABI_LP64:
2369     break;
2370   case RISCVABI::ABI_ILP32F:
2371   case RISCVABI::ABI_LP64F:
2372     UseGPRForF16_F32 = !IsFixed;
2373     break;
2374   case RISCVABI::ABI_ILP32D:
2375   case RISCVABI::ABI_LP64D:
2376     UseGPRForF16_F32 = !IsFixed;
2377     UseGPRForF64 = !IsFixed;
2378     break;
2379   }
2380 
2381   // FPR16, FPR32, and FPR64 alias each other.
2382   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
2383     UseGPRForF16_F32 = true;
2384     UseGPRForF64 = true;
2385   }
2386 
2387   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
2388   // similar local variables rather than directly checking against the target
2389   // ABI.
2390 
2391   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
2392     LocVT = XLenVT;
2393     LocInfo = CCValAssign::BCvt;
2394   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
2395     LocVT = MVT::i64;
2396     LocInfo = CCValAssign::BCvt;
2397   }
2398 
2399   // If this is a variadic argument, the RISC-V calling convention requires
2400   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
2401   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
2402   // be used regardless of whether the original argument was split during
2403   // legalisation or not. The argument will not be passed by registers if the
2404   // original type is larger than 2*XLEN, so the register alignment rule does
2405   // not apply.
2406   unsigned TwoXLenInBytes = (2 * XLen) / 8;
2407   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
2408       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
2409     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
2410     // Skip 'odd' register if necessary.
2411     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
2412       State.AllocateReg(ArgGPRs);
2413   }
2414 
2415   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
2416   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
2417       State.getPendingArgFlags();
2418 
2419   assert(PendingLocs.size() == PendingArgFlags.size() &&
2420          "PendingLocs and PendingArgFlags out of sync");
2421 
2422   // Handle passing f64 on RV32D with a soft float ABI or when floating point
2423   // registers are exhausted.
2424   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
2425     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
2426            "Can't lower f64 if it is split");
2427     // Depending on available argument GPRS, f64 may be passed in a pair of
2428     // GPRs, split between a GPR and the stack, or passed completely on the
2429     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
2430     // cases.
2431     Register Reg = State.AllocateReg(ArgGPRs);
2432     LocVT = MVT::i32;
2433     if (!Reg) {
2434       unsigned StackOffset = State.AllocateStack(8, Align(8));
2435       State.addLoc(
2436           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2437       return false;
2438     }
2439     if (!State.AllocateReg(ArgGPRs))
2440       State.AllocateStack(4, Align(4));
2441     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2442     return false;
2443   }
2444 
2445   // Split arguments might be passed indirectly, so keep track of the pending
2446   // values.
2447   if (ArgFlags.isSplit() || !PendingLocs.empty()) {
2448     LocVT = XLenVT;
2449     LocInfo = CCValAssign::Indirect;
2450     PendingLocs.push_back(
2451         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
2452     PendingArgFlags.push_back(ArgFlags);
2453     if (!ArgFlags.isSplitEnd()) {
2454       return false;
2455     }
2456   }
2457 
2458   // If the split argument only had two elements, it should be passed directly
2459   // in registers or on the stack.
2460   if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
2461     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
2462     // Apply the normal calling convention rules to the first half of the
2463     // split argument.
2464     CCValAssign VA = PendingLocs[0];
2465     ISD::ArgFlagsTy AF = PendingArgFlags[0];
2466     PendingLocs.clear();
2467     PendingArgFlags.clear();
2468     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
2469                                ArgFlags);
2470   }
2471 
2472   // Allocate to a register if possible, or else a stack slot.
2473   Register Reg;
2474   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
2475     Reg = State.AllocateReg(ArgFPR16s);
2476   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
2477     Reg = State.AllocateReg(ArgFPR32s);
2478   else if (ValVT == MVT::f64 && !UseGPRForF64)
2479     Reg = State.AllocateReg(ArgFPR64s);
2480   else if (ValVT.isScalableVector()) {
2481     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
2482     if (RC == &RISCV::VRRegClass) {
2483       // Assign the first mask argument to V0.
2484       // This is an interim calling convention and it may be changed in the
2485       // future.
2486       if (FirstMaskArgument.hasValue() &&
2487           ValNo == FirstMaskArgument.getValue()) {
2488         Reg = State.AllocateReg(RISCV::V0);
2489       } else {
2490         Reg = State.AllocateReg(ArgVRs);
2491       }
2492     } else if (RC == &RISCV::VRM2RegClass) {
2493       Reg = State.AllocateReg(ArgVRM2s);
2494     } else if (RC == &RISCV::VRM4RegClass) {
2495       Reg = State.AllocateReg(ArgVRM4s);
2496     } else if (RC == &RISCV::VRM8RegClass) {
2497       Reg = State.AllocateReg(ArgVRM8s);
2498     } else {
2499       llvm_unreachable("Unhandled class register for ValueType");
2500     }
2501     if (!Reg) {
2502       LocInfo = CCValAssign::Indirect;
2503       // Try using a GPR to pass the address
2504       Reg = State.AllocateReg(ArgGPRs);
2505       LocVT = XLenVT;
2506     }
2507   } else
2508     Reg = State.AllocateReg(ArgGPRs);
2509   unsigned StackOffset =
2510       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
2511 
2512   // If we reach this point and PendingLocs is non-empty, we must be at the
2513   // end of a split argument that must be passed indirectly.
2514   if (!PendingLocs.empty()) {
2515     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
2516     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
2517 
2518     for (auto &It : PendingLocs) {
2519       if (Reg)
2520         It.convertToReg(Reg);
2521       else
2522         It.convertToMem(StackOffset);
2523       State.addLoc(It);
2524     }
2525     PendingLocs.clear();
2526     PendingArgFlags.clear();
2527     return false;
2528   }
2529 
2530   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
2531           (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
2532          "Expected an XLenVT or scalable vector types at this stage");
2533 
2534   if (Reg) {
2535     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2536     return false;
2537   }
2538 
2539   // When a floating-point value is passed on the stack, no bit-conversion is
2540   // needed.
2541   if (ValVT.isFloatingPoint()) {
2542     LocVT = ValVT;
2543     LocInfo = CCValAssign::Full;
2544   }
2545   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2546   return false;
2547 }
2548 
2549 template <typename ArgTy>
2550 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
2551   for (const auto &ArgIdx : enumerate(Args)) {
2552     MVT ArgVT = ArgIdx.value().VT;
2553     if (ArgVT.isScalableVector() &&
2554         ArgVT.getVectorElementType().SimpleTy == MVT::i1)
2555       return ArgIdx.index();
2556   }
2557   return None;
2558 }
2559 
2560 void RISCVTargetLowering::analyzeInputArgs(
2561     MachineFunction &MF, CCState &CCInfo,
2562     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
2563   unsigned NumArgs = Ins.size();
2564   FunctionType *FType = MF.getFunction().getFunctionType();
2565 
2566   Optional<unsigned> FirstMaskArgument;
2567   if (Subtarget.hasStdExtV())
2568     FirstMaskArgument = preAssignMask(Ins);
2569 
2570   for (unsigned i = 0; i != NumArgs; ++i) {
2571     MVT ArgVT = Ins[i].VT;
2572     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
2573 
2574     Type *ArgTy = nullptr;
2575     if (IsRet)
2576       ArgTy = FType->getReturnType();
2577     else if (Ins[i].isOrigArg())
2578       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
2579 
2580     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2581     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2582                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
2583                  FirstMaskArgument)) {
2584       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
2585                         << EVT(ArgVT).getEVTString() << '\n');
2586       llvm_unreachable(nullptr);
2587     }
2588   }
2589 }
2590 
2591 void RISCVTargetLowering::analyzeOutputArgs(
2592     MachineFunction &MF, CCState &CCInfo,
2593     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
2594     CallLoweringInfo *CLI) const {
2595   unsigned NumArgs = Outs.size();
2596 
2597   Optional<unsigned> FirstMaskArgument;
2598   if (Subtarget.hasStdExtV())
2599     FirstMaskArgument = preAssignMask(Outs);
2600 
2601   for (unsigned i = 0; i != NumArgs; i++) {
2602     MVT ArgVT = Outs[i].VT;
2603     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2604     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
2605 
2606     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2607     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2608                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
2609                  FirstMaskArgument)) {
2610       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
2611                         << EVT(ArgVT).getEVTString() << "\n");
2612       llvm_unreachable(nullptr);
2613     }
2614   }
2615 }
2616 
2617 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
2618 // values.
2619 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
2620                                    const CCValAssign &VA, const SDLoc &DL) {
2621   switch (VA.getLocInfo()) {
2622   default:
2623     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2624   case CCValAssign::Full:
2625     break;
2626   case CCValAssign::BCvt:
2627     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
2628       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
2629     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
2630       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
2631     else
2632       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2633     break;
2634   }
2635   return Val;
2636 }
2637 
2638 // The caller is responsible for loading the full value if the argument is
2639 // passed with CCValAssign::Indirect.
2640 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
2641                                 const CCValAssign &VA, const SDLoc &DL,
2642                                 const RISCVTargetLowering &TLI) {
2643   MachineFunction &MF = DAG.getMachineFunction();
2644   MachineRegisterInfo &RegInfo = MF.getRegInfo();
2645   EVT LocVT = VA.getLocVT();
2646   SDValue Val;
2647   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
2648   Register VReg = RegInfo.createVirtualRegister(RC);
2649   RegInfo.addLiveIn(VA.getLocReg(), VReg);
2650   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
2651 
2652   if (VA.getLocInfo() == CCValAssign::Indirect)
2653     return Val;
2654 
2655   return convertLocVTToValVT(DAG, Val, VA, DL);
2656 }
2657 
2658 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
2659                                    const CCValAssign &VA, const SDLoc &DL) {
2660   EVT LocVT = VA.getLocVT();
2661 
2662   switch (VA.getLocInfo()) {
2663   default:
2664     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2665   case CCValAssign::Full:
2666     break;
2667   case CCValAssign::BCvt:
2668     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
2669       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
2670     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
2671       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
2672     else
2673       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
2674     break;
2675   }
2676   return Val;
2677 }
2678 
2679 // The caller is responsible for loading the full value if the argument is
2680 // passed with CCValAssign::Indirect.
2681 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
2682                                 const CCValAssign &VA, const SDLoc &DL) {
2683   MachineFunction &MF = DAG.getMachineFunction();
2684   MachineFrameInfo &MFI = MF.getFrameInfo();
2685   EVT LocVT = VA.getLocVT();
2686   EVT ValVT = VA.getValVT();
2687   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
2688   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
2689                                  VA.getLocMemOffset(), /*Immutable=*/true);
2690   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2691   SDValue Val;
2692 
2693   ISD::LoadExtType ExtType;
2694   switch (VA.getLocInfo()) {
2695   default:
2696     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2697   case CCValAssign::Full:
2698   case CCValAssign::Indirect:
2699   case CCValAssign::BCvt:
2700     ExtType = ISD::NON_EXTLOAD;
2701     break;
2702   }
2703   Val = DAG.getExtLoad(
2704       ExtType, DL, LocVT, Chain, FIN,
2705       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
2706   return Val;
2707 }
2708 
2709 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
2710                                        const CCValAssign &VA, const SDLoc &DL) {
2711   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
2712          "Unexpected VA");
2713   MachineFunction &MF = DAG.getMachineFunction();
2714   MachineFrameInfo &MFI = MF.getFrameInfo();
2715   MachineRegisterInfo &RegInfo = MF.getRegInfo();
2716 
2717   if (VA.isMemLoc()) {
2718     // f64 is passed on the stack.
2719     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
2720     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2721     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
2722                        MachinePointerInfo::getFixedStack(MF, FI));
2723   }
2724 
2725   assert(VA.isRegLoc() && "Expected register VA assignment");
2726 
2727   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2728   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
2729   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
2730   SDValue Hi;
2731   if (VA.getLocReg() == RISCV::X17) {
2732     // Second half of f64 is passed on the stack.
2733     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
2734     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2735     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
2736                      MachinePointerInfo::getFixedStack(MF, FI));
2737   } else {
2738     // Second half of f64 is passed in another GPR.
2739     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2740     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
2741     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
2742   }
2743   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
2744 }
2745 
2746 // FastCC has less than 1% performance improvement for some particular
2747 // benchmark. But theoretically, it may has benenfit for some cases.
2748 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
2749                             CCValAssign::LocInfo LocInfo,
2750                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
2751 
2752   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2753     // X5 and X6 might be used for save-restore libcall.
2754     static const MCPhysReg GPRList[] = {
2755         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
2756         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
2757         RISCV::X29, RISCV::X30, RISCV::X31};
2758     if (unsigned Reg = State.AllocateReg(GPRList)) {
2759       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2760       return false;
2761     }
2762   }
2763 
2764   if (LocVT == MVT::f16) {
2765     static const MCPhysReg FPR16List[] = {
2766         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
2767         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
2768         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
2769         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
2770     if (unsigned Reg = State.AllocateReg(FPR16List)) {
2771       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2772       return false;
2773     }
2774   }
2775 
2776   if (LocVT == MVT::f32) {
2777     static const MCPhysReg FPR32List[] = {
2778         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
2779         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
2780         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
2781         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
2782     if (unsigned Reg = State.AllocateReg(FPR32List)) {
2783       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2784       return false;
2785     }
2786   }
2787 
2788   if (LocVT == MVT::f64) {
2789     static const MCPhysReg FPR64List[] = {
2790         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
2791         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
2792         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
2793         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
2794     if (unsigned Reg = State.AllocateReg(FPR64List)) {
2795       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2796       return false;
2797     }
2798   }
2799 
2800   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
2801     unsigned Offset4 = State.AllocateStack(4, Align(4));
2802     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
2803     return false;
2804   }
2805 
2806   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
2807     unsigned Offset5 = State.AllocateStack(8, Align(8));
2808     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
2809     return false;
2810   }
2811 
2812   return true; // CC didn't match.
2813 }
2814 
2815 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
2816                          CCValAssign::LocInfo LocInfo,
2817                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
2818 
2819   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2820     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
2821     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
2822     static const MCPhysReg GPRList[] = {
2823         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
2824         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
2825     if (unsigned Reg = State.AllocateReg(GPRList)) {
2826       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2827       return false;
2828     }
2829   }
2830 
2831   if (LocVT == MVT::f32) {
2832     // Pass in STG registers: F1, ..., F6
2833     //                        fs0 ... fs5
2834     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
2835                                           RISCV::F18_F, RISCV::F19_F,
2836                                           RISCV::F20_F, RISCV::F21_F};
2837     if (unsigned Reg = State.AllocateReg(FPR32List)) {
2838       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2839       return false;
2840     }
2841   }
2842 
2843   if (LocVT == MVT::f64) {
2844     // Pass in STG registers: D1, ..., D6
2845     //                        fs6 ... fs11
2846     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
2847                                           RISCV::F24_D, RISCV::F25_D,
2848                                           RISCV::F26_D, RISCV::F27_D};
2849     if (unsigned Reg = State.AllocateReg(FPR64List)) {
2850       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2851       return false;
2852     }
2853   }
2854 
2855   report_fatal_error("No registers left in GHC calling convention");
2856   return true;
2857 }
2858 
2859 // Transform physical registers into virtual registers.
2860 SDValue RISCVTargetLowering::LowerFormalArguments(
2861     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
2862     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2863     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2864 
2865   MachineFunction &MF = DAG.getMachineFunction();
2866 
2867   switch (CallConv) {
2868   default:
2869     report_fatal_error("Unsupported calling convention");
2870   case CallingConv::C:
2871   case CallingConv::Fast:
2872     break;
2873   case CallingConv::GHC:
2874     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
2875         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
2876       report_fatal_error(
2877         "GHC calling convention requires the F and D instruction set extensions");
2878   }
2879 
2880   const Function &Func = MF.getFunction();
2881   if (Func.hasFnAttribute("interrupt")) {
2882     if (!Func.arg_empty())
2883       report_fatal_error(
2884         "Functions with the interrupt attribute cannot have arguments!");
2885 
2886     StringRef Kind =
2887       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2888 
2889     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
2890       report_fatal_error(
2891         "Function interrupt attribute argument not supported!");
2892   }
2893 
2894   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2895   MVT XLenVT = Subtarget.getXLenVT();
2896   unsigned XLenInBytes = Subtarget.getXLen() / 8;
2897   // Used with vargs to acumulate store chains.
2898   std::vector<SDValue> OutChains;
2899 
2900   // Assign locations to all of the incoming arguments.
2901   SmallVector<CCValAssign, 16> ArgLocs;
2902   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2903 
2904   if (CallConv == CallingConv::Fast)
2905     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2906   else if (CallConv == CallingConv::GHC)
2907     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
2908   else
2909     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2910 
2911   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2912     CCValAssign &VA = ArgLocs[i];
2913     SDValue ArgValue;
2914     // Passing f64 on RV32D with a soft float ABI must be handled as a special
2915     // case.
2916     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2917       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2918     else if (VA.isRegLoc())
2919       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
2920     else
2921       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2922 
2923     if (VA.getLocInfo() == CCValAssign::Indirect) {
2924       // If the original argument was split and passed by reference (e.g. i128
2925       // on RV32), we need to load all parts of it here (using the same
2926       // address).
2927       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2928                                    MachinePointerInfo()));
2929       unsigned ArgIndex = Ins[i].OrigArgIndex;
2930       assert(Ins[i].PartOffset == 0);
2931       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2932         CCValAssign &PartVA = ArgLocs[i + 1];
2933         unsigned PartOffset = Ins[i + 1].PartOffset;
2934         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2935                                       DAG.getIntPtrConstant(PartOffset, DL));
2936         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2937                                      MachinePointerInfo()));
2938         ++i;
2939       }
2940       continue;
2941     }
2942     InVals.push_back(ArgValue);
2943   }
2944 
2945   if (IsVarArg) {
2946     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2947     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2948     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2949     MachineFrameInfo &MFI = MF.getFrameInfo();
2950     MachineRegisterInfo &RegInfo = MF.getRegInfo();
2951     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2952 
2953     // Offset of the first variable argument from stack pointer, and size of
2954     // the vararg save area. For now, the varargs save area is either zero or
2955     // large enough to hold a0-a7.
2956     int VaArgOffset, VarArgsSaveSize;
2957 
2958     // If all registers are allocated, then all varargs must be passed on the
2959     // stack and we don't need to save any argregs.
2960     if (ArgRegs.size() == Idx) {
2961       VaArgOffset = CCInfo.getNextStackOffset();
2962       VarArgsSaveSize = 0;
2963     } else {
2964       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2965       VaArgOffset = -VarArgsSaveSize;
2966     }
2967 
2968     // Record the frame index of the first variable argument
2969     // which is a value necessary to VASTART.
2970     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2971     RVFI->setVarArgsFrameIndex(FI);
2972 
2973     // If saving an odd number of registers then create an extra stack slot to
2974     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2975     // offsets to even-numbered registered remain 2*XLEN-aligned.
2976     if (Idx % 2) {
2977       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2978       VarArgsSaveSize += XLenInBytes;
2979     }
2980 
2981     // Copy the integer registers that may have been used for passing varargs
2982     // to the vararg save area.
2983     for (unsigned I = Idx; I < ArgRegs.size();
2984          ++I, VaArgOffset += XLenInBytes) {
2985       const Register Reg = RegInfo.createVirtualRegister(RC);
2986       RegInfo.addLiveIn(ArgRegs[I], Reg);
2987       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2988       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2989       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2990       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2991                                    MachinePointerInfo::getFixedStack(MF, FI));
2992       cast<StoreSDNode>(Store.getNode())
2993           ->getMemOperand()
2994           ->setValue((Value *)nullptr);
2995       OutChains.push_back(Store);
2996     }
2997     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2998   }
2999 
3000   // All stores are grouped in one node to allow the matching between
3001   // the size of Ins and InVals. This only happens for vararg functions.
3002   if (!OutChains.empty()) {
3003     OutChains.push_back(Chain);
3004     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3005   }
3006 
3007   return Chain;
3008 }
3009 
3010 /// isEligibleForTailCallOptimization - Check whether the call is eligible
3011 /// for tail call optimization.
3012 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
3013 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
3014     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
3015     const SmallVector<CCValAssign, 16> &ArgLocs) const {
3016 
3017   auto &Callee = CLI.Callee;
3018   auto CalleeCC = CLI.CallConv;
3019   auto &Outs = CLI.Outs;
3020   auto &Caller = MF.getFunction();
3021   auto CallerCC = Caller.getCallingConv();
3022 
3023   // Exception-handling functions need a special set of instructions to
3024   // indicate a return to the hardware. Tail-calling another function would
3025   // probably break this.
3026   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
3027   // should be expanded as new function attributes are introduced.
3028   if (Caller.hasFnAttribute("interrupt"))
3029     return false;
3030 
3031   // Do not tail call opt if the stack is used to pass parameters.
3032   if (CCInfo.getNextStackOffset() != 0)
3033     return false;
3034 
3035   // Do not tail call opt if any parameters need to be passed indirectly.
3036   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
3037   // passed indirectly. So the address of the value will be passed in a
3038   // register, or if not available, then the address is put on the stack. In
3039   // order to pass indirectly, space on the stack often needs to be allocated
3040   // in order to store the value. In this case the CCInfo.getNextStackOffset()
3041   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
3042   // are passed CCValAssign::Indirect.
3043   for (auto &VA : ArgLocs)
3044     if (VA.getLocInfo() == CCValAssign::Indirect)
3045       return false;
3046 
3047   // Do not tail call opt if either caller or callee uses struct return
3048   // semantics.
3049   auto IsCallerStructRet = Caller.hasStructRetAttr();
3050   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
3051   if (IsCallerStructRet || IsCalleeStructRet)
3052     return false;
3053 
3054   // Externally-defined functions with weak linkage should not be
3055   // tail-called. The behaviour of branch instructions in this situation (as
3056   // used for tail calls) is implementation-defined, so we cannot rely on the
3057   // linker replacing the tail call with a return.
3058   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3059     const GlobalValue *GV = G->getGlobal();
3060     if (GV->hasExternalWeakLinkage())
3061       return false;
3062   }
3063 
3064   // The callee has to preserve all registers the caller needs to preserve.
3065   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3066   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
3067   if (CalleeCC != CallerCC) {
3068     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
3069     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
3070       return false;
3071   }
3072 
3073   // Byval parameters hand the function a pointer directly into the stack area
3074   // we want to reuse during a tail call. Working around this *is* possible
3075   // but less efficient and uglier in LowerCall.
3076   for (auto &Arg : Outs)
3077     if (Arg.Flags.isByVal())
3078       return false;
3079 
3080   return true;
3081 }
3082 
3083 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
3084 // and output parameter nodes.
3085 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
3086                                        SmallVectorImpl<SDValue> &InVals) const {
3087   SelectionDAG &DAG = CLI.DAG;
3088   SDLoc &DL = CLI.DL;
3089   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3090   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3091   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3092   SDValue Chain = CLI.Chain;
3093   SDValue Callee = CLI.Callee;
3094   bool &IsTailCall = CLI.IsTailCall;
3095   CallingConv::ID CallConv = CLI.CallConv;
3096   bool IsVarArg = CLI.IsVarArg;
3097   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3098   MVT XLenVT = Subtarget.getXLenVT();
3099 
3100   MachineFunction &MF = DAG.getMachineFunction();
3101 
3102   // Analyze the operands of the call, assigning locations to each operand.
3103   SmallVector<CCValAssign, 16> ArgLocs;
3104   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
3105 
3106   if (CallConv == CallingConv::Fast)
3107     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
3108   else if (CallConv == CallingConv::GHC)
3109     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
3110   else
3111     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
3112 
3113   // Check if it's really possible to do a tail call.
3114   if (IsTailCall)
3115     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
3116 
3117   if (IsTailCall)
3118     ++NumTailCalls;
3119   else if (CLI.CB && CLI.CB->isMustTailCall())
3120     report_fatal_error("failed to perform tail call elimination on a call "
3121                        "site marked musttail");
3122 
3123   // Get a count of how many bytes are to be pushed on the stack.
3124   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
3125 
3126   // Create local copies for byval args
3127   SmallVector<SDValue, 8> ByValArgs;
3128   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3129     ISD::ArgFlagsTy Flags = Outs[i].Flags;
3130     if (!Flags.isByVal())
3131       continue;
3132 
3133     SDValue Arg = OutVals[i];
3134     unsigned Size = Flags.getByValSize();
3135     Align Alignment = Flags.getNonZeroByValAlign();
3136 
3137     int FI =
3138         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
3139     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3140     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
3141 
3142     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
3143                           /*IsVolatile=*/false,
3144                           /*AlwaysInline=*/false, IsTailCall,
3145                           MachinePointerInfo(), MachinePointerInfo());
3146     ByValArgs.push_back(FIPtr);
3147   }
3148 
3149   if (!IsTailCall)
3150     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
3151 
3152   // Copy argument values to their designated locations.
3153   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
3154   SmallVector<SDValue, 8> MemOpChains;
3155   SDValue StackPtr;
3156   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
3157     CCValAssign &VA = ArgLocs[i];
3158     SDValue ArgValue = OutVals[i];
3159     ISD::ArgFlagsTy Flags = Outs[i].Flags;
3160 
3161     // Handle passing f64 on RV32D with a soft float ABI as a special case.
3162     bool IsF64OnRV32DSoftABI =
3163         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
3164     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
3165       SDValue SplitF64 = DAG.getNode(
3166           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
3167       SDValue Lo = SplitF64.getValue(0);
3168       SDValue Hi = SplitF64.getValue(1);
3169 
3170       Register RegLo = VA.getLocReg();
3171       RegsToPass.push_back(std::make_pair(RegLo, Lo));
3172 
3173       if (RegLo == RISCV::X17) {
3174         // Second half of f64 is passed on the stack.
3175         // Work out the address of the stack slot.
3176         if (!StackPtr.getNode())
3177           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
3178         // Emit the store.
3179         MemOpChains.push_back(
3180             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
3181       } else {
3182         // Second half of f64 is passed in another GPR.
3183         assert(RegLo < RISCV::X31 && "Invalid register pair");
3184         Register RegHigh = RegLo + 1;
3185         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
3186       }
3187       continue;
3188     }
3189 
3190     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
3191     // as any other MemLoc.
3192 
3193     // Promote the value if needed.
3194     // For now, only handle fully promoted and indirect arguments.
3195     if (VA.getLocInfo() == CCValAssign::Indirect) {
3196       // Store the argument in a stack slot and pass its address.
3197       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
3198       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3199       MemOpChains.push_back(
3200           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
3201                        MachinePointerInfo::getFixedStack(MF, FI)));
3202       // If the original argument was split (e.g. i128), we need
3203       // to store all parts of it here (and pass just one address).
3204       unsigned ArgIndex = Outs[i].OrigArgIndex;
3205       assert(Outs[i].PartOffset == 0);
3206       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
3207         SDValue PartValue = OutVals[i + 1];
3208         unsigned PartOffset = Outs[i + 1].PartOffset;
3209         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
3210                                       DAG.getIntPtrConstant(PartOffset, DL));
3211         MemOpChains.push_back(
3212             DAG.getStore(Chain, DL, PartValue, Address,
3213                          MachinePointerInfo::getFixedStack(MF, FI)));
3214         ++i;
3215       }
3216       ArgValue = SpillSlot;
3217     } else {
3218       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
3219     }
3220 
3221     // Use local copy if it is a byval arg.
3222     if (Flags.isByVal())
3223       ArgValue = ByValArgs[j++];
3224 
3225     if (VA.isRegLoc()) {
3226       // Queue up the argument copies and emit them at the end.
3227       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
3228     } else {
3229       assert(VA.isMemLoc() && "Argument not register or memory");
3230       assert(!IsTailCall && "Tail call not allowed if stack is used "
3231                             "for passing parameters");
3232 
3233       // Work out the address of the stack slot.
3234       if (!StackPtr.getNode())
3235         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
3236       SDValue Address =
3237           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
3238                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
3239 
3240       // Emit the store.
3241       MemOpChains.push_back(
3242           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
3243     }
3244   }
3245 
3246   // Join the stores, which are independent of one another.
3247   if (!MemOpChains.empty())
3248     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3249 
3250   SDValue Glue;
3251 
3252   // Build a sequence of copy-to-reg nodes, chained and glued together.
3253   for (auto &Reg : RegsToPass) {
3254     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
3255     Glue = Chain.getValue(1);
3256   }
3257 
3258   // Validate that none of the argument registers have been marked as
3259   // reserved, if so report an error. Do the same for the return address if this
3260   // is not a tailcall.
3261   validateCCReservedRegs(RegsToPass, MF);
3262   if (!IsTailCall &&
3263       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
3264     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3265         MF.getFunction(),
3266         "Return address register required, but has been reserved."});
3267 
3268   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
3269   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
3270   // split it and then direct call can be matched by PseudoCALL.
3271   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
3272     const GlobalValue *GV = S->getGlobal();
3273 
3274     unsigned OpFlags = RISCVII::MO_CALL;
3275     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
3276       OpFlags = RISCVII::MO_PLT;
3277 
3278     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
3279   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3280     unsigned OpFlags = RISCVII::MO_CALL;
3281 
3282     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
3283                                                  nullptr))
3284       OpFlags = RISCVII::MO_PLT;
3285 
3286     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
3287   }
3288 
3289   // The first call operand is the chain and the second is the target address.
3290   SmallVector<SDValue, 8> Ops;
3291   Ops.push_back(Chain);
3292   Ops.push_back(Callee);
3293 
3294   // Add argument registers to the end of the list so that they are
3295   // known live into the call.
3296   for (auto &Reg : RegsToPass)
3297     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
3298 
3299   if (!IsTailCall) {
3300     // Add a register mask operand representing the call-preserved registers.
3301     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3302     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
3303     assert(Mask && "Missing call preserved mask for calling convention");
3304     Ops.push_back(DAG.getRegisterMask(Mask));
3305   }
3306 
3307   // Glue the call to the argument copies, if any.
3308   if (Glue.getNode())
3309     Ops.push_back(Glue);
3310 
3311   // Emit the call.
3312   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3313 
3314   if (IsTailCall) {
3315     MF.getFrameInfo().setHasTailCall();
3316     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
3317   }
3318 
3319   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
3320   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
3321   Glue = Chain.getValue(1);
3322 
3323   // Mark the end of the call, which is glued to the call itself.
3324   Chain = DAG.getCALLSEQ_END(Chain,
3325                              DAG.getConstant(NumBytes, DL, PtrVT, true),
3326                              DAG.getConstant(0, DL, PtrVT, true),
3327                              Glue, DL);
3328   Glue = Chain.getValue(1);
3329 
3330   // Assign locations to each value returned by this call.
3331   SmallVector<CCValAssign, 16> RVLocs;
3332   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3333   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
3334 
3335   // Copy all of the result registers out of their specified physreg.
3336   for (auto &VA : RVLocs) {
3337     // Copy the value out
3338     SDValue RetValue =
3339         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
3340     // Glue the RetValue to the end of the call sequence
3341     Chain = RetValue.getValue(1);
3342     Glue = RetValue.getValue(2);
3343 
3344     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
3345       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
3346       SDValue RetValue2 =
3347           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
3348       Chain = RetValue2.getValue(1);
3349       Glue = RetValue2.getValue(2);
3350       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
3351                              RetValue2);
3352     }
3353 
3354     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
3355 
3356     InVals.push_back(RetValue);
3357   }
3358 
3359   return Chain;
3360 }
3361 
3362 bool RISCVTargetLowering::CanLowerReturn(
3363     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
3364     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
3365   SmallVector<CCValAssign, 16> RVLocs;
3366   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3367 
3368   Optional<unsigned> FirstMaskArgument;
3369   if (Subtarget.hasStdExtV())
3370     FirstMaskArgument = preAssignMask(Outs);
3371 
3372   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3373     MVT VT = Outs[i].VT;
3374     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
3375     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
3376     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
3377                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
3378                  *this, FirstMaskArgument))
3379       return false;
3380   }
3381   return true;
3382 }
3383 
3384 SDValue
3385 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3386                                  bool IsVarArg,
3387                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
3388                                  const SmallVectorImpl<SDValue> &OutVals,
3389                                  const SDLoc &DL, SelectionDAG &DAG) const {
3390   const MachineFunction &MF = DAG.getMachineFunction();
3391   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
3392 
3393   // Stores the assignment of the return value to a location.
3394   SmallVector<CCValAssign, 16> RVLocs;
3395 
3396   // Info about the registers and stack slot.
3397   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3398                  *DAG.getContext());
3399 
3400   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
3401                     nullptr);
3402 
3403   if (CallConv == CallingConv::GHC && !RVLocs.empty())
3404     report_fatal_error("GHC functions return void only");
3405 
3406   SDValue Glue;
3407   SmallVector<SDValue, 4> RetOps(1, Chain);
3408 
3409   // Copy the result values into the output registers.
3410   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
3411     SDValue Val = OutVals[i];
3412     CCValAssign &VA = RVLocs[i];
3413     assert(VA.isRegLoc() && "Can only return in registers!");
3414 
3415     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
3416       // Handle returning f64 on RV32D with a soft float ABI.
3417       assert(VA.isRegLoc() && "Expected return via registers");
3418       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
3419                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
3420       SDValue Lo = SplitF64.getValue(0);
3421       SDValue Hi = SplitF64.getValue(1);
3422       Register RegLo = VA.getLocReg();
3423       assert(RegLo < RISCV::X31 && "Invalid register pair");
3424       Register RegHi = RegLo + 1;
3425 
3426       if (STI.isRegisterReservedByUser(RegLo) ||
3427           STI.isRegisterReservedByUser(RegHi))
3428         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3429             MF.getFunction(),
3430             "Return value register required, but has been reserved."});
3431 
3432       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
3433       Glue = Chain.getValue(1);
3434       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
3435       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
3436       Glue = Chain.getValue(1);
3437       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
3438     } else {
3439       // Handle a 'normal' return.
3440       Val = convertValVTToLocVT(DAG, Val, VA, DL);
3441       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3442 
3443       if (STI.isRegisterReservedByUser(VA.getLocReg()))
3444         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3445             MF.getFunction(),
3446             "Return value register required, but has been reserved."});
3447 
3448       // Guarantee that all emitted copies are stuck together.
3449       Glue = Chain.getValue(1);
3450       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3451     }
3452   }
3453 
3454   RetOps[0] = Chain; // Update chain.
3455 
3456   // Add the glue node if we have it.
3457   if (Glue.getNode()) {
3458     RetOps.push_back(Glue);
3459   }
3460 
3461   // Interrupt service routines use different return instructions.
3462   const Function &Func = DAG.getMachineFunction().getFunction();
3463   if (Func.hasFnAttribute("interrupt")) {
3464     if (!Func.getReturnType()->isVoidTy())
3465       report_fatal_error(
3466           "Functions with the interrupt attribute must have void return type!");
3467 
3468     MachineFunction &MF = DAG.getMachineFunction();
3469     StringRef Kind =
3470       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
3471 
3472     unsigned RetOpc;
3473     if (Kind == "user")
3474       RetOpc = RISCVISD::URET_FLAG;
3475     else if (Kind == "supervisor")
3476       RetOpc = RISCVISD::SRET_FLAG;
3477     else
3478       RetOpc = RISCVISD::MRET_FLAG;
3479 
3480     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
3481   }
3482 
3483   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
3484 }
3485 
3486 void RISCVTargetLowering::validateCCReservedRegs(
3487     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
3488     MachineFunction &MF) const {
3489   const Function &F = MF.getFunction();
3490   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
3491 
3492   if (llvm::any_of(Regs, [&STI](auto Reg) {
3493         return STI.isRegisterReservedByUser(Reg.first);
3494       }))
3495     F.getContext().diagnose(DiagnosticInfoUnsupported{
3496         F, "Argument register required, but has been reserved."});
3497 }
3498 
3499 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3500   return CI->isTailCall();
3501 }
3502 
3503 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
3504 #define NODE_NAME_CASE(NODE)                                                   \
3505   case RISCVISD::NODE:                                                         \
3506     return "RISCVISD::" #NODE;
3507   // clang-format off
3508   switch ((RISCVISD::NodeType)Opcode) {
3509   case RISCVISD::FIRST_NUMBER:
3510     break;
3511   NODE_NAME_CASE(RET_FLAG)
3512   NODE_NAME_CASE(URET_FLAG)
3513   NODE_NAME_CASE(SRET_FLAG)
3514   NODE_NAME_CASE(MRET_FLAG)
3515   NODE_NAME_CASE(CALL)
3516   NODE_NAME_CASE(SELECT_CC)
3517   NODE_NAME_CASE(BuildPairF64)
3518   NODE_NAME_CASE(SplitF64)
3519   NODE_NAME_CASE(TAIL)
3520   NODE_NAME_CASE(SLLW)
3521   NODE_NAME_CASE(SRAW)
3522   NODE_NAME_CASE(SRLW)
3523   NODE_NAME_CASE(DIVW)
3524   NODE_NAME_CASE(DIVUW)
3525   NODE_NAME_CASE(REMUW)
3526   NODE_NAME_CASE(ROLW)
3527   NODE_NAME_CASE(RORW)
3528   NODE_NAME_CASE(FSLW)
3529   NODE_NAME_CASE(FSRW)
3530   NODE_NAME_CASE(FMV_H_X)
3531   NODE_NAME_CASE(FMV_X_ANYEXTH)
3532   NODE_NAME_CASE(FMV_W_X_RV64)
3533   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
3534   NODE_NAME_CASE(READ_CYCLE_WIDE)
3535   NODE_NAME_CASE(GREVI)
3536   NODE_NAME_CASE(GREVIW)
3537   NODE_NAME_CASE(GORCI)
3538   NODE_NAME_CASE(GORCIW)
3539   NODE_NAME_CASE(VMV_X_S)
3540   NODE_NAME_CASE(SPLAT_VECTOR_I64)
3541   }
3542   // clang-format on
3543   return nullptr;
3544 #undef NODE_NAME_CASE
3545 }
3546 
3547 /// getConstraintType - Given a constraint letter, return the type of
3548 /// constraint it is for this target.
3549 RISCVTargetLowering::ConstraintType
3550 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
3551   if (Constraint.size() == 1) {
3552     switch (Constraint[0]) {
3553     default:
3554       break;
3555     case 'f':
3556       return C_RegisterClass;
3557     case 'I':
3558     case 'J':
3559     case 'K':
3560       return C_Immediate;
3561     case 'A':
3562       return C_Memory;
3563     }
3564   }
3565   return TargetLowering::getConstraintType(Constraint);
3566 }
3567 
3568 std::pair<unsigned, const TargetRegisterClass *>
3569 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3570                                                   StringRef Constraint,
3571                                                   MVT VT) const {
3572   // First, see if this is a constraint that directly corresponds to a
3573   // RISCV register class.
3574   if (Constraint.size() == 1) {
3575     switch (Constraint[0]) {
3576     case 'r':
3577       return std::make_pair(0U, &RISCV::GPRRegClass);
3578     case 'f':
3579       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
3580         return std::make_pair(0U, &RISCV::FPR16RegClass);
3581       if (Subtarget.hasStdExtF() && VT == MVT::f32)
3582         return std::make_pair(0U, &RISCV::FPR32RegClass);
3583       if (Subtarget.hasStdExtD() && VT == MVT::f64)
3584         return std::make_pair(0U, &RISCV::FPR64RegClass);
3585       break;
3586     default:
3587       break;
3588     }
3589   }
3590 
3591   // Clang will correctly decode the usage of register name aliases into their
3592   // official names. However, other frontends like `rustc` do not. This allows
3593   // users of these frontends to use the ABI names for registers in LLVM-style
3594   // register constraints.
3595   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
3596                                .Case("{zero}", RISCV::X0)
3597                                .Case("{ra}", RISCV::X1)
3598                                .Case("{sp}", RISCV::X2)
3599                                .Case("{gp}", RISCV::X3)
3600                                .Case("{tp}", RISCV::X4)
3601                                .Case("{t0}", RISCV::X5)
3602                                .Case("{t1}", RISCV::X6)
3603                                .Case("{t2}", RISCV::X7)
3604                                .Cases("{s0}", "{fp}", RISCV::X8)
3605                                .Case("{s1}", RISCV::X9)
3606                                .Case("{a0}", RISCV::X10)
3607                                .Case("{a1}", RISCV::X11)
3608                                .Case("{a2}", RISCV::X12)
3609                                .Case("{a3}", RISCV::X13)
3610                                .Case("{a4}", RISCV::X14)
3611                                .Case("{a5}", RISCV::X15)
3612                                .Case("{a6}", RISCV::X16)
3613                                .Case("{a7}", RISCV::X17)
3614                                .Case("{s2}", RISCV::X18)
3615                                .Case("{s3}", RISCV::X19)
3616                                .Case("{s4}", RISCV::X20)
3617                                .Case("{s5}", RISCV::X21)
3618                                .Case("{s6}", RISCV::X22)
3619                                .Case("{s7}", RISCV::X23)
3620                                .Case("{s8}", RISCV::X24)
3621                                .Case("{s9}", RISCV::X25)
3622                                .Case("{s10}", RISCV::X26)
3623                                .Case("{s11}", RISCV::X27)
3624                                .Case("{t3}", RISCV::X28)
3625                                .Case("{t4}", RISCV::X29)
3626                                .Case("{t5}", RISCV::X30)
3627                                .Case("{t6}", RISCV::X31)
3628                                .Default(RISCV::NoRegister);
3629   if (XRegFromAlias != RISCV::NoRegister)
3630     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
3631 
3632   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
3633   // TableGen record rather than the AsmName to choose registers for InlineAsm
3634   // constraints, plus we want to match those names to the widest floating point
3635   // register type available, manually select floating point registers here.
3636   //
3637   // The second case is the ABI name of the register, so that frontends can also
3638   // use the ABI names in register constraint lists.
3639   if (Subtarget.hasStdExtF()) {
3640     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
3641                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
3642                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
3643                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
3644                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
3645                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
3646                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
3647                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
3648                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
3649                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
3650                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
3651                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
3652                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
3653                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
3654                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
3655                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
3656                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
3657                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
3658                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
3659                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
3660                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
3661                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
3662                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
3663                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
3664                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
3665                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
3666                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
3667                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
3668                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
3669                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
3670                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
3671                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
3672                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
3673                         .Default(RISCV::NoRegister);
3674     if (FReg != RISCV::NoRegister) {
3675       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
3676       if (Subtarget.hasStdExtD()) {
3677         unsigned RegNo = FReg - RISCV::F0_F;
3678         unsigned DReg = RISCV::F0_D + RegNo;
3679         return std::make_pair(DReg, &RISCV::FPR64RegClass);
3680       }
3681       return std::make_pair(FReg, &RISCV::FPR32RegClass);
3682     }
3683   }
3684 
3685   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3686 }
3687 
3688 unsigned
3689 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
3690   // Currently only support length 1 constraints.
3691   if (ConstraintCode.size() == 1) {
3692     switch (ConstraintCode[0]) {
3693     case 'A':
3694       return InlineAsm::Constraint_A;
3695     default:
3696       break;
3697     }
3698   }
3699 
3700   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
3701 }
3702 
3703 void RISCVTargetLowering::LowerAsmOperandForConstraint(
3704     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
3705     SelectionDAG &DAG) const {
3706   // Currently only support length 1 constraints.
3707   if (Constraint.length() == 1) {
3708     switch (Constraint[0]) {
3709     case 'I':
3710       // Validate & create a 12-bit signed immediate operand.
3711       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3712         uint64_t CVal = C->getSExtValue();
3713         if (isInt<12>(CVal))
3714           Ops.push_back(
3715               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
3716       }
3717       return;
3718     case 'J':
3719       // Validate & create an integer zero operand.
3720       if (auto *C = dyn_cast<ConstantSDNode>(Op))
3721         if (C->getZExtValue() == 0)
3722           Ops.push_back(
3723               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
3724       return;
3725     case 'K':
3726       // Validate & create a 5-bit unsigned immediate operand.
3727       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3728         uint64_t CVal = C->getZExtValue();
3729         if (isUInt<5>(CVal))
3730           Ops.push_back(
3731               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
3732       }
3733       return;
3734     default:
3735       break;
3736     }
3737   }
3738   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3739 }
3740 
3741 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
3742                                                    Instruction *Inst,
3743                                                    AtomicOrdering Ord) const {
3744   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
3745     return Builder.CreateFence(Ord);
3746   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
3747     return Builder.CreateFence(AtomicOrdering::Release);
3748   return nullptr;
3749 }
3750 
3751 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
3752                                                     Instruction *Inst,
3753                                                     AtomicOrdering Ord) const {
3754   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
3755     return Builder.CreateFence(AtomicOrdering::Acquire);
3756   return nullptr;
3757 }
3758 
3759 TargetLowering::AtomicExpansionKind
3760 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
3761   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
3762   // point operations can't be used in an lr/sc sequence without breaking the
3763   // forward-progress guarantee.
3764   if (AI->isFloatingPointOperation())
3765     return AtomicExpansionKind::CmpXChg;
3766 
3767   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
3768   if (Size == 8 || Size == 16)
3769     return AtomicExpansionKind::MaskedIntrinsic;
3770   return AtomicExpansionKind::None;
3771 }
3772 
3773 static Intrinsic::ID
3774 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
3775   if (XLen == 32) {
3776     switch (BinOp) {
3777     default:
3778       llvm_unreachable("Unexpected AtomicRMW BinOp");
3779     case AtomicRMWInst::Xchg:
3780       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
3781     case AtomicRMWInst::Add:
3782       return Intrinsic::riscv_masked_atomicrmw_add_i32;
3783     case AtomicRMWInst::Sub:
3784       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
3785     case AtomicRMWInst::Nand:
3786       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
3787     case AtomicRMWInst::Max:
3788       return Intrinsic::riscv_masked_atomicrmw_max_i32;
3789     case AtomicRMWInst::Min:
3790       return Intrinsic::riscv_masked_atomicrmw_min_i32;
3791     case AtomicRMWInst::UMax:
3792       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
3793     case AtomicRMWInst::UMin:
3794       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
3795     }
3796   }
3797 
3798   if (XLen == 64) {
3799     switch (BinOp) {
3800     default:
3801       llvm_unreachable("Unexpected AtomicRMW BinOp");
3802     case AtomicRMWInst::Xchg:
3803       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
3804     case AtomicRMWInst::Add:
3805       return Intrinsic::riscv_masked_atomicrmw_add_i64;
3806     case AtomicRMWInst::Sub:
3807       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
3808     case AtomicRMWInst::Nand:
3809       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
3810     case AtomicRMWInst::Max:
3811       return Intrinsic::riscv_masked_atomicrmw_max_i64;
3812     case AtomicRMWInst::Min:
3813       return Intrinsic::riscv_masked_atomicrmw_min_i64;
3814     case AtomicRMWInst::UMax:
3815       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
3816     case AtomicRMWInst::UMin:
3817       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
3818     }
3819   }
3820 
3821   llvm_unreachable("Unexpected XLen\n");
3822 }
3823 
3824 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
3825     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
3826     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
3827   unsigned XLen = Subtarget.getXLen();
3828   Value *Ordering =
3829       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
3830   Type *Tys[] = {AlignedAddr->getType()};
3831   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
3832       AI->getModule(),
3833       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
3834 
3835   if (XLen == 64) {
3836     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
3837     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
3838     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
3839   }
3840 
3841   Value *Result;
3842 
3843   // Must pass the shift amount needed to sign extend the loaded value prior
3844   // to performing a signed comparison for min/max. ShiftAmt is the number of
3845   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
3846   // is the number of bits to left+right shift the value in order to
3847   // sign-extend.
3848   if (AI->getOperation() == AtomicRMWInst::Min ||
3849       AI->getOperation() == AtomicRMWInst::Max) {
3850     const DataLayout &DL = AI->getModule()->getDataLayout();
3851     unsigned ValWidth =
3852         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
3853     Value *SextShamt =
3854         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
3855     Result = Builder.CreateCall(LrwOpScwLoop,
3856                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
3857   } else {
3858     Result =
3859         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
3860   }
3861 
3862   if (XLen == 64)
3863     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
3864   return Result;
3865 }
3866 
3867 TargetLowering::AtomicExpansionKind
3868 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
3869     AtomicCmpXchgInst *CI) const {
3870   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
3871   if (Size == 8 || Size == 16)
3872     return AtomicExpansionKind::MaskedIntrinsic;
3873   return AtomicExpansionKind::None;
3874 }
3875 
3876 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
3877     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
3878     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
3879   unsigned XLen = Subtarget.getXLen();
3880   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
3881   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
3882   if (XLen == 64) {
3883     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
3884     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
3885     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
3886     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
3887   }
3888   Type *Tys[] = {AlignedAddr->getType()};
3889   Function *MaskedCmpXchg =
3890       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
3891   Value *Result = Builder.CreateCall(
3892       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
3893   if (XLen == 64)
3894     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
3895   return Result;
3896 }
3897 
3898 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3899                                                      EVT VT) const {
3900   VT = VT.getScalarType();
3901 
3902   if (!VT.isSimple())
3903     return false;
3904 
3905   switch (VT.getSimpleVT().SimpleTy) {
3906   case MVT::f16:
3907     return Subtarget.hasStdExtZfh();
3908   case MVT::f32:
3909     return Subtarget.hasStdExtF();
3910   case MVT::f64:
3911     return Subtarget.hasStdExtD();
3912   default:
3913     break;
3914   }
3915 
3916   return false;
3917 }
3918 
3919 Register RISCVTargetLowering::getExceptionPointerRegister(
3920     const Constant *PersonalityFn) const {
3921   return RISCV::X10;
3922 }
3923 
3924 Register RISCVTargetLowering::getExceptionSelectorRegister(
3925     const Constant *PersonalityFn) const {
3926   return RISCV::X11;
3927 }
3928 
3929 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
3930   // Return false to suppress the unnecessary extensions if the LibCall
3931   // arguments or return value is f32 type for LP64 ABI.
3932   RISCVABI::ABI ABI = Subtarget.getTargetABI();
3933   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
3934     return false;
3935 
3936   return true;
3937 }
3938 
3939 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
3940                                                  SDValue C) const {
3941   // Check integral scalar types.
3942   if (VT.isScalarInteger()) {
3943     // Omit the optimization if the sub target has the M extension and the data
3944     // size exceeds XLen.
3945     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
3946       return false;
3947     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
3948       // Break the MUL to a SLLI and an ADD/SUB.
3949       const APInt &Imm = ConstNode->getAPIntValue();
3950       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
3951           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
3952         return true;
3953       // Omit the following optimization if the sub target has the M extension
3954       // and the data size >= XLen.
3955       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
3956         return false;
3957       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
3958       // a pair of LUI/ADDI.
3959       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
3960         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
3961         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
3962             (1 - ImmS).isPowerOf2())
3963         return true;
3964       }
3965     }
3966   }
3967 
3968   return false;
3969 }
3970 
3971 #define GET_REGISTER_MATCHER
3972 #include "RISCVGenAsmMatcher.inc"
3973 
3974 Register
3975 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
3976                                        const MachineFunction &MF) const {
3977   Register Reg = MatchRegisterAltName(RegName);
3978   if (Reg == RISCV::NoRegister)
3979     Reg = MatchRegisterName(RegName);
3980   if (Reg == RISCV::NoRegister)
3981     report_fatal_error(
3982         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
3983   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
3984   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
3985     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
3986                              StringRef(RegName) + "\"."));
3987   return Reg;
3988 }
3989 
3990 namespace llvm {
3991 namespace RISCVVIntrinsicsTable {
3992 
3993 #define GET_RISCVVIntrinsicsTable_IMPL
3994 #include "RISCVGenSearchableTables.inc"
3995 
3996 } // namespace RISCVVIntrinsicsTable
3997 } // namespace llvm
3998