1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVRegisterInfo.h"
18 #include "RISCVSubtarget.h"
19 #include "RISCVTargetMachine.h"
20 #include "Utils/RISCVMatInt.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   if (Subtarget.hasStdExtV()) {
94     addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
95     addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
96     addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
97     addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
98     addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
99     addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
100     addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);
101 
102     addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
103     addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
104     addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
105     addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
106     addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
107     addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
108     addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);
109 
110     addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
111     addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
112     addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
113     addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
114     addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
115     addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);
116 
117     addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
118     addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
119     addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
120     addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
121     addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);
122 
123     addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
124     addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
125     addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
126     addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);
127 
128     if (Subtarget.hasStdExtZfh()) {
129       addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass);
130       addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass);
131       addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass);
132       addRegisterClass(RISCVVMVTs::vfloat16m2_t, &RISCV::VRM2RegClass);
133       addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass);
134       addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass);
135     }
136 
137     if (Subtarget.hasStdExtF()) {
138       addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
139       addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
140       addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
141       addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
142       addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);
143     }
144 
145     if (Subtarget.hasStdExtD()) {
146       addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
147       addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
148       addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
149       addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
150     }
151   }
152 
153   // Compute derived properties from the register classes.
154   computeRegisterProperties(STI.getRegisterInfo());
155 
156   setStackPointerRegisterToSaveRestore(RISCV::X2);
157 
158   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
159     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
160 
161   // TODO: add all necessary setOperationAction calls.
162   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
163 
164   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
165   setOperationAction(ISD::BR_CC, XLenVT, Expand);
166   setOperationAction(ISD::SELECT, XLenVT, Custom);
167   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
168 
169   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
170   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
171 
172   setOperationAction(ISD::VASTART, MVT::Other, Custom);
173   setOperationAction(ISD::VAARG, MVT::Other, Expand);
174   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
175   setOperationAction(ISD::VAEND, MVT::Other, Expand);
176 
177   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
178   if (!Subtarget.hasStdExtZbb()) {
179     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
180     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
181   }
182 
183   if (Subtarget.is64Bit()) {
184     setOperationAction(ISD::ADD, MVT::i32, Custom);
185     setOperationAction(ISD::SUB, MVT::i32, Custom);
186     setOperationAction(ISD::SHL, MVT::i32, Custom);
187     setOperationAction(ISD::SRA, MVT::i32, Custom);
188     setOperationAction(ISD::SRL, MVT::i32, Custom);
189   }
190 
191   if (!Subtarget.hasStdExtM()) {
192     setOperationAction(ISD::MUL, XLenVT, Expand);
193     setOperationAction(ISD::MULHS, XLenVT, Expand);
194     setOperationAction(ISD::MULHU, XLenVT, Expand);
195     setOperationAction(ISD::SDIV, XLenVT, Expand);
196     setOperationAction(ISD::UDIV, XLenVT, Expand);
197     setOperationAction(ISD::SREM, XLenVT, Expand);
198     setOperationAction(ISD::UREM, XLenVT, Expand);
199   }
200 
201   if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
202     setOperationAction(ISD::MUL, MVT::i32, Custom);
203     setOperationAction(ISD::SDIV, MVT::i32, Custom);
204     setOperationAction(ISD::UDIV, MVT::i32, Custom);
205     setOperationAction(ISD::UREM, MVT::i32, Custom);
206   }
207 
208   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
209   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
210   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
211   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
212 
213   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
214   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
215   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
216 
217   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
218     if (Subtarget.is64Bit()) {
219       setOperationAction(ISD::ROTL, MVT::i32, Custom);
220       setOperationAction(ISD::ROTR, MVT::i32, Custom);
221     }
222   } else {
223     setOperationAction(ISD::ROTL, XLenVT, Expand);
224     setOperationAction(ISD::ROTR, XLenVT, Expand);
225   }
226 
227   if (Subtarget.hasStdExtZbp()) {
228     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
229     setOperationAction(ISD::BSWAP, XLenVT, Custom);
230 
231     if (Subtarget.is64Bit()) {
232       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
233       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
234     }
235   } else {
236     setOperationAction(ISD::BSWAP, XLenVT, Expand);
237   }
238 
239   if (Subtarget.hasStdExtZbb()) {
240     setOperationAction(ISD::SMIN, XLenVT, Legal);
241     setOperationAction(ISD::SMAX, XLenVT, Legal);
242     setOperationAction(ISD::UMIN, XLenVT, Legal);
243     setOperationAction(ISD::UMAX, XLenVT, Legal);
244   } else {
245     setOperationAction(ISD::CTTZ, XLenVT, Expand);
246     setOperationAction(ISD::CTLZ, XLenVT, Expand);
247     setOperationAction(ISD::CTPOP, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbt()) {
251     setOperationAction(ISD::FSHL, XLenVT, Legal);
252     setOperationAction(ISD::FSHR, XLenVT, Legal);
253 
254     if (Subtarget.is64Bit()) {
255       setOperationAction(ISD::FSHL, MVT::i32, Custom);
256       setOperationAction(ISD::FSHR, MVT::i32, Custom);
257     }
258   }
259 
260   ISD::CondCode FPCCToExpand[] = {
261       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
262       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
263       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
264 
265   ISD::NodeType FPOpToExpand[] = {
266       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
267       ISD::FP_TO_FP16};
268 
269   if (Subtarget.hasStdExtZfh())
270     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
271 
272   if (Subtarget.hasStdExtZfh()) {
273     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
274     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
275     for (auto CC : FPCCToExpand)
276       setCondCodeAction(CC, MVT::f16, Expand);
277     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
278     setOperationAction(ISD::SELECT, MVT::f16, Custom);
279     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
280     for (auto Op : FPOpToExpand)
281       setOperationAction(Op, MVT::f16, Expand);
282   }
283 
284   if (Subtarget.hasStdExtF()) {
285     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
286     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
287     for (auto CC : FPCCToExpand)
288       setCondCodeAction(CC, MVT::f32, Expand);
289     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
290     setOperationAction(ISD::SELECT, MVT::f32, Custom);
291     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
292     for (auto Op : FPOpToExpand)
293       setOperationAction(Op, MVT::f32, Expand);
294     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
295     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
296   }
297 
298   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
299     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
300 
301   if (Subtarget.hasStdExtD()) {
302     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
303     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
304     for (auto CC : FPCCToExpand)
305       setCondCodeAction(CC, MVT::f64, Expand);
306     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
307     setOperationAction(ISD::SELECT, MVT::f64, Custom);
308     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
309     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
310     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
311     for (auto Op : FPOpToExpand)
312       setOperationAction(Op, MVT::f64, Expand);
313     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
314     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
315   }
316 
317   if (Subtarget.is64Bit()) {
318     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
319     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
320     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
321     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
322   }
323 
324   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
325   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
326   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
327   setOperationAction(ISD::JumpTable, XLenVT, Custom);
328 
329   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
330 
331   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
332   // Unfortunately this can't be determined just from the ISA naming string.
333   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
334                      Subtarget.is64Bit() ? Legal : Custom);
335 
336   setOperationAction(ISD::TRAP, MVT::Other, Legal);
337   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
338   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
339 
340   if (Subtarget.hasStdExtA()) {
341     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
342     setMinCmpXchgSizeInBits(32);
343   } else {
344     setMaxAtomicSizeInBitsSupported(0);
345   }
346 
347   setBooleanContents(ZeroOrOneBooleanContent);
348 
349   if (Subtarget.hasStdExtV()) {
350     setBooleanVectorContents(ZeroOrOneBooleanContent);
351 
352     setOperationAction(ISD::VSCALE, XLenVT, Custom);
353 
354     // RVV intrinsics may have illegal operands.
355     // We also need to custom legalize vmv.x.s.
356     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
357     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
358     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
359     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
360     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
361     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
362 
363     if (Subtarget.is64Bit()) {
364       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
365       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
366     }
367 
368     for (auto VT : MVT::integer_scalable_vector_valuetypes()) {
369       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
370 
371       setOperationAction(ISD::SMIN, VT, Legal);
372       setOperationAction(ISD::SMAX, VT, Legal);
373       setOperationAction(ISD::UMIN, VT, Legal);
374       setOperationAction(ISD::UMAX, VT, Legal);
375     }
376 
377     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
378     if (!Subtarget.is64Bit())
379       setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
380 
381     // Expand various CCs to best match the RVV ISA, which natively supports UNE
382     // but no other unordered comparisons, and supports all ordered comparisons
383     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
384     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
385     // and we pattern-match those back to the "original", swapping operands once
386     // more. This way we catch both operations and both "vf" and "fv" forms with
387     // fewer patterns.
388     ISD::CondCode VFPCCToExpand[] = {
389         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
390         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
391         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
392     };
393 
394     if (Subtarget.hasStdExtZfh()) {
395       for (auto VT : {RISCVVMVTs::vfloat16mf4_t, RISCVVMVTs::vfloat16mf2_t,
396                       RISCVVMVTs::vfloat16m1_t, RISCVVMVTs::vfloat16m2_t,
397                       RISCVVMVTs::vfloat16m4_t, RISCVVMVTs::vfloat16m8_t}) {
398         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
399         for (auto CC : VFPCCToExpand)
400           setCondCodeAction(CC, VT, Expand);
401       }
402     }
403 
404     if (Subtarget.hasStdExtF()) {
405       for (auto VT : {RISCVVMVTs::vfloat32mf2_t, RISCVVMVTs::vfloat32m1_t,
406                       RISCVVMVTs::vfloat32m2_t, RISCVVMVTs::vfloat32m4_t,
407                       RISCVVMVTs::vfloat32m8_t}) {
408         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
409         for (auto CC : VFPCCToExpand)
410           setCondCodeAction(CC, VT, Expand);
411       }
412     }
413 
414     if (Subtarget.hasStdExtD()) {
415       for (auto VT : {RISCVVMVTs::vfloat64m1_t, RISCVVMVTs::vfloat64m2_t,
416                       RISCVVMVTs::vfloat64m4_t, RISCVVMVTs::vfloat64m8_t}) {
417         setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
418         for (auto CC : VFPCCToExpand)
419           setCondCodeAction(CC, VT, Expand);
420       }
421     }
422   }
423 
424   // Function alignments.
425   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
426   setMinFunctionAlignment(FunctionAlignment);
427   setPrefFunctionAlignment(FunctionAlignment);
428 
429   setMinimumJumpTableEntries(5);
430 
431   // Jumps are expensive, compared to logic
432   setJumpIsExpensive();
433 
434   // We can use any register for comparisons
435   setHasMultipleConditionRegisters();
436 
437   if (Subtarget.hasStdExtZbp()) {
438     setTargetDAGCombine(ISD::OR);
439   }
440 }
441 
442 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
443                                             EVT VT) const {
444   if (!VT.isVector())
445     return getPointerTy(DL);
446   if (Subtarget.hasStdExtV())
447     return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
448   return VT.changeVectorElementTypeToInteger();
449 }
450 
451 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
452                                              const CallInst &I,
453                                              MachineFunction &MF,
454                                              unsigned Intrinsic) const {
455   switch (Intrinsic) {
456   default:
457     return false;
458   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
459   case Intrinsic::riscv_masked_atomicrmw_add_i32:
460   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
461   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
462   case Intrinsic::riscv_masked_atomicrmw_max_i32:
463   case Intrinsic::riscv_masked_atomicrmw_min_i32:
464   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
465   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
466   case Intrinsic::riscv_masked_cmpxchg_i32:
467     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
468     Info.opc = ISD::INTRINSIC_W_CHAIN;
469     Info.memVT = MVT::getVT(PtrTy->getElementType());
470     Info.ptrVal = I.getArgOperand(0);
471     Info.offset = 0;
472     Info.align = Align(4);
473     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
474                  MachineMemOperand::MOVolatile;
475     return true;
476   }
477 }
478 
479 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
480                                                 const AddrMode &AM, Type *Ty,
481                                                 unsigned AS,
482                                                 Instruction *I) const {
483   // No global is ever allowed as a base.
484   if (AM.BaseGV)
485     return false;
486 
487   // Require a 12-bit signed offset.
488   if (!isInt<12>(AM.BaseOffs))
489     return false;
490 
491   switch (AM.Scale) {
492   case 0: // "r+i" or just "i", depending on HasBaseReg.
493     break;
494   case 1:
495     if (!AM.HasBaseReg) // allow "r+i".
496       break;
497     return false; // disallow "r+r" or "r+r+i".
498   default:
499     return false;
500   }
501 
502   return true;
503 }
504 
505 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
506   return isInt<12>(Imm);
507 }
508 
509 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
510   return isInt<12>(Imm);
511 }
512 
513 // On RV32, 64-bit integers are split into their high and low parts and held
514 // in two different registers, so the trunc is free since the low register can
515 // just be used.
516 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
517   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
518     return false;
519   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
520   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
521   return (SrcBits == 64 && DestBits == 32);
522 }
523 
524 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
525   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
526       !SrcVT.isInteger() || !DstVT.isInteger())
527     return false;
528   unsigned SrcBits = SrcVT.getSizeInBits();
529   unsigned DestBits = DstVT.getSizeInBits();
530   return (SrcBits == 64 && DestBits == 32);
531 }
532 
533 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
534   // Zexts are free if they can be combined with a load.
535   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
536     EVT MemVT = LD->getMemoryVT();
537     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
538          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
539         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
540          LD->getExtensionType() == ISD::ZEXTLOAD))
541       return true;
542   }
543 
544   return TargetLowering::isZExtFree(Val, VT2);
545 }
546 
547 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
548   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
549 }
550 
551 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
552   return Subtarget.hasStdExtZbb();
553 }
554 
555 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
556   return Subtarget.hasStdExtZbb();
557 }
558 
559 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
560                                        bool ForCodeSize) const {
561   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
562     return false;
563   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
564     return false;
565   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
566     return false;
567   if (Imm.isNegZero())
568     return false;
569   return Imm.isZero();
570 }
571 
572 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
573   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
574          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
575          (VT == MVT::f64 && Subtarget.hasStdExtD());
576 }
577 
578 // Changes the condition code and swaps operands if necessary, so the SetCC
579 // operation matches one of the comparisons supported directly in the RISC-V
580 // ISA.
581 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
582   switch (CC) {
583   default:
584     break;
585   case ISD::SETGT:
586   case ISD::SETLE:
587   case ISD::SETUGT:
588   case ISD::SETULE:
589     CC = ISD::getSetCCSwappedOperands(CC);
590     std::swap(LHS, RHS);
591     break;
592   }
593 }
594 
595 // Return the RISC-V branch opcode that matches the given DAG integer
596 // condition code. The CondCode must be one of those supported by the RISC-V
597 // ISA (see normaliseSetCC).
598 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
599   switch (CC) {
600   default:
601     llvm_unreachable("Unsupported CondCode");
602   case ISD::SETEQ:
603     return RISCV::BEQ;
604   case ISD::SETNE:
605     return RISCV::BNE;
606   case ISD::SETLT:
607     return RISCV::BLT;
608   case ISD::SETGE:
609     return RISCV::BGE;
610   case ISD::SETULT:
611     return RISCV::BLTU;
612   case ISD::SETUGE:
613     return RISCV::BGEU;
614   }
615 }
616 
617 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
618                                             SelectionDAG &DAG) const {
619   switch (Op.getOpcode()) {
620   default:
621     report_fatal_error("unimplemented operand");
622   case ISD::GlobalAddress:
623     return lowerGlobalAddress(Op, DAG);
624   case ISD::BlockAddress:
625     return lowerBlockAddress(Op, DAG);
626   case ISD::ConstantPool:
627     return lowerConstantPool(Op, DAG);
628   case ISD::JumpTable:
629     return lowerJumpTable(Op, DAG);
630   case ISD::GlobalTLSAddress:
631     return lowerGlobalTLSAddress(Op, DAG);
632   case ISD::SELECT:
633     return lowerSELECT(Op, DAG);
634   case ISD::VASTART:
635     return lowerVASTART(Op, DAG);
636   case ISD::FRAMEADDR:
637     return lowerFRAMEADDR(Op, DAG);
638   case ISD::RETURNADDR:
639     return lowerRETURNADDR(Op, DAG);
640   case ISD::SHL_PARTS:
641     return lowerShiftLeftParts(Op, DAG);
642   case ISD::SRA_PARTS:
643     return lowerShiftRightParts(Op, DAG, true);
644   case ISD::SRL_PARTS:
645     return lowerShiftRightParts(Op, DAG, false);
646   case ISD::BITCAST: {
647     assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
648             Subtarget.hasStdExtZfh()) &&
649            "Unexpected custom legalisation");
650     SDLoc DL(Op);
651     SDValue Op0 = Op.getOperand(0);
652     if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
653       if (Op0.getValueType() != MVT::i16)
654         return SDValue();
655       SDValue NewOp0 =
656           DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
657       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
658       return FPConv;
659     } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
660                Subtarget.hasStdExtF()) {
661       if (Op0.getValueType() != MVT::i32)
662         return SDValue();
663       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
664       SDValue FPConv =
665           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
666       return FPConv;
667     }
668     return SDValue();
669   }
670   case ISD::INTRINSIC_WO_CHAIN:
671     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
672   case ISD::INTRINSIC_W_CHAIN:
673     return LowerINTRINSIC_W_CHAIN(Op, DAG);
674   case ISD::BSWAP:
675   case ISD::BITREVERSE: {
676     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
677     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
678     MVT VT = Op.getSimpleValueType();
679     SDLoc DL(Op);
680     // Start with the maximum immediate value which is the bitwidth - 1.
681     unsigned Imm = VT.getSizeInBits() - 1;
682     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
683     if (Op.getOpcode() == ISD::BSWAP)
684       Imm &= ~0x7U;
685     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
686                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
687   }
688   case ISD::SPLAT_VECTOR:
689     return lowerSPLATVECTOR(Op, DAG);
690   case ISD::VSCALE: {
691     MVT VT = Op.getSimpleValueType();
692     SDLoc DL(Op);
693     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
694     // We define our scalable vector types for lmul=1 to use a 64 bit known
695     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
696     // vscale as VLENB / 8.
697     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
698                                  DAG.getConstant(3, DL, VT));
699     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
700   }
701   }
702 }
703 
704 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
705                              SelectionDAG &DAG, unsigned Flags) {
706   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
707 }
708 
709 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
710                              SelectionDAG &DAG, unsigned Flags) {
711   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
712                                    Flags);
713 }
714 
715 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
716                              SelectionDAG &DAG, unsigned Flags) {
717   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
718                                    N->getOffset(), Flags);
719 }
720 
721 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
722                              SelectionDAG &DAG, unsigned Flags) {
723   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
724 }
725 
726 template <class NodeTy>
727 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
728                                      bool IsLocal) const {
729   SDLoc DL(N);
730   EVT Ty = getPointerTy(DAG.getDataLayout());
731 
732   if (isPositionIndependent()) {
733     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
734     if (IsLocal)
735       // Use PC-relative addressing to access the symbol. This generates the
736       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
737       // %pcrel_lo(auipc)).
738       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
739 
740     // Use PC-relative addressing to access the GOT for this symbol, then load
741     // the address from the GOT. This generates the pattern (PseudoLA sym),
742     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
743     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
744   }
745 
746   switch (getTargetMachine().getCodeModel()) {
747   default:
748     report_fatal_error("Unsupported code model for lowering");
749   case CodeModel::Small: {
750     // Generate a sequence for accessing addresses within the first 2 GiB of
751     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
752     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
753     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
754     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
755     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
756   }
757   case CodeModel::Medium: {
758     // Generate a sequence for accessing addresses within any 2GiB range within
759     // the address space. This generates the pattern (PseudoLLA sym), which
760     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
761     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
762     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
763   }
764   }
765 }
766 
767 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
768                                                 SelectionDAG &DAG) const {
769   SDLoc DL(Op);
770   EVT Ty = Op.getValueType();
771   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
772   int64_t Offset = N->getOffset();
773   MVT XLenVT = Subtarget.getXLenVT();
774 
775   const GlobalValue *GV = N->getGlobal();
776   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
777   SDValue Addr = getAddr(N, DAG, IsLocal);
778 
779   // In order to maximise the opportunity for common subexpression elimination,
780   // emit a separate ADD node for the global address offset instead of folding
781   // it in the global address node. Later peephole optimisations may choose to
782   // fold it back in when profitable.
783   if (Offset != 0)
784     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
785                        DAG.getConstant(Offset, DL, XLenVT));
786   return Addr;
787 }
788 
789 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
790                                                SelectionDAG &DAG) const {
791   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
792 
793   return getAddr(N, DAG);
794 }
795 
796 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
797                                                SelectionDAG &DAG) const {
798   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
799 
800   return getAddr(N, DAG);
801 }
802 
803 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
804                                             SelectionDAG &DAG) const {
805   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
806 
807   return getAddr(N, DAG);
808 }
809 
810 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
811                                               SelectionDAG &DAG,
812                                               bool UseGOT) const {
813   SDLoc DL(N);
814   EVT Ty = getPointerTy(DAG.getDataLayout());
815   const GlobalValue *GV = N->getGlobal();
816   MVT XLenVT = Subtarget.getXLenVT();
817 
818   if (UseGOT) {
819     // Use PC-relative addressing to access the GOT for this TLS symbol, then
820     // load the address from the GOT and add the thread pointer. This generates
821     // the pattern (PseudoLA_TLS_IE sym), which expands to
822     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
823     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
824     SDValue Load =
825         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
826 
827     // Add the thread pointer.
828     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
829     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
830   }
831 
832   // Generate a sequence for accessing the address relative to the thread
833   // pointer, with the appropriate adjustment for the thread pointer offset.
834   // This generates the pattern
835   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
836   SDValue AddrHi =
837       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
838   SDValue AddrAdd =
839       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
840   SDValue AddrLo =
841       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
842 
843   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
844   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
845   SDValue MNAdd = SDValue(
846       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
847       0);
848   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
849 }
850 
851 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
852                                                SelectionDAG &DAG) const {
853   SDLoc DL(N);
854   EVT Ty = getPointerTy(DAG.getDataLayout());
855   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
856   const GlobalValue *GV = N->getGlobal();
857 
858   // Use a PC-relative addressing mode to access the global dynamic GOT address.
859   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
860   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
861   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
862   SDValue Load =
863       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
864 
865   // Prepare argument list to generate call.
866   ArgListTy Args;
867   ArgListEntry Entry;
868   Entry.Node = Load;
869   Entry.Ty = CallTy;
870   Args.push_back(Entry);
871 
872   // Setup call to __tls_get_addr.
873   TargetLowering::CallLoweringInfo CLI(DAG);
874   CLI.setDebugLoc(DL)
875       .setChain(DAG.getEntryNode())
876       .setLibCallee(CallingConv::C, CallTy,
877                     DAG.getExternalSymbol("__tls_get_addr", Ty),
878                     std::move(Args));
879 
880   return LowerCallTo(CLI).first;
881 }
882 
883 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
884                                                    SelectionDAG &DAG) const {
885   SDLoc DL(Op);
886   EVT Ty = Op.getValueType();
887   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
888   int64_t Offset = N->getOffset();
889   MVT XLenVT = Subtarget.getXLenVT();
890 
891   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
892 
893   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
894       CallingConv::GHC)
895     report_fatal_error("In GHC calling convention TLS is not supported");
896 
897   SDValue Addr;
898   switch (Model) {
899   case TLSModel::LocalExec:
900     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
901     break;
902   case TLSModel::InitialExec:
903     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
904     break;
905   case TLSModel::LocalDynamic:
906   case TLSModel::GeneralDynamic:
907     Addr = getDynamicTLSAddr(N, DAG);
908     break;
909   }
910 
911   // In order to maximise the opportunity for common subexpression elimination,
912   // emit a separate ADD node for the global address offset instead of folding
913   // it in the global address node. Later peephole optimisations may choose to
914   // fold it back in when profitable.
915   if (Offset != 0)
916     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
917                        DAG.getConstant(Offset, DL, XLenVT));
918   return Addr;
919 }
920 
921 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
922   SDValue CondV = Op.getOperand(0);
923   SDValue TrueV = Op.getOperand(1);
924   SDValue FalseV = Op.getOperand(2);
925   SDLoc DL(Op);
926   MVT XLenVT = Subtarget.getXLenVT();
927 
928   // If the result type is XLenVT and CondV is the output of a SETCC node
929   // which also operated on XLenVT inputs, then merge the SETCC node into the
930   // lowered RISCVISD::SELECT_CC to take advantage of the integer
931   // compare+branch instructions. i.e.:
932   // (select (setcc lhs, rhs, cc), truev, falsev)
933   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
934   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
935       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
936     SDValue LHS = CondV.getOperand(0);
937     SDValue RHS = CondV.getOperand(1);
938     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
939     ISD::CondCode CCVal = CC->get();
940 
941     normaliseSetCC(LHS, RHS, CCVal);
942 
943     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
944     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
945     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
946   }
947 
948   // Otherwise:
949   // (select condv, truev, falsev)
950   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
951   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
952   SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
953 
954   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
955 
956   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
957 }
958 
959 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
960   MachineFunction &MF = DAG.getMachineFunction();
961   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
962 
963   SDLoc DL(Op);
964   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
965                                  getPointerTy(MF.getDataLayout()));
966 
967   // vastart just stores the address of the VarArgsFrameIndex slot into the
968   // memory location argument.
969   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
970   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
971                       MachinePointerInfo(SV));
972 }
973 
974 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
975                                             SelectionDAG &DAG) const {
976   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
977   MachineFunction &MF = DAG.getMachineFunction();
978   MachineFrameInfo &MFI = MF.getFrameInfo();
979   MFI.setFrameAddressIsTaken(true);
980   Register FrameReg = RI.getFrameRegister(MF);
981   int XLenInBytes = Subtarget.getXLen() / 8;
982 
983   EVT VT = Op.getValueType();
984   SDLoc DL(Op);
985   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
986   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
987   while (Depth--) {
988     int Offset = -(XLenInBytes * 2);
989     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
990                               DAG.getIntPtrConstant(Offset, DL));
991     FrameAddr =
992         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
993   }
994   return FrameAddr;
995 }
996 
997 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
998                                              SelectionDAG &DAG) const {
999   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
1000   MachineFunction &MF = DAG.getMachineFunction();
1001   MachineFrameInfo &MFI = MF.getFrameInfo();
1002   MFI.setReturnAddressIsTaken(true);
1003   MVT XLenVT = Subtarget.getXLenVT();
1004   int XLenInBytes = Subtarget.getXLen() / 8;
1005 
1006   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1007     return SDValue();
1008 
1009   EVT VT = Op.getValueType();
1010   SDLoc DL(Op);
1011   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1012   if (Depth) {
1013     int Off = -XLenInBytes;
1014     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
1015     SDValue Offset = DAG.getConstant(Off, DL, VT);
1016     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
1017                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
1018                        MachinePointerInfo());
1019   }
1020 
1021   // Return the value of the return address register, marking it an implicit
1022   // live-in.
1023   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
1024   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
1025 }
1026 
1027 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
1028                                                  SelectionDAG &DAG) const {
1029   SDLoc DL(Op);
1030   SDValue Lo = Op.getOperand(0);
1031   SDValue Hi = Op.getOperand(1);
1032   SDValue Shamt = Op.getOperand(2);
1033   EVT VT = Lo.getValueType();
1034 
1035   // if Shamt-XLEN < 0: // Shamt < XLEN
1036   //   Lo = Lo << Shamt
1037   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
1038   // else:
1039   //   Lo = 0
1040   //   Hi = Lo << (Shamt-XLEN)
1041 
1042   SDValue Zero = DAG.getConstant(0, DL, VT);
1043   SDValue One = DAG.getConstant(1, DL, VT);
1044   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1045   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1046   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1047   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1048 
1049   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
1050   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
1051   SDValue ShiftRightLo =
1052       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
1053   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
1054   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
1055   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
1056 
1057   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1058 
1059   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
1060   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1061 
1062   SDValue Parts[2] = {Lo, Hi};
1063   return DAG.getMergeValues(Parts, DL);
1064 }
1065 
1066 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
1067                                                   bool IsSRA) const {
1068   SDLoc DL(Op);
1069   SDValue Lo = Op.getOperand(0);
1070   SDValue Hi = Op.getOperand(1);
1071   SDValue Shamt = Op.getOperand(2);
1072   EVT VT = Lo.getValueType();
1073 
1074   // SRA expansion:
1075   //   if Shamt-XLEN < 0: // Shamt < XLEN
1076   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1077   //     Hi = Hi >>s Shamt
1078   //   else:
1079   //     Lo = Hi >>s (Shamt-XLEN);
1080   //     Hi = Hi >>s (XLEN-1)
1081   //
1082   // SRL expansion:
1083   //   if Shamt-XLEN < 0: // Shamt < XLEN
1084   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
1085   //     Hi = Hi >>u Shamt
1086   //   else:
1087   //     Lo = Hi >>u (Shamt-XLEN);
1088   //     Hi = 0;
1089 
1090   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
1091 
1092   SDValue Zero = DAG.getConstant(0, DL, VT);
1093   SDValue One = DAG.getConstant(1, DL, VT);
1094   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
1095   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
1096   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
1097   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
1098 
1099   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
1100   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
1101   SDValue ShiftLeftHi =
1102       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
1103   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
1104   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
1105   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
1106   SDValue HiFalse =
1107       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
1108 
1109   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
1110 
1111   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
1112   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
1113 
1114   SDValue Parts[2] = {Lo, Hi};
1115   return DAG.getMergeValues(Parts, DL);
1116 }
1117 
1118 // Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
1119 // illegal (currently only vXi64 RV32).
1120 // FIXME: We could also catch non-constant sign-extended i32 values and lower
1121 // them to SPLAT_VECTOR_I64
1122 SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
1123                                               SelectionDAG &DAG) const {
1124   SDLoc DL(Op);
1125   EVT VecVT = Op.getValueType();
1126   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
1127          "Unexpected SPLAT_VECTOR lowering");
1128   SDValue SplatVal = Op.getOperand(0);
1129 
1130   // If we can prove that the value is a sign-extended 32-bit value, lower this
1131   // as a custom node in order to try and match RVV vector/scalar instructions.
1132   if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
1133     if (isInt<32>(CVal->getSExtValue()))
1134       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
1135                          DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
1136   }
1137 
1138   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
1139   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
1140   // vmv.v.x vX, hi
1141   // vsll.vx vX, vX, /*32*/
1142   // vmv.v.x vY, lo
1143   // vsll.vx vY, vY, /*32*/
1144   // vsrl.vx vY, vY, /*32*/
1145   // vor.vv vX, vX, vY
1146   SDValue One = DAG.getConstant(1, DL, MVT::i32);
1147   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
1148   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
1149   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
1150   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);
1151 
1152   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
1153   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
1154   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
1155 
1156   if (isNullConstant(Hi))
1157     return Lo;
1158 
1159   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
1160   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
1161 
1162   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
1163 }
1164 
1165 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
1166                                                      SelectionDAG &DAG) const {
1167   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1168   SDLoc DL(Op);
1169 
1170   if (Subtarget.hasStdExtV()) {
1171     // Some RVV intrinsics may claim that they want an integer operand to be
1172     // extended.
1173     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1174             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
1175       if (II->ExtendedOperand) {
1176         assert(II->ExtendedOperand < Op.getNumOperands());
1177         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
1178         SDValue &ScalarOp = Operands[II->ExtendedOperand];
1179         EVT OpVT = ScalarOp.getValueType();
1180         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
1181             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
1182           // If the operand is a constant, sign extend to increase our chances
1183           // of being able to use a .vi instruction. ANY_EXTEND would become a
1184           // a zero extend and the simm5 check in isel would fail.
1185           // FIXME: Should we ignore the upper bits in isel instead?
1186           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
1187                                                           : ISD::ANY_EXTEND;
1188           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
1189           return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
1190                              Operands);
1191         }
1192       }
1193     }
1194   }
1195 
1196   switch (IntNo) {
1197   default:
1198     return SDValue();    // Don't custom lower most intrinsics.
1199   case Intrinsic::thread_pointer: {
1200     EVT PtrVT = getPointerTy(DAG.getDataLayout());
1201     return DAG.getRegister(RISCV::X4, PtrVT);
1202   }
1203   case Intrinsic::riscv_vmv_x_s:
1204     assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
1205     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
1206                        Op.getOperand(1));
1207   }
1208 }
1209 
1210 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
1211                                                     SelectionDAG &DAG) const {
1212   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1213   SDLoc DL(Op);
1214 
1215   if (Subtarget.hasStdExtV()) {
1216     // Some RVV intrinsics may claim that they want an integer operand to be
1217     // extended.
1218     if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1219             RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
1220       if (II->ExtendedOperand) {
1221         // The operands start from the second argument in INTRINSIC_W_CHAIN.
1222         unsigned ExtendOp = II->ExtendedOperand + 1;
1223         assert(ExtendOp < Op.getNumOperands());
1224         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
1225         SDValue &ScalarOp = Operands[ExtendOp];
1226         EVT OpVT = ScalarOp.getValueType();
1227         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
1228             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
1229           // If the operand is a constant, sign extend to increase our chances
1230           // of being able to use a .vi instruction. ANY_EXTEND would become a
1231           // a zero extend and the simm5 check in isel would fail.
1232           // FIXME: Should we ignore the upper bits in isel instead?
1233           unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
1234                                                           : ISD::ANY_EXTEND;
1235           ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
1236           return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
1237                              Operands);
1238         }
1239       }
1240     }
1241   }
1242 
1243   return SDValue();
1244 }
1245 
1246 // Returns the opcode of the target-specific SDNode that implements the 32-bit
1247 // form of the given Opcode.
1248 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
1249   switch (Opcode) {
1250   default:
1251     llvm_unreachable("Unexpected opcode");
1252   case ISD::SHL:
1253     return RISCVISD::SLLW;
1254   case ISD::SRA:
1255     return RISCVISD::SRAW;
1256   case ISD::SRL:
1257     return RISCVISD::SRLW;
1258   case ISD::SDIV:
1259     return RISCVISD::DIVW;
1260   case ISD::UDIV:
1261     return RISCVISD::DIVUW;
1262   case ISD::UREM:
1263     return RISCVISD::REMUW;
1264   case ISD::ROTL:
1265     return RISCVISD::ROLW;
1266   case ISD::ROTR:
1267     return RISCVISD::RORW;
1268   case RISCVISD::GREVI:
1269     return RISCVISD::GREVIW;
1270   case RISCVISD::GORCI:
1271     return RISCVISD::GORCIW;
1272   }
1273 }
1274 
1275 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
1276 // Because i32 isn't a legal type for RV64, these operations would otherwise
1277 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
1278 // later one because the fact the operation was originally of type i32 is
1279 // lost.
1280 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
1281   SDLoc DL(N);
1282   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
1283   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1284   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1285   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
1286   // ReplaceNodeResults requires we maintain the same type for the return value.
1287   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
1288 }
1289 
1290 // Converts the given 32-bit operation to a i64 operation with signed extension
1291 // semantic to reduce the signed extension instructions.
1292 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
1293   SDLoc DL(N);
1294   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1295   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1296   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
1297   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
1298                                DAG.getValueType(MVT::i32));
1299   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
1300 }
1301 
1302 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1303                                              SmallVectorImpl<SDValue> &Results,
1304                                              SelectionDAG &DAG) const {
1305   SDLoc DL(N);
1306   switch (N->getOpcode()) {
1307   default:
1308     llvm_unreachable("Don't know how to custom type legalize this operation!");
1309   case ISD::STRICT_FP_TO_SINT:
1310   case ISD::STRICT_FP_TO_UINT:
1311   case ISD::FP_TO_SINT:
1312   case ISD::FP_TO_UINT: {
1313     bool IsStrict = N->isStrictFPOpcode();
1314     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1315            "Unexpected custom legalisation");
1316     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
1317     // If the FP type needs to be softened, emit a library call using the 'si'
1318     // version. If we left it to default legalization we'd end up with 'di'. If
1319     // the FP type doesn't need to be softened just let generic type
1320     // legalization promote the result type.
1321     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
1322         TargetLowering::TypeSoftenFloat)
1323       return;
1324     RTLIB::Libcall LC;
1325     if (N->getOpcode() == ISD::FP_TO_SINT ||
1326         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
1327       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
1328     else
1329       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
1330     MakeLibCallOptions CallOptions;
1331     EVT OpVT = Op0.getValueType();
1332     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
1333     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
1334     SDValue Result;
1335     std::tie(Result, Chain) =
1336         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
1337     Results.push_back(Result);
1338     if (IsStrict)
1339       Results.push_back(Chain);
1340     break;
1341   }
1342   case ISD::READCYCLECOUNTER: {
1343     assert(!Subtarget.is64Bit() &&
1344            "READCYCLECOUNTER only has custom type legalization on riscv32");
1345 
1346     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
1347     SDValue RCW =
1348         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
1349 
1350     Results.push_back(
1351         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
1352     Results.push_back(RCW.getValue(2));
1353     break;
1354   }
1355   case ISD::ADD:
1356   case ISD::SUB:
1357   case ISD::MUL:
1358     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1359            "Unexpected custom legalisation");
1360     if (N->getOperand(1).getOpcode() == ISD::Constant)
1361       return;
1362     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
1363     break;
1364   case ISD::SHL:
1365   case ISD::SRA:
1366   case ISD::SRL:
1367     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1368            "Unexpected custom legalisation");
1369     if (N->getOperand(1).getOpcode() == ISD::Constant)
1370       return;
1371     Results.push_back(customLegalizeToWOp(N, DAG));
1372     break;
1373   case ISD::ROTL:
1374   case ISD::ROTR:
1375     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1376            "Unexpected custom legalisation");
1377     Results.push_back(customLegalizeToWOp(N, DAG));
1378     break;
1379   case ISD::SDIV:
1380   case ISD::UDIV:
1381   case ISD::UREM:
1382     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1383            Subtarget.hasStdExtM() && "Unexpected custom legalisation");
1384     if (N->getOperand(0).getOpcode() == ISD::Constant ||
1385         N->getOperand(1).getOpcode() == ISD::Constant)
1386       return;
1387     Results.push_back(customLegalizeToWOp(N, DAG));
1388     break;
1389   case ISD::BITCAST: {
1390     assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1391              Subtarget.hasStdExtF()) ||
1392             (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
1393            "Unexpected custom legalisation");
1394     SDValue Op0 = N->getOperand(0);
1395     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
1396       if (Op0.getValueType() != MVT::f16)
1397         return;
1398       SDValue FPConv =
1399           DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
1400       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
1401     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1402                Subtarget.hasStdExtF()) {
1403       if (Op0.getValueType() != MVT::f32)
1404         return;
1405       SDValue FPConv =
1406           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
1407       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
1408     }
1409     break;
1410   }
1411   case RISCVISD::GREVI:
1412   case RISCVISD::GORCI: {
1413     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1414            "Unexpected custom legalisation");
1415     // This is similar to customLegalizeToWOp, except that we pass the second
1416     // operand (a TargetConstant) straight through: it is already of type
1417     // XLenVT.
1418     SDLoc DL(N);
1419     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
1420     SDValue NewOp0 =
1421         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1422     SDValue NewRes =
1423         DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
1424     // ReplaceNodeResults requires we maintain the same type for the return
1425     // value.
1426     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
1427     break;
1428   }
1429   case ISD::BSWAP:
1430   case ISD::BITREVERSE: {
1431     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1432            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1433     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
1434                                  N->getOperand(0));
1435     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
1436     SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
1437                                  DAG.getTargetConstant(Imm, DL,
1438                                                        Subtarget.getXLenVT()));
1439     // ReplaceNodeResults requires we maintain the same type for the return
1440     // value.
1441     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
1442     break;
1443   }
1444   case ISD::FSHL:
1445   case ISD::FSHR: {
1446     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1447            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
1448     SDValue NewOp0 =
1449         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1450     SDValue NewOp1 =
1451         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1452     SDValue NewOp2 =
1453         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
1454     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
1455     // Mask the shift amount to 5 bits.
1456     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
1457                          DAG.getConstant(0x1f, DL, MVT::i64));
1458     unsigned Opc =
1459         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
1460     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
1461     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
1462     break;
1463   }
1464   case ISD::INTRINSIC_WO_CHAIN: {
1465     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
1466     switch (IntNo) {
1467     default:
1468       llvm_unreachable(
1469           "Don't know how to custom type legalize this intrinsic!");
1470     case Intrinsic::riscv_vmv_x_s: {
1471       EVT VT = N->getValueType(0);
1472       assert((VT == MVT::i8 || VT == MVT::i16 ||
1473               (Subtarget.is64Bit() && VT == MVT::i32)) &&
1474              "Unexpected custom legalisation!");
1475       SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
1476                                     Subtarget.getXLenVT(), N->getOperand(1));
1477       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
1478       break;
1479     }
1480     }
1481     break;
1482   }
1483   }
1484 }
1485 
1486 // A structure to hold one of the bit-manipulation patterns below. Together, a
1487 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
1488 //   (or (and (shl x, 1), 0xAAAAAAAA),
1489 //       (and (srl x, 1), 0x55555555))
1490 struct RISCVBitmanipPat {
1491   SDValue Op;
1492   unsigned ShAmt;
1493   bool IsSHL;
1494 
1495   bool formsPairWith(const RISCVBitmanipPat &Other) const {
1496     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
1497   }
1498 };
1499 
1500 // Matches any of the following bit-manipulation patterns:
1501 //   (and (shl x, 1), (0x55555555 << 1))
1502 //   (and (srl x, 1), 0x55555555)
1503 //   (shl (and x, 0x55555555), 1)
1504 //   (srl (and x, (0x55555555 << 1)), 1)
1505 // where the shift amount and mask may vary thus:
1506 //   [1]  = 0x55555555 / 0xAAAAAAAA
1507 //   [2]  = 0x33333333 / 0xCCCCCCCC
1508 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
1509 //   [8]  = 0x00FF00FF / 0xFF00FF00
1510 //   [16] = 0x0000FFFF / 0xFFFFFFFF
1511 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
1512 static Optional<RISCVBitmanipPat> matchRISCVBitmanipPat(SDValue Op) {
1513   Optional<uint64_t> Mask;
1514   // Optionally consume a mask around the shift operation.
1515   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
1516     Mask = Op.getConstantOperandVal(1);
1517     Op = Op.getOperand(0);
1518   }
1519   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
1520     return None;
1521   bool IsSHL = Op.getOpcode() == ISD::SHL;
1522 
1523   if (!isa<ConstantSDNode>(Op.getOperand(1)))
1524     return None;
1525   auto ShAmt = Op.getConstantOperandVal(1);
1526 
1527   if (!isPowerOf2_64(ShAmt))
1528     return None;
1529 
1530   // These are the unshifted masks which we use to match bit-manipulation
1531   // patterns. They may be shifted left in certain circumstances.
1532   static const uint64_t BitmanipMasks[] = {
1533       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
1534       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
1535   };
1536 
1537   unsigned MaskIdx = Log2_64(ShAmt);
1538   if (MaskIdx >= array_lengthof(BitmanipMasks))
1539     return None;
1540 
1541   auto Src = Op.getOperand(0);
1542 
1543   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
1544   auto ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
1545 
1546   // The expected mask is shifted left when the AND is found around SHL
1547   // patterns.
1548   //   ((x >> 1) & 0x55555555)
1549   //   ((x << 1) & 0xAAAAAAAA)
1550   bool SHLExpMask = IsSHL;
1551 
1552   if (!Mask) {
1553     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
1554     // the mask is all ones: consume that now.
1555     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
1556       Mask = Src.getConstantOperandVal(1);
1557       Src = Src.getOperand(0);
1558       // The expected mask is now in fact shifted left for SRL, so reverse the
1559       // decision.
1560       //   ((x & 0xAAAAAAAA) >> 1)
1561       //   ((x & 0x55555555) << 1)
1562       SHLExpMask = !SHLExpMask;
1563     } else {
1564       // Use a default shifted mask of all-ones if there's no AND, truncated
1565       // down to the expected width. This simplifies the logic later on.
1566       Mask = maskTrailingOnes<uint64_t>(Width);
1567       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
1568     }
1569   }
1570 
1571   if (SHLExpMask)
1572     ExpMask <<= ShAmt;
1573 
1574   if (Mask != ExpMask)
1575     return None;
1576 
1577   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
1578 }
1579 
1580 // Match the following pattern as a GREVI(W) operation
1581 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
1582 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
1583                                const RISCVSubtarget &Subtarget) {
1584   EVT VT = Op.getValueType();
1585 
1586   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1587     auto LHS = matchRISCVBitmanipPat(Op.getOperand(0));
1588     auto RHS = matchRISCVBitmanipPat(Op.getOperand(1));
1589     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
1590       SDLoc DL(Op);
1591       return DAG.getNode(
1592           RISCVISD::GREVI, DL, VT, LHS->Op,
1593           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1594     }
1595   }
1596   return SDValue();
1597 }
1598 
1599 // Matches any the following pattern as a GORCI(W) operation
1600 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
1601 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
1602 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
1603 // Note that with the variant of 3.,
1604 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
1605 // the inner pattern will first be matched as GREVI and then the outer
1606 // pattern will be matched to GORC via the first rule above.
1607 // 4.  (or (rotl/rotr x, bitwidth/2), x)
1608 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
1609                                const RISCVSubtarget &Subtarget) {
1610   EVT VT = Op.getValueType();
1611 
1612   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1613     SDLoc DL(Op);
1614     SDValue Op0 = Op.getOperand(0);
1615     SDValue Op1 = Op.getOperand(1);
1616 
1617     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
1618       if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
1619           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
1620         return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
1621       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
1622       if ((Reverse.getOpcode() == ISD::ROTL ||
1623            Reverse.getOpcode() == ISD::ROTR) &&
1624           Reverse.getOperand(0) == X &&
1625           isa<ConstantSDNode>(Reverse.getOperand(1))) {
1626         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
1627         if (RotAmt == (VT.getSizeInBits() / 2))
1628           return DAG.getNode(
1629               RISCVISD::GORCI, DL, VT, X,
1630               DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
1631       }
1632       return SDValue();
1633     };
1634 
1635     // Check for either commutable permutation of (or (GREVI x, shamt), x)
1636     if (SDValue V = MatchOROfReverse(Op0, Op1))
1637       return V;
1638     if (SDValue V = MatchOROfReverse(Op1, Op0))
1639       return V;
1640 
1641     // OR is commutable so canonicalize its OR operand to the left
1642     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
1643       std::swap(Op0, Op1);
1644     if (Op0.getOpcode() != ISD::OR)
1645       return SDValue();
1646     SDValue OrOp0 = Op0.getOperand(0);
1647     SDValue OrOp1 = Op0.getOperand(1);
1648     auto LHS = matchRISCVBitmanipPat(OrOp0);
1649     // OR is commutable so swap the operands and try again: x might have been
1650     // on the left
1651     if (!LHS) {
1652       std::swap(OrOp0, OrOp1);
1653       LHS = matchRISCVBitmanipPat(OrOp0);
1654     }
1655     auto RHS = matchRISCVBitmanipPat(Op1);
1656     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
1657       return DAG.getNode(
1658           RISCVISD::GORCI, DL, VT, LHS->Op,
1659           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1660     }
1661   }
1662   return SDValue();
1663 }
1664 
1665 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
1666 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
1667 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
1668 // not undo itself, but they are redundant.
1669 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
1670   unsigned ShAmt1 = N->getConstantOperandVal(1);
1671   SDValue Src = N->getOperand(0);
1672 
1673   if (Src.getOpcode() != N->getOpcode())
1674     return SDValue();
1675 
1676   unsigned ShAmt2 = Src.getConstantOperandVal(1);
1677   Src = Src.getOperand(0);
1678 
1679   unsigned CombinedShAmt;
1680   if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
1681     CombinedShAmt = ShAmt1 | ShAmt2;
1682   else
1683     CombinedShAmt = ShAmt1 ^ ShAmt2;
1684 
1685   if (CombinedShAmt == 0)
1686     return Src;
1687 
1688   SDLoc DL(N);
1689   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
1690                      DAG.getTargetConstant(CombinedShAmt, DL,
1691                                            N->getOperand(1).getValueType()));
1692 }
1693 
1694 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1695                                                DAGCombinerInfo &DCI) const {
1696   SelectionDAG &DAG = DCI.DAG;
1697 
1698   switch (N->getOpcode()) {
1699   default:
1700     break;
1701   case RISCVISD::SplitF64: {
1702     SDValue Op0 = N->getOperand(0);
1703     // If the input to SplitF64 is just BuildPairF64 then the operation is
1704     // redundant. Instead, use BuildPairF64's operands directly.
1705     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
1706       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
1707 
1708     SDLoc DL(N);
1709 
1710     // It's cheaper to materialise two 32-bit integers than to load a double
1711     // from the constant pool and transfer it to integer registers through the
1712     // stack.
1713     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
1714       APInt V = C->getValueAPF().bitcastToAPInt();
1715       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
1716       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
1717       return DCI.CombineTo(N, Lo, Hi);
1718     }
1719 
1720     // This is a target-specific version of a DAGCombine performed in
1721     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1722     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1723     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1724     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1725         !Op0.getNode()->hasOneUse())
1726       break;
1727     SDValue NewSplitF64 =
1728         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
1729                     Op0.getOperand(0));
1730     SDValue Lo = NewSplitF64.getValue(0);
1731     SDValue Hi = NewSplitF64.getValue(1);
1732     APInt SignBit = APInt::getSignMask(32);
1733     if (Op0.getOpcode() == ISD::FNEG) {
1734       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
1735                                   DAG.getConstant(SignBit, DL, MVT::i32));
1736       return DCI.CombineTo(N, Lo, NewHi);
1737     }
1738     assert(Op0.getOpcode() == ISD::FABS);
1739     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
1740                                 DAG.getConstant(~SignBit, DL, MVT::i32));
1741     return DCI.CombineTo(N, Lo, NewHi);
1742   }
1743   case RISCVISD::SLLW:
1744   case RISCVISD::SRAW:
1745   case RISCVISD::SRLW:
1746   case RISCVISD::ROLW:
1747   case RISCVISD::RORW: {
1748     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
1749     SDValue LHS = N->getOperand(0);
1750     SDValue RHS = N->getOperand(1);
1751     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
1752     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
1753     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
1754         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
1755       if (N->getOpcode() != ISD::DELETED_NODE)
1756         DCI.AddToWorklist(N);
1757       return SDValue(N, 0);
1758     }
1759     break;
1760   }
1761   case RISCVISD::FSLW:
1762   case RISCVISD::FSRW: {
1763     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
1764     // read.
1765     SDValue Op0 = N->getOperand(0);
1766     SDValue Op1 = N->getOperand(1);
1767     SDValue ShAmt = N->getOperand(2);
1768     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1769     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
1770     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
1771         SimplifyDemandedBits(Op1, OpMask, DCI) ||
1772         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
1773       if (N->getOpcode() != ISD::DELETED_NODE)
1774         DCI.AddToWorklist(N);
1775       return SDValue(N, 0);
1776     }
1777     break;
1778   }
1779   case RISCVISD::GREVIW:
1780   case RISCVISD::GORCIW: {
1781     // Only the lower 32 bits of the first operand are read
1782     SDValue Op0 = N->getOperand(0);
1783     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1784     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
1785       if (N->getOpcode() != ISD::DELETED_NODE)
1786         DCI.AddToWorklist(N);
1787       return SDValue(N, 0);
1788     }
1789 
1790     return combineGREVI_GORCI(N, DCI.DAG);
1791   }
1792   case RISCVISD::FMV_X_ANYEXTW_RV64: {
1793     SDLoc DL(N);
1794     SDValue Op0 = N->getOperand(0);
1795     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1796     // conversion is unnecessary and can be replaced with an ANY_EXTEND
1797     // of the FMV_W_X_RV64 operand.
1798     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
1799       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
1800              "Unexpected value type!");
1801       return Op0.getOperand(0);
1802     }
1803 
1804     // This is a target-specific version of a DAGCombine performed in
1805     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1806     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1807     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1808     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1809         !Op0.getNode()->hasOneUse())
1810       break;
1811     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1812                                  Op0.getOperand(0));
1813     APInt SignBit = APInt::getSignMask(32).sext(64);
1814     if (Op0.getOpcode() == ISD::FNEG)
1815       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1816                          DAG.getConstant(SignBit, DL, MVT::i64));
1817 
1818     assert(Op0.getOpcode() == ISD::FABS);
1819     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1820                        DAG.getConstant(~SignBit, DL, MVT::i64));
1821   }
1822   case RISCVISD::GREVI:
1823   case RISCVISD::GORCI:
1824     return combineGREVI_GORCI(N, DCI.DAG);
1825   case ISD::OR:
1826     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
1827       return GREV;
1828     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
1829       return GORC;
1830     break;
1831   }
1832 
1833   return SDValue();
1834 }
1835 
1836 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1837     const SDNode *N, CombineLevel Level) const {
1838   // The following folds are only desirable if `(OP _, c1 << c2)` can be
1839   // materialised in fewer instructions than `(OP _, c1)`:
1840   //
1841   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1842   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1843   SDValue N0 = N->getOperand(0);
1844   EVT Ty = N0.getValueType();
1845   if (Ty.isScalarInteger() &&
1846       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1847     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1848     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1849     if (C1 && C2) {
1850       APInt C1Int = C1->getAPIntValue();
1851       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1852 
1853       // We can materialise `c1 << c2` into an add immediate, so it's "free",
1854       // and the combine should happen, to potentially allow further combines
1855       // later.
1856       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1857           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1858         return true;
1859 
1860       // We can materialise `c1` in an add immediate, so it's "free", and the
1861       // combine should be prevented.
1862       if (C1Int.getMinSignedBits() <= 64 &&
1863           isLegalAddImmediate(C1Int.getSExtValue()))
1864         return false;
1865 
1866       // Neither constant will fit into an immediate, so find materialisation
1867       // costs.
1868       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1869                                               Subtarget.is64Bit());
1870       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1871           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1872 
1873       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1874       // combine should be prevented.
1875       if (C1Cost < ShiftedC1Cost)
1876         return false;
1877     }
1878   }
1879   return true;
1880 }
1881 
1882 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1883                                                         KnownBits &Known,
1884                                                         const APInt &DemandedElts,
1885                                                         const SelectionDAG &DAG,
1886                                                         unsigned Depth) const {
1887   unsigned Opc = Op.getOpcode();
1888   assert((Opc >= ISD::BUILTIN_OP_END ||
1889           Opc == ISD::INTRINSIC_WO_CHAIN ||
1890           Opc == ISD::INTRINSIC_W_CHAIN ||
1891           Opc == ISD::INTRINSIC_VOID) &&
1892          "Should use MaskedValueIsZero if you don't know whether Op"
1893          " is a target node!");
1894 
1895   Known.resetAll();
1896   switch (Opc) {
1897   default: break;
1898   case RISCVISD::READ_VLENB:
1899     // We assume VLENB is at least 8 bytes.
1900     // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits.
1901     Known.Zero.setLowBits(3);
1902     break;
1903   }
1904 }
1905 
1906 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1907     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1908     unsigned Depth) const {
1909   switch (Op.getOpcode()) {
1910   default:
1911     break;
1912   case RISCVISD::SLLW:
1913   case RISCVISD::SRAW:
1914   case RISCVISD::SRLW:
1915   case RISCVISD::DIVW:
1916   case RISCVISD::DIVUW:
1917   case RISCVISD::REMUW:
1918   case RISCVISD::ROLW:
1919   case RISCVISD::RORW:
1920   case RISCVISD::GREVIW:
1921   case RISCVISD::GORCIW:
1922   case RISCVISD::FSLW:
1923   case RISCVISD::FSRW:
1924     // TODO: As the result is sign-extended, this is conservatively correct. A
1925     // more precise answer could be calculated for SRAW depending on known
1926     // bits in the shift amount.
1927     return 33;
1928   case RISCVISD::VMV_X_S:
1929     // The number of sign bits of the scalar result is computed by obtaining the
1930     // element type of the input vector operand, substracting its width from the
1931     // XLEN, and then adding one (sign bit within the element type).
1932     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
1933   }
1934 
1935   return 1;
1936 }
1937 
1938 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1939                                                   MachineBasicBlock *BB) {
1940   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1941 
1942   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1943   // Should the count have wrapped while it was being read, we need to try
1944   // again.
1945   // ...
1946   // read:
1947   // rdcycleh x3 # load high word of cycle
1948   // rdcycle  x2 # load low word of cycle
1949   // rdcycleh x4 # load high word of cycle
1950   // bne x3, x4, read # check if high word reads match, otherwise try again
1951   // ...
1952 
1953   MachineFunction &MF = *BB->getParent();
1954   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1955   MachineFunction::iterator It = ++BB->getIterator();
1956 
1957   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1958   MF.insert(It, LoopMBB);
1959 
1960   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1961   MF.insert(It, DoneMBB);
1962 
1963   // Transfer the remainder of BB and its successor edges to DoneMBB.
1964   DoneMBB->splice(DoneMBB->begin(), BB,
1965                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
1966   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1967 
1968   BB->addSuccessor(LoopMBB);
1969 
1970   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1971   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1972   Register LoReg = MI.getOperand(0).getReg();
1973   Register HiReg = MI.getOperand(1).getReg();
1974   DebugLoc DL = MI.getDebugLoc();
1975 
1976   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1977   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1978       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1979       .addReg(RISCV::X0);
1980   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1981       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1982       .addReg(RISCV::X0);
1983   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1984       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1985       .addReg(RISCV::X0);
1986 
1987   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1988       .addReg(HiReg)
1989       .addReg(ReadAgainReg)
1990       .addMBB(LoopMBB);
1991 
1992   LoopMBB->addSuccessor(LoopMBB);
1993   LoopMBB->addSuccessor(DoneMBB);
1994 
1995   MI.eraseFromParent();
1996 
1997   return DoneMBB;
1998 }
1999 
2000 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
2001                                              MachineBasicBlock *BB) {
2002   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
2003 
2004   MachineFunction &MF = *BB->getParent();
2005   DebugLoc DL = MI.getDebugLoc();
2006   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2007   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
2008   Register LoReg = MI.getOperand(0).getReg();
2009   Register HiReg = MI.getOperand(1).getReg();
2010   Register SrcReg = MI.getOperand(2).getReg();
2011   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
2012   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
2013 
2014   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
2015                           RI);
2016   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
2017   MachineMemOperand *MMOLo =
2018       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
2019   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
2020       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
2021   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
2022       .addFrameIndex(FI)
2023       .addImm(0)
2024       .addMemOperand(MMOLo);
2025   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
2026       .addFrameIndex(FI)
2027       .addImm(4)
2028       .addMemOperand(MMOHi);
2029   MI.eraseFromParent(); // The pseudo instruction is gone now.
2030   return BB;
2031 }
2032 
2033 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
2034                                                  MachineBasicBlock *BB) {
2035   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
2036          "Unexpected instruction");
2037 
2038   MachineFunction &MF = *BB->getParent();
2039   DebugLoc DL = MI.getDebugLoc();
2040   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2041   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
2042   Register DstReg = MI.getOperand(0).getReg();
2043   Register LoReg = MI.getOperand(1).getReg();
2044   Register HiReg = MI.getOperand(2).getReg();
2045   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
2046   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
2047 
2048   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
2049   MachineMemOperand *MMOLo =
2050       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
2051   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
2052       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
2053   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
2054       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
2055       .addFrameIndex(FI)
2056       .addImm(0)
2057       .addMemOperand(MMOLo);
2058   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
2059       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
2060       .addFrameIndex(FI)
2061       .addImm(4)
2062       .addMemOperand(MMOHi);
2063   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
2064   MI.eraseFromParent(); // The pseudo instruction is gone now.
2065   return BB;
2066 }
2067 
2068 static bool isSelectPseudo(MachineInstr &MI) {
2069   switch (MI.getOpcode()) {
2070   default:
2071     return false;
2072   case RISCV::Select_GPR_Using_CC_GPR:
2073   case RISCV::Select_FPR16_Using_CC_GPR:
2074   case RISCV::Select_FPR32_Using_CC_GPR:
2075   case RISCV::Select_FPR64_Using_CC_GPR:
2076     return true;
2077   }
2078 }
2079 
2080 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
2081                                            MachineBasicBlock *BB) {
2082   // To "insert" Select_* instructions, we actually have to insert the triangle
2083   // control-flow pattern.  The incoming instructions know the destination vreg
2084   // to set, the condition code register to branch on, the true/false values to
2085   // select between, and the condcode to use to select the appropriate branch.
2086   //
2087   // We produce the following control flow:
2088   //     HeadMBB
2089   //     |  \
2090   //     |  IfFalseMBB
2091   //     | /
2092   //    TailMBB
2093   //
2094   // When we find a sequence of selects we attempt to optimize their emission
2095   // by sharing the control flow. Currently we only handle cases where we have
2096   // multiple selects with the exact same condition (same LHS, RHS and CC).
2097   // The selects may be interleaved with other instructions if the other
2098   // instructions meet some requirements we deem safe:
2099   // - They are debug instructions. Otherwise,
2100   // - They do not have side-effects, do not access memory and their inputs do
2101   //   not depend on the results of the select pseudo-instructions.
2102   // The TrueV/FalseV operands of the selects cannot depend on the result of
2103   // previous selects in the sequence.
2104   // These conditions could be further relaxed. See the X86 target for a
2105   // related approach and more information.
2106   Register LHS = MI.getOperand(1).getReg();
2107   Register RHS = MI.getOperand(2).getReg();
2108   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
2109 
2110   SmallVector<MachineInstr *, 4> SelectDebugValues;
2111   SmallSet<Register, 4> SelectDests;
2112   SelectDests.insert(MI.getOperand(0).getReg());
2113 
2114   MachineInstr *LastSelectPseudo = &MI;
2115 
2116   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
2117        SequenceMBBI != E; ++SequenceMBBI) {
2118     if (SequenceMBBI->isDebugInstr())
2119       continue;
2120     else if (isSelectPseudo(*SequenceMBBI)) {
2121       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
2122           SequenceMBBI->getOperand(2).getReg() != RHS ||
2123           SequenceMBBI->getOperand(3).getImm() != CC ||
2124           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
2125           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
2126         break;
2127       LastSelectPseudo = &*SequenceMBBI;
2128       SequenceMBBI->collectDebugValues(SelectDebugValues);
2129       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
2130     } else {
2131       if (SequenceMBBI->hasUnmodeledSideEffects() ||
2132           SequenceMBBI->mayLoadOrStore())
2133         break;
2134       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
2135             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
2136           }))
2137         break;
2138     }
2139   }
2140 
2141   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
2142   const BasicBlock *LLVM_BB = BB->getBasicBlock();
2143   DebugLoc DL = MI.getDebugLoc();
2144   MachineFunction::iterator I = ++BB->getIterator();
2145 
2146   MachineBasicBlock *HeadMBB = BB;
2147   MachineFunction *F = BB->getParent();
2148   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
2149   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
2150 
2151   F->insert(I, IfFalseMBB);
2152   F->insert(I, TailMBB);
2153 
2154   // Transfer debug instructions associated with the selects to TailMBB.
2155   for (MachineInstr *DebugInstr : SelectDebugValues) {
2156     TailMBB->push_back(DebugInstr->removeFromParent());
2157   }
2158 
2159   // Move all instructions after the sequence to TailMBB.
2160   TailMBB->splice(TailMBB->end(), HeadMBB,
2161                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
2162   // Update machine-CFG edges by transferring all successors of the current
2163   // block to the new block which will contain the Phi nodes for the selects.
2164   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
2165   // Set the successors for HeadMBB.
2166   HeadMBB->addSuccessor(IfFalseMBB);
2167   HeadMBB->addSuccessor(TailMBB);
2168 
2169   // Insert appropriate branch.
2170   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
2171 
2172   BuildMI(HeadMBB, DL, TII.get(Opcode))
2173     .addReg(LHS)
2174     .addReg(RHS)
2175     .addMBB(TailMBB);
2176 
2177   // IfFalseMBB just falls through to TailMBB.
2178   IfFalseMBB->addSuccessor(TailMBB);
2179 
2180   // Create PHIs for all of the select pseudo-instructions.
2181   auto SelectMBBI = MI.getIterator();
2182   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
2183   auto InsertionPoint = TailMBB->begin();
2184   while (SelectMBBI != SelectEnd) {
2185     auto Next = std::next(SelectMBBI);
2186     if (isSelectPseudo(*SelectMBBI)) {
2187       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
2188       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
2189               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
2190           .addReg(SelectMBBI->getOperand(4).getReg())
2191           .addMBB(HeadMBB)
2192           .addReg(SelectMBBI->getOperand(5).getReg())
2193           .addMBB(IfFalseMBB);
2194       SelectMBBI->eraseFromParent();
2195     }
2196     SelectMBBI = Next;
2197   }
2198 
2199   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
2200   return TailMBB;
2201 }
2202 
2203 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
2204                                     int VLIndex, unsigned SEWIndex,
2205                                     RISCVVLMUL VLMul, bool WritesElement0) {
2206   MachineFunction &MF = *BB->getParent();
2207   DebugLoc DL = MI.getDebugLoc();
2208   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2209 
2210   unsigned SEW = MI.getOperand(SEWIndex).getImm();
2211   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2212   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
2213 
2214   MachineRegisterInfo &MRI = MF.getRegInfo();
2215 
2216   // VL and VTYPE are alive here.
2217   MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
2218 
2219   if (VLIndex >= 0) {
2220     // Set VL (rs1 != X0).
2221     Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2222     MIB.addReg(DestReg, RegState::Define | RegState::Dead)
2223         .addReg(MI.getOperand(VLIndex).getReg());
2224   } else
2225     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
2226     MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
2227         .addReg(RISCV::X0, RegState::Kill);
2228 
2229   // Default to tail agnostic unless the destination is tied to a source. In
2230   // that case the user would have some control over the tail values. The tail
2231   // policy is also ignored on instructions that only update element 0 like
2232   // vmv.s.x or reductions so use agnostic there to match the common case.
2233   // FIXME: This is conservatively correct, but we might want to detect that
2234   // the input is undefined.
2235   bool TailAgnostic = true;
2236   if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
2237     TailAgnostic = false;
2238 
2239   // For simplicity we reuse the vtype representation here.
2240   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
2241                                      /*TailAgnostic*/ TailAgnostic,
2242                                      /*MaskAgnostic*/ false));
2243 
2244   // Remove (now) redundant operands from pseudo
2245   MI.getOperand(SEWIndex).setImm(-1);
2246   if (VLIndex >= 0) {
2247     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
2248     MI.getOperand(VLIndex).setIsKill(false);
2249   }
2250 
2251   return BB;
2252 }
2253 
2254 MachineBasicBlock *
2255 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2256                                                  MachineBasicBlock *BB) const {
2257   uint64_t TSFlags = MI.getDesc().TSFlags;
2258 
2259   if (TSFlags & RISCVII::HasSEWOpMask) {
2260     unsigned NumOperands = MI.getNumExplicitOperands();
2261     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
2262     unsigned SEWIndex = NumOperands - 1;
2263     bool WritesElement0 = TSFlags & RISCVII::WritesElement0Mask;
2264 
2265     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
2266                                                RISCVII::VLMulShift);
2267     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, WritesElement0);
2268   }
2269 
2270   switch (MI.getOpcode()) {
2271   default:
2272     llvm_unreachable("Unexpected instr type to insert");
2273   case RISCV::ReadCycleWide:
2274     assert(!Subtarget.is64Bit() &&
2275            "ReadCycleWrite is only to be used on riscv32");
2276     return emitReadCycleWidePseudo(MI, BB);
2277   case RISCV::Select_GPR_Using_CC_GPR:
2278   case RISCV::Select_FPR16_Using_CC_GPR:
2279   case RISCV::Select_FPR32_Using_CC_GPR:
2280   case RISCV::Select_FPR64_Using_CC_GPR:
2281     return emitSelectPseudo(MI, BB);
2282   case RISCV::BuildPairF64Pseudo:
2283     return emitBuildPairF64Pseudo(MI, BB);
2284   case RISCV::SplitF64Pseudo:
2285     return emitSplitF64Pseudo(MI, BB);
2286   }
2287 }
2288 
2289 // Calling Convention Implementation.
2290 // The expectations for frontend ABI lowering vary from target to target.
2291 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
2292 // details, but this is a longer term goal. For now, we simply try to keep the
2293 // role of the frontend as simple and well-defined as possible. The rules can
2294 // be summarised as:
2295 // * Never split up large scalar arguments. We handle them here.
2296 // * If a hardfloat calling convention is being used, and the struct may be
2297 // passed in a pair of registers (fp+fp, int+fp), and both registers are
2298 // available, then pass as two separate arguments. If either the GPRs or FPRs
2299 // are exhausted, then pass according to the rule below.
2300 // * If a struct could never be passed in registers or directly in a stack
2301 // slot (as it is larger than 2*XLEN and the floating point rules don't
2302 // apply), then pass it using a pointer with the byval attribute.
2303 // * If a struct is less than 2*XLEN, then coerce to either a two-element
2304 // word-sized array or a 2*XLEN scalar (depending on alignment).
2305 // * The frontend can determine whether a struct is returned by reference or
2306 // not based on its size and fields. If it will be returned by reference, the
2307 // frontend must modify the prototype so a pointer with the sret annotation is
2308 // passed as the first argument. This is not necessary for large scalar
2309 // returns.
2310 // * Struct return values and varargs should be coerced to structs containing
2311 // register-size fields in the same situations they would be for fixed
2312 // arguments.
2313 
2314 static const MCPhysReg ArgGPRs[] = {
2315   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
2316   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
2317 };
2318 static const MCPhysReg ArgFPR16s[] = {
2319   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
2320   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
2321 };
2322 static const MCPhysReg ArgFPR32s[] = {
2323   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
2324   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
2325 };
2326 static const MCPhysReg ArgFPR64s[] = {
2327   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
2328   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
2329 };
2330 // This is an interim calling convention and it may be changed in the future.
2331 static const MCPhysReg ArgVRs[] = {
2332   RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
2333   RISCV::V21, RISCV::V22, RISCV::V23
2334 };
2335 static const MCPhysReg ArgVRM2s[] = {
2336   RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
2337 };
2338 static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
2339 static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
2340 
2341 // Pass a 2*XLEN argument that has been split into two XLEN values through
2342 // registers or the stack as necessary.
2343 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
2344                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
2345                                 MVT ValVT2, MVT LocVT2,
2346                                 ISD::ArgFlagsTy ArgFlags2) {
2347   unsigned XLenInBytes = XLen / 8;
2348   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2349     // At least one half can be passed via register.
2350     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
2351                                      VA1.getLocVT(), CCValAssign::Full));
2352   } else {
2353     // Both halves must be passed on the stack, with proper alignment.
2354     Align StackAlign =
2355         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
2356     State.addLoc(
2357         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
2358                             State.AllocateStack(XLenInBytes, StackAlign),
2359                             VA1.getLocVT(), CCValAssign::Full));
2360     State.addLoc(CCValAssign::getMem(
2361         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2362         LocVT2, CCValAssign::Full));
2363     return false;
2364   }
2365 
2366   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2367     // The second half can also be passed via register.
2368     State.addLoc(
2369         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
2370   } else {
2371     // The second half is passed via the stack, without additional alignment.
2372     State.addLoc(CCValAssign::getMem(
2373         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2374         LocVT2, CCValAssign::Full));
2375   }
2376 
2377   return false;
2378 }
2379 
2380 // Implements the RISC-V calling convention. Returns true upon failure.
2381 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
2382                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
2383                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
2384                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
2385                      Optional<unsigned> FirstMaskArgument) {
2386   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
2387   assert(XLen == 32 || XLen == 64);
2388   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
2389 
2390   // Any return value split in to more than two values can't be returned
2391   // directly.
2392   if (IsRet && ValNo > 1)
2393     return true;
2394 
2395   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
2396   // variadic argument, or if no F16/F32 argument registers are available.
2397   bool UseGPRForF16_F32 = true;
2398   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
2399   // variadic argument, or if no F64 argument registers are available.
2400   bool UseGPRForF64 = true;
2401 
2402   switch (ABI) {
2403   default:
2404     llvm_unreachable("Unexpected ABI");
2405   case RISCVABI::ABI_ILP32:
2406   case RISCVABI::ABI_LP64:
2407     break;
2408   case RISCVABI::ABI_ILP32F:
2409   case RISCVABI::ABI_LP64F:
2410     UseGPRForF16_F32 = !IsFixed;
2411     break;
2412   case RISCVABI::ABI_ILP32D:
2413   case RISCVABI::ABI_LP64D:
2414     UseGPRForF16_F32 = !IsFixed;
2415     UseGPRForF64 = !IsFixed;
2416     break;
2417   }
2418 
2419   // FPR16, FPR32, and FPR64 alias each other.
2420   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
2421     UseGPRForF16_F32 = true;
2422     UseGPRForF64 = true;
2423   }
2424 
2425   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
2426   // similar local variables rather than directly checking against the target
2427   // ABI.
2428 
2429   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
2430     LocVT = XLenVT;
2431     LocInfo = CCValAssign::BCvt;
2432   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
2433     LocVT = MVT::i64;
2434     LocInfo = CCValAssign::BCvt;
2435   }
2436 
2437   // If this is a variadic argument, the RISC-V calling convention requires
2438   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
2439   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
2440   // be used regardless of whether the original argument was split during
2441   // legalisation or not. The argument will not be passed by registers if the
2442   // original type is larger than 2*XLEN, so the register alignment rule does
2443   // not apply.
2444   unsigned TwoXLenInBytes = (2 * XLen) / 8;
2445   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
2446       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
2447     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
2448     // Skip 'odd' register if necessary.
2449     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
2450       State.AllocateReg(ArgGPRs);
2451   }
2452 
2453   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
2454   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
2455       State.getPendingArgFlags();
2456 
2457   assert(PendingLocs.size() == PendingArgFlags.size() &&
2458          "PendingLocs and PendingArgFlags out of sync");
2459 
2460   // Handle passing f64 on RV32D with a soft float ABI or when floating point
2461   // registers are exhausted.
2462   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
2463     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
2464            "Can't lower f64 if it is split");
2465     // Depending on available argument GPRS, f64 may be passed in a pair of
2466     // GPRs, split between a GPR and the stack, or passed completely on the
2467     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
2468     // cases.
2469     Register Reg = State.AllocateReg(ArgGPRs);
2470     LocVT = MVT::i32;
2471     if (!Reg) {
2472       unsigned StackOffset = State.AllocateStack(8, Align(8));
2473       State.addLoc(
2474           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2475       return false;
2476     }
2477     if (!State.AllocateReg(ArgGPRs))
2478       State.AllocateStack(4, Align(4));
2479     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2480     return false;
2481   }
2482 
2483   // Split arguments might be passed indirectly, so keep track of the pending
2484   // values.
2485   if (ArgFlags.isSplit() || !PendingLocs.empty()) {
2486     LocVT = XLenVT;
2487     LocInfo = CCValAssign::Indirect;
2488     PendingLocs.push_back(
2489         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
2490     PendingArgFlags.push_back(ArgFlags);
2491     if (!ArgFlags.isSplitEnd()) {
2492       return false;
2493     }
2494   }
2495 
2496   // If the split argument only had two elements, it should be passed directly
2497   // in registers or on the stack.
2498   if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
2499     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
2500     // Apply the normal calling convention rules to the first half of the
2501     // split argument.
2502     CCValAssign VA = PendingLocs[0];
2503     ISD::ArgFlagsTy AF = PendingArgFlags[0];
2504     PendingLocs.clear();
2505     PendingArgFlags.clear();
2506     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
2507                                ArgFlags);
2508   }
2509 
2510   // Allocate to a register if possible, or else a stack slot.
2511   Register Reg;
2512   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
2513     Reg = State.AllocateReg(ArgFPR16s);
2514   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
2515     Reg = State.AllocateReg(ArgFPR32s);
2516   else if (ValVT == MVT::f64 && !UseGPRForF64)
2517     Reg = State.AllocateReg(ArgFPR64s);
2518   else if (ValVT.isScalableVector()) {
2519     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
2520     if (RC == &RISCV::VRRegClass) {
2521       // Assign the first mask argument to V0.
2522       // This is an interim calling convention and it may be changed in the
2523       // future.
2524       if (FirstMaskArgument.hasValue() &&
2525           ValNo == FirstMaskArgument.getValue()) {
2526         Reg = State.AllocateReg(RISCV::V0);
2527       } else {
2528         Reg = State.AllocateReg(ArgVRs);
2529       }
2530     } else if (RC == &RISCV::VRM2RegClass) {
2531       Reg = State.AllocateReg(ArgVRM2s);
2532     } else if (RC == &RISCV::VRM4RegClass) {
2533       Reg = State.AllocateReg(ArgVRM4s);
2534     } else if (RC == &RISCV::VRM8RegClass) {
2535       Reg = State.AllocateReg(ArgVRM8s);
2536     } else {
2537       llvm_unreachable("Unhandled class register for ValueType");
2538     }
2539     if (!Reg) {
2540       LocInfo = CCValAssign::Indirect;
2541       // Try using a GPR to pass the address
2542       Reg = State.AllocateReg(ArgGPRs);
2543       LocVT = XLenVT;
2544     }
2545   } else
2546     Reg = State.AllocateReg(ArgGPRs);
2547   unsigned StackOffset =
2548       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
2549 
2550   // If we reach this point and PendingLocs is non-empty, we must be at the
2551   // end of a split argument that must be passed indirectly.
2552   if (!PendingLocs.empty()) {
2553     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
2554     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
2555 
2556     for (auto &It : PendingLocs) {
2557       if (Reg)
2558         It.convertToReg(Reg);
2559       else
2560         It.convertToMem(StackOffset);
2561       State.addLoc(It);
2562     }
2563     PendingLocs.clear();
2564     PendingArgFlags.clear();
2565     return false;
2566   }
2567 
2568   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
2569           (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
2570          "Expected an XLenVT or scalable vector types at this stage");
2571 
2572   if (Reg) {
2573     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2574     return false;
2575   }
2576 
2577   // When a floating-point value is passed on the stack, no bit-conversion is
2578   // needed.
2579   if (ValVT.isFloatingPoint()) {
2580     LocVT = ValVT;
2581     LocInfo = CCValAssign::Full;
2582   }
2583   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2584   return false;
2585 }
2586 
2587 template <typename ArgTy>
2588 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
2589   for (const auto &ArgIdx : enumerate(Args)) {
2590     MVT ArgVT = ArgIdx.value().VT;
2591     if (ArgVT.isScalableVector() &&
2592         ArgVT.getVectorElementType().SimpleTy == MVT::i1)
2593       return ArgIdx.index();
2594   }
2595   return None;
2596 }
2597 
2598 void RISCVTargetLowering::analyzeInputArgs(
2599     MachineFunction &MF, CCState &CCInfo,
2600     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
2601   unsigned NumArgs = Ins.size();
2602   FunctionType *FType = MF.getFunction().getFunctionType();
2603 
2604   Optional<unsigned> FirstMaskArgument;
2605   if (Subtarget.hasStdExtV())
2606     FirstMaskArgument = preAssignMask(Ins);
2607 
2608   for (unsigned i = 0; i != NumArgs; ++i) {
2609     MVT ArgVT = Ins[i].VT;
2610     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
2611 
2612     Type *ArgTy = nullptr;
2613     if (IsRet)
2614       ArgTy = FType->getReturnType();
2615     else if (Ins[i].isOrigArg())
2616       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
2617 
2618     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2619     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2620                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
2621                  FirstMaskArgument)) {
2622       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
2623                         << EVT(ArgVT).getEVTString() << '\n');
2624       llvm_unreachable(nullptr);
2625     }
2626   }
2627 }
2628 
2629 void RISCVTargetLowering::analyzeOutputArgs(
2630     MachineFunction &MF, CCState &CCInfo,
2631     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
2632     CallLoweringInfo *CLI) const {
2633   unsigned NumArgs = Outs.size();
2634 
2635   Optional<unsigned> FirstMaskArgument;
2636   if (Subtarget.hasStdExtV())
2637     FirstMaskArgument = preAssignMask(Outs);
2638 
2639   for (unsigned i = 0; i != NumArgs; i++) {
2640     MVT ArgVT = Outs[i].VT;
2641     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2642     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
2643 
2644     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2645     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2646                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
2647                  FirstMaskArgument)) {
2648       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
2649                         << EVT(ArgVT).getEVTString() << "\n");
2650       llvm_unreachable(nullptr);
2651     }
2652   }
2653 }
2654 
2655 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
2656 // values.
2657 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
2658                                    const CCValAssign &VA, const SDLoc &DL) {
2659   switch (VA.getLocInfo()) {
2660   default:
2661     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2662   case CCValAssign::Full:
2663     break;
2664   case CCValAssign::BCvt:
2665     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
2666       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
2667     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
2668       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
2669     else
2670       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2671     break;
2672   }
2673   return Val;
2674 }
2675 
2676 // The caller is responsible for loading the full value if the argument is
2677 // passed with CCValAssign::Indirect.
2678 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
2679                                 const CCValAssign &VA, const SDLoc &DL,
2680                                 const RISCVTargetLowering &TLI) {
2681   MachineFunction &MF = DAG.getMachineFunction();
2682   MachineRegisterInfo &RegInfo = MF.getRegInfo();
2683   EVT LocVT = VA.getLocVT();
2684   SDValue Val;
2685   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
2686   Register VReg = RegInfo.createVirtualRegister(RC);
2687   RegInfo.addLiveIn(VA.getLocReg(), VReg);
2688   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
2689 
2690   if (VA.getLocInfo() == CCValAssign::Indirect)
2691     return Val;
2692 
2693   return convertLocVTToValVT(DAG, Val, VA, DL);
2694 }
2695 
2696 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
2697                                    const CCValAssign &VA, const SDLoc &DL) {
2698   EVT LocVT = VA.getLocVT();
2699 
2700   switch (VA.getLocInfo()) {
2701   default:
2702     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2703   case CCValAssign::Full:
2704     break;
2705   case CCValAssign::BCvt:
2706     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
2707       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
2708     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
2709       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
2710     else
2711       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
2712     break;
2713   }
2714   return Val;
2715 }
2716 
2717 // The caller is responsible for loading the full value if the argument is
2718 // passed with CCValAssign::Indirect.
2719 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
2720                                 const CCValAssign &VA, const SDLoc &DL) {
2721   MachineFunction &MF = DAG.getMachineFunction();
2722   MachineFrameInfo &MFI = MF.getFrameInfo();
2723   EVT LocVT = VA.getLocVT();
2724   EVT ValVT = VA.getValVT();
2725   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
2726   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
2727                                  VA.getLocMemOffset(), /*Immutable=*/true);
2728   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2729   SDValue Val;
2730 
2731   ISD::LoadExtType ExtType;
2732   switch (VA.getLocInfo()) {
2733   default:
2734     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2735   case CCValAssign::Full:
2736   case CCValAssign::Indirect:
2737   case CCValAssign::BCvt:
2738     ExtType = ISD::NON_EXTLOAD;
2739     break;
2740   }
2741   Val = DAG.getExtLoad(
2742       ExtType, DL, LocVT, Chain, FIN,
2743       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
2744   return Val;
2745 }
2746 
2747 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
2748                                        const CCValAssign &VA, const SDLoc &DL) {
2749   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
2750          "Unexpected VA");
2751   MachineFunction &MF = DAG.getMachineFunction();
2752   MachineFrameInfo &MFI = MF.getFrameInfo();
2753   MachineRegisterInfo &RegInfo = MF.getRegInfo();
2754 
2755   if (VA.isMemLoc()) {
2756     // f64 is passed on the stack.
2757     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
2758     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2759     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
2760                        MachinePointerInfo::getFixedStack(MF, FI));
2761   }
2762 
2763   assert(VA.isRegLoc() && "Expected register VA assignment");
2764 
2765   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2766   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
2767   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
2768   SDValue Hi;
2769   if (VA.getLocReg() == RISCV::X17) {
2770     // Second half of f64 is passed on the stack.
2771     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
2772     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2773     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
2774                      MachinePointerInfo::getFixedStack(MF, FI));
2775   } else {
2776     // Second half of f64 is passed in another GPR.
2777     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2778     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
2779     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
2780   }
2781   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
2782 }
2783 
2784 // FastCC has less than 1% performance improvement for some particular
2785 // benchmark. But theoretically, it may has benenfit for some cases.
2786 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
2787                             CCValAssign::LocInfo LocInfo,
2788                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
2789 
2790   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2791     // X5 and X6 might be used for save-restore libcall.
2792     static const MCPhysReg GPRList[] = {
2793         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
2794         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
2795         RISCV::X29, RISCV::X30, RISCV::X31};
2796     if (unsigned Reg = State.AllocateReg(GPRList)) {
2797       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2798       return false;
2799     }
2800   }
2801 
2802   if (LocVT == MVT::f16) {
2803     static const MCPhysReg FPR16List[] = {
2804         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
2805         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
2806         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
2807         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
2808     if (unsigned Reg = State.AllocateReg(FPR16List)) {
2809       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2810       return false;
2811     }
2812   }
2813 
2814   if (LocVT == MVT::f32) {
2815     static const MCPhysReg FPR32List[] = {
2816         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
2817         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
2818         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
2819         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
2820     if (unsigned Reg = State.AllocateReg(FPR32List)) {
2821       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2822       return false;
2823     }
2824   }
2825 
2826   if (LocVT == MVT::f64) {
2827     static const MCPhysReg FPR64List[] = {
2828         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
2829         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
2830         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
2831         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
2832     if (unsigned Reg = State.AllocateReg(FPR64List)) {
2833       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2834       return false;
2835     }
2836   }
2837 
2838   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
2839     unsigned Offset4 = State.AllocateStack(4, Align(4));
2840     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
2841     return false;
2842   }
2843 
2844   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
2845     unsigned Offset5 = State.AllocateStack(8, Align(8));
2846     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
2847     return false;
2848   }
2849 
2850   return true; // CC didn't match.
2851 }
2852 
2853 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
2854                          CCValAssign::LocInfo LocInfo,
2855                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
2856 
2857   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2858     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
2859     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
2860     static const MCPhysReg GPRList[] = {
2861         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
2862         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
2863     if (unsigned Reg = State.AllocateReg(GPRList)) {
2864       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2865       return false;
2866     }
2867   }
2868 
2869   if (LocVT == MVT::f32) {
2870     // Pass in STG registers: F1, ..., F6
2871     //                        fs0 ... fs5
2872     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
2873                                           RISCV::F18_F, RISCV::F19_F,
2874                                           RISCV::F20_F, RISCV::F21_F};
2875     if (unsigned Reg = State.AllocateReg(FPR32List)) {
2876       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2877       return false;
2878     }
2879   }
2880 
2881   if (LocVT == MVT::f64) {
2882     // Pass in STG registers: D1, ..., D6
2883     //                        fs6 ... fs11
2884     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
2885                                           RISCV::F24_D, RISCV::F25_D,
2886                                           RISCV::F26_D, RISCV::F27_D};
2887     if (unsigned Reg = State.AllocateReg(FPR64List)) {
2888       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2889       return false;
2890     }
2891   }
2892 
2893   report_fatal_error("No registers left in GHC calling convention");
2894   return true;
2895 }
2896 
2897 // Transform physical registers into virtual registers.
2898 SDValue RISCVTargetLowering::LowerFormalArguments(
2899     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
2900     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2901     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2902 
2903   MachineFunction &MF = DAG.getMachineFunction();
2904 
2905   switch (CallConv) {
2906   default:
2907     report_fatal_error("Unsupported calling convention");
2908   case CallingConv::C:
2909   case CallingConv::Fast:
2910     break;
2911   case CallingConv::GHC:
2912     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
2913         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
2914       report_fatal_error(
2915         "GHC calling convention requires the F and D instruction set extensions");
2916   }
2917 
2918   const Function &Func = MF.getFunction();
2919   if (Func.hasFnAttribute("interrupt")) {
2920     if (!Func.arg_empty())
2921       report_fatal_error(
2922         "Functions with the interrupt attribute cannot have arguments!");
2923 
2924     StringRef Kind =
2925       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2926 
2927     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
2928       report_fatal_error(
2929         "Function interrupt attribute argument not supported!");
2930   }
2931 
2932   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2933   MVT XLenVT = Subtarget.getXLenVT();
2934   unsigned XLenInBytes = Subtarget.getXLen() / 8;
2935   // Used with vargs to acumulate store chains.
2936   std::vector<SDValue> OutChains;
2937 
2938   // Assign locations to all of the incoming arguments.
2939   SmallVector<CCValAssign, 16> ArgLocs;
2940   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2941 
2942   if (CallConv == CallingConv::Fast)
2943     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2944   else if (CallConv == CallingConv::GHC)
2945     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
2946   else
2947     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2948 
2949   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2950     CCValAssign &VA = ArgLocs[i];
2951     SDValue ArgValue;
2952     // Passing f64 on RV32D with a soft float ABI must be handled as a special
2953     // case.
2954     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2955       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2956     else if (VA.isRegLoc())
2957       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
2958     else
2959       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2960 
2961     if (VA.getLocInfo() == CCValAssign::Indirect) {
2962       // If the original argument was split and passed by reference (e.g. i128
2963       // on RV32), we need to load all parts of it here (using the same
2964       // address).
2965       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2966                                    MachinePointerInfo()));
2967       unsigned ArgIndex = Ins[i].OrigArgIndex;
2968       assert(Ins[i].PartOffset == 0);
2969       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2970         CCValAssign &PartVA = ArgLocs[i + 1];
2971         unsigned PartOffset = Ins[i + 1].PartOffset;
2972         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2973                                       DAG.getIntPtrConstant(PartOffset, DL));
2974         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2975                                      MachinePointerInfo()));
2976         ++i;
2977       }
2978       continue;
2979     }
2980     InVals.push_back(ArgValue);
2981   }
2982 
2983   if (IsVarArg) {
2984     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2985     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2986     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2987     MachineFrameInfo &MFI = MF.getFrameInfo();
2988     MachineRegisterInfo &RegInfo = MF.getRegInfo();
2989     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2990 
2991     // Offset of the first variable argument from stack pointer, and size of
2992     // the vararg save area. For now, the varargs save area is either zero or
2993     // large enough to hold a0-a7.
2994     int VaArgOffset, VarArgsSaveSize;
2995 
2996     // If all registers are allocated, then all varargs must be passed on the
2997     // stack and we don't need to save any argregs.
2998     if (ArgRegs.size() == Idx) {
2999       VaArgOffset = CCInfo.getNextStackOffset();
3000       VarArgsSaveSize = 0;
3001     } else {
3002       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
3003       VaArgOffset = -VarArgsSaveSize;
3004     }
3005 
3006     // Record the frame index of the first variable argument
3007     // which is a value necessary to VASTART.
3008     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
3009     RVFI->setVarArgsFrameIndex(FI);
3010 
3011     // If saving an odd number of registers then create an extra stack slot to
3012     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
3013     // offsets to even-numbered registered remain 2*XLEN-aligned.
3014     if (Idx % 2) {
3015       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
3016       VarArgsSaveSize += XLenInBytes;
3017     }
3018 
3019     // Copy the integer registers that may have been used for passing varargs
3020     // to the vararg save area.
3021     for (unsigned I = Idx; I < ArgRegs.size();
3022          ++I, VaArgOffset += XLenInBytes) {
3023       const Register Reg = RegInfo.createVirtualRegister(RC);
3024       RegInfo.addLiveIn(ArgRegs[I], Reg);
3025       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
3026       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
3027       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3028       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
3029                                    MachinePointerInfo::getFixedStack(MF, FI));
3030       cast<StoreSDNode>(Store.getNode())
3031           ->getMemOperand()
3032           ->setValue((Value *)nullptr);
3033       OutChains.push_back(Store);
3034     }
3035     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
3036   }
3037 
3038   // All stores are grouped in one node to allow the matching between
3039   // the size of Ins and InVals. This only happens for vararg functions.
3040   if (!OutChains.empty()) {
3041     OutChains.push_back(Chain);
3042     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3043   }
3044 
3045   return Chain;
3046 }
3047 
3048 /// isEligibleForTailCallOptimization - Check whether the call is eligible
3049 /// for tail call optimization.
3050 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
3051 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
3052     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
3053     const SmallVector<CCValAssign, 16> &ArgLocs) const {
3054 
3055   auto &Callee = CLI.Callee;
3056   auto CalleeCC = CLI.CallConv;
3057   auto &Outs = CLI.Outs;
3058   auto &Caller = MF.getFunction();
3059   auto CallerCC = Caller.getCallingConv();
3060 
3061   // Exception-handling functions need a special set of instructions to
3062   // indicate a return to the hardware. Tail-calling another function would
3063   // probably break this.
3064   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
3065   // should be expanded as new function attributes are introduced.
3066   if (Caller.hasFnAttribute("interrupt"))
3067     return false;
3068 
3069   // Do not tail call opt if the stack is used to pass parameters.
3070   if (CCInfo.getNextStackOffset() != 0)
3071     return false;
3072 
3073   // Do not tail call opt if any parameters need to be passed indirectly.
3074   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
3075   // passed indirectly. So the address of the value will be passed in a
3076   // register, or if not available, then the address is put on the stack. In
3077   // order to pass indirectly, space on the stack often needs to be allocated
3078   // in order to store the value. In this case the CCInfo.getNextStackOffset()
3079   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
3080   // are passed CCValAssign::Indirect.
3081   for (auto &VA : ArgLocs)
3082     if (VA.getLocInfo() == CCValAssign::Indirect)
3083       return false;
3084 
3085   // Do not tail call opt if either caller or callee uses struct return
3086   // semantics.
3087   auto IsCallerStructRet = Caller.hasStructRetAttr();
3088   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
3089   if (IsCallerStructRet || IsCalleeStructRet)
3090     return false;
3091 
3092   // Externally-defined functions with weak linkage should not be
3093   // tail-called. The behaviour of branch instructions in this situation (as
3094   // used for tail calls) is implementation-defined, so we cannot rely on the
3095   // linker replacing the tail call with a return.
3096   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3097     const GlobalValue *GV = G->getGlobal();
3098     if (GV->hasExternalWeakLinkage())
3099       return false;
3100   }
3101 
3102   // The callee has to preserve all registers the caller needs to preserve.
3103   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3104   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
3105   if (CalleeCC != CallerCC) {
3106     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
3107     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
3108       return false;
3109   }
3110 
3111   // Byval parameters hand the function a pointer directly into the stack area
3112   // we want to reuse during a tail call. Working around this *is* possible
3113   // but less efficient and uglier in LowerCall.
3114   for (auto &Arg : Outs)
3115     if (Arg.Flags.isByVal())
3116       return false;
3117 
3118   return true;
3119 }
3120 
3121 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
3122 // and output parameter nodes.
3123 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
3124                                        SmallVectorImpl<SDValue> &InVals) const {
3125   SelectionDAG &DAG = CLI.DAG;
3126   SDLoc &DL = CLI.DL;
3127   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3128   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3129   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3130   SDValue Chain = CLI.Chain;
3131   SDValue Callee = CLI.Callee;
3132   bool &IsTailCall = CLI.IsTailCall;
3133   CallingConv::ID CallConv = CLI.CallConv;
3134   bool IsVarArg = CLI.IsVarArg;
3135   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3136   MVT XLenVT = Subtarget.getXLenVT();
3137 
3138   MachineFunction &MF = DAG.getMachineFunction();
3139 
3140   // Analyze the operands of the call, assigning locations to each operand.
3141   SmallVector<CCValAssign, 16> ArgLocs;
3142   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
3143 
3144   if (CallConv == CallingConv::Fast)
3145     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
3146   else if (CallConv == CallingConv::GHC)
3147     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
3148   else
3149     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
3150 
3151   // Check if it's really possible to do a tail call.
3152   if (IsTailCall)
3153     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
3154 
3155   if (IsTailCall)
3156     ++NumTailCalls;
3157   else if (CLI.CB && CLI.CB->isMustTailCall())
3158     report_fatal_error("failed to perform tail call elimination on a call "
3159                        "site marked musttail");
3160 
3161   // Get a count of how many bytes are to be pushed on the stack.
3162   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
3163 
3164   // Create local copies for byval args
3165   SmallVector<SDValue, 8> ByValArgs;
3166   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3167     ISD::ArgFlagsTy Flags = Outs[i].Flags;
3168     if (!Flags.isByVal())
3169       continue;
3170 
3171     SDValue Arg = OutVals[i];
3172     unsigned Size = Flags.getByValSize();
3173     Align Alignment = Flags.getNonZeroByValAlign();
3174 
3175     int FI =
3176         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
3177     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3178     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
3179 
3180     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
3181                           /*IsVolatile=*/false,
3182                           /*AlwaysInline=*/false, IsTailCall,
3183                           MachinePointerInfo(), MachinePointerInfo());
3184     ByValArgs.push_back(FIPtr);
3185   }
3186 
3187   if (!IsTailCall)
3188     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
3189 
3190   // Copy argument values to their designated locations.
3191   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
3192   SmallVector<SDValue, 8> MemOpChains;
3193   SDValue StackPtr;
3194   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
3195     CCValAssign &VA = ArgLocs[i];
3196     SDValue ArgValue = OutVals[i];
3197     ISD::ArgFlagsTy Flags = Outs[i].Flags;
3198 
3199     // Handle passing f64 on RV32D with a soft float ABI as a special case.
3200     bool IsF64OnRV32DSoftABI =
3201         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
3202     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
3203       SDValue SplitF64 = DAG.getNode(
3204           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
3205       SDValue Lo = SplitF64.getValue(0);
3206       SDValue Hi = SplitF64.getValue(1);
3207 
3208       Register RegLo = VA.getLocReg();
3209       RegsToPass.push_back(std::make_pair(RegLo, Lo));
3210 
3211       if (RegLo == RISCV::X17) {
3212         // Second half of f64 is passed on the stack.
3213         // Work out the address of the stack slot.
3214         if (!StackPtr.getNode())
3215           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
3216         // Emit the store.
3217         MemOpChains.push_back(
3218             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
3219       } else {
3220         // Second half of f64 is passed in another GPR.
3221         assert(RegLo < RISCV::X31 && "Invalid register pair");
3222         Register RegHigh = RegLo + 1;
3223         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
3224       }
3225       continue;
3226     }
3227 
3228     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
3229     // as any other MemLoc.
3230 
3231     // Promote the value if needed.
3232     // For now, only handle fully promoted and indirect arguments.
3233     if (VA.getLocInfo() == CCValAssign::Indirect) {
3234       // Store the argument in a stack slot and pass its address.
3235       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
3236       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3237       MemOpChains.push_back(
3238           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
3239                        MachinePointerInfo::getFixedStack(MF, FI)));
3240       // If the original argument was split (e.g. i128), we need
3241       // to store all parts of it here (and pass just one address).
3242       unsigned ArgIndex = Outs[i].OrigArgIndex;
3243       assert(Outs[i].PartOffset == 0);
3244       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
3245         SDValue PartValue = OutVals[i + 1];
3246         unsigned PartOffset = Outs[i + 1].PartOffset;
3247         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
3248                                       DAG.getIntPtrConstant(PartOffset, DL));
3249         MemOpChains.push_back(
3250             DAG.getStore(Chain, DL, PartValue, Address,
3251                          MachinePointerInfo::getFixedStack(MF, FI)));
3252         ++i;
3253       }
3254       ArgValue = SpillSlot;
3255     } else {
3256       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
3257     }
3258 
3259     // Use local copy if it is a byval arg.
3260     if (Flags.isByVal())
3261       ArgValue = ByValArgs[j++];
3262 
3263     if (VA.isRegLoc()) {
3264       // Queue up the argument copies and emit them at the end.
3265       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
3266     } else {
3267       assert(VA.isMemLoc() && "Argument not register or memory");
3268       assert(!IsTailCall && "Tail call not allowed if stack is used "
3269                             "for passing parameters");
3270 
3271       // Work out the address of the stack slot.
3272       if (!StackPtr.getNode())
3273         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
3274       SDValue Address =
3275           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
3276                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
3277 
3278       // Emit the store.
3279       MemOpChains.push_back(
3280           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
3281     }
3282   }
3283 
3284   // Join the stores, which are independent of one another.
3285   if (!MemOpChains.empty())
3286     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3287 
3288   SDValue Glue;
3289 
3290   // Build a sequence of copy-to-reg nodes, chained and glued together.
3291   for (auto &Reg : RegsToPass) {
3292     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
3293     Glue = Chain.getValue(1);
3294   }
3295 
3296   // Validate that none of the argument registers have been marked as
3297   // reserved, if so report an error. Do the same for the return address if this
3298   // is not a tailcall.
3299   validateCCReservedRegs(RegsToPass, MF);
3300   if (!IsTailCall &&
3301       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
3302     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3303         MF.getFunction(),
3304         "Return address register required, but has been reserved."});
3305 
3306   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
3307   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
3308   // split it and then direct call can be matched by PseudoCALL.
3309   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
3310     const GlobalValue *GV = S->getGlobal();
3311 
3312     unsigned OpFlags = RISCVII::MO_CALL;
3313     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
3314       OpFlags = RISCVII::MO_PLT;
3315 
3316     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
3317   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3318     unsigned OpFlags = RISCVII::MO_CALL;
3319 
3320     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
3321                                                  nullptr))
3322       OpFlags = RISCVII::MO_PLT;
3323 
3324     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
3325   }
3326 
3327   // The first call operand is the chain and the second is the target address.
3328   SmallVector<SDValue, 8> Ops;
3329   Ops.push_back(Chain);
3330   Ops.push_back(Callee);
3331 
3332   // Add argument registers to the end of the list so that they are
3333   // known live into the call.
3334   for (auto &Reg : RegsToPass)
3335     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
3336 
3337   if (!IsTailCall) {
3338     // Add a register mask operand representing the call-preserved registers.
3339     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3340     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
3341     assert(Mask && "Missing call preserved mask for calling convention");
3342     Ops.push_back(DAG.getRegisterMask(Mask));
3343   }
3344 
3345   // Glue the call to the argument copies, if any.
3346   if (Glue.getNode())
3347     Ops.push_back(Glue);
3348 
3349   // Emit the call.
3350   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3351 
3352   if (IsTailCall) {
3353     MF.getFrameInfo().setHasTailCall();
3354     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
3355   }
3356 
3357   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
3358   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
3359   Glue = Chain.getValue(1);
3360 
3361   // Mark the end of the call, which is glued to the call itself.
3362   Chain = DAG.getCALLSEQ_END(Chain,
3363                              DAG.getConstant(NumBytes, DL, PtrVT, true),
3364                              DAG.getConstant(0, DL, PtrVT, true),
3365                              Glue, DL);
3366   Glue = Chain.getValue(1);
3367 
3368   // Assign locations to each value returned by this call.
3369   SmallVector<CCValAssign, 16> RVLocs;
3370   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3371   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
3372 
3373   // Copy all of the result registers out of their specified physreg.
3374   for (auto &VA : RVLocs) {
3375     // Copy the value out
3376     SDValue RetValue =
3377         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
3378     // Glue the RetValue to the end of the call sequence
3379     Chain = RetValue.getValue(1);
3380     Glue = RetValue.getValue(2);
3381 
3382     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
3383       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
3384       SDValue RetValue2 =
3385           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
3386       Chain = RetValue2.getValue(1);
3387       Glue = RetValue2.getValue(2);
3388       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
3389                              RetValue2);
3390     }
3391 
3392     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
3393 
3394     InVals.push_back(RetValue);
3395   }
3396 
3397   return Chain;
3398 }
3399 
3400 bool RISCVTargetLowering::CanLowerReturn(
3401     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
3402     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
3403   SmallVector<CCValAssign, 16> RVLocs;
3404   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3405 
3406   Optional<unsigned> FirstMaskArgument;
3407   if (Subtarget.hasStdExtV())
3408     FirstMaskArgument = preAssignMask(Outs);
3409 
3410   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
3411     MVT VT = Outs[i].VT;
3412     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
3413     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
3414     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
3415                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
3416                  *this, FirstMaskArgument))
3417       return false;
3418   }
3419   return true;
3420 }
3421 
3422 SDValue
3423 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3424                                  bool IsVarArg,
3425                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
3426                                  const SmallVectorImpl<SDValue> &OutVals,
3427                                  const SDLoc &DL, SelectionDAG &DAG) const {
3428   const MachineFunction &MF = DAG.getMachineFunction();
3429   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
3430 
3431   // Stores the assignment of the return value to a location.
3432   SmallVector<CCValAssign, 16> RVLocs;
3433 
3434   // Info about the registers and stack slot.
3435   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3436                  *DAG.getContext());
3437 
3438   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
3439                     nullptr);
3440 
3441   if (CallConv == CallingConv::GHC && !RVLocs.empty())
3442     report_fatal_error("GHC functions return void only");
3443 
3444   SDValue Glue;
3445   SmallVector<SDValue, 4> RetOps(1, Chain);
3446 
3447   // Copy the result values into the output registers.
3448   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
3449     SDValue Val = OutVals[i];
3450     CCValAssign &VA = RVLocs[i];
3451     assert(VA.isRegLoc() && "Can only return in registers!");
3452 
3453     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
3454       // Handle returning f64 on RV32D with a soft float ABI.
3455       assert(VA.isRegLoc() && "Expected return via registers");
3456       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
3457                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
3458       SDValue Lo = SplitF64.getValue(0);
3459       SDValue Hi = SplitF64.getValue(1);
3460       Register RegLo = VA.getLocReg();
3461       assert(RegLo < RISCV::X31 && "Invalid register pair");
3462       Register RegHi = RegLo + 1;
3463 
3464       if (STI.isRegisterReservedByUser(RegLo) ||
3465           STI.isRegisterReservedByUser(RegHi))
3466         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3467             MF.getFunction(),
3468             "Return value register required, but has been reserved."});
3469 
3470       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
3471       Glue = Chain.getValue(1);
3472       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
3473       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
3474       Glue = Chain.getValue(1);
3475       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
3476     } else {
3477       // Handle a 'normal' return.
3478       Val = convertValVTToLocVT(DAG, Val, VA, DL);
3479       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
3480 
3481       if (STI.isRegisterReservedByUser(VA.getLocReg()))
3482         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
3483             MF.getFunction(),
3484             "Return value register required, but has been reserved."});
3485 
3486       // Guarantee that all emitted copies are stuck together.
3487       Glue = Chain.getValue(1);
3488       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3489     }
3490   }
3491 
3492   RetOps[0] = Chain; // Update chain.
3493 
3494   // Add the glue node if we have it.
3495   if (Glue.getNode()) {
3496     RetOps.push_back(Glue);
3497   }
3498 
3499   // Interrupt service routines use different return instructions.
3500   const Function &Func = DAG.getMachineFunction().getFunction();
3501   if (Func.hasFnAttribute("interrupt")) {
3502     if (!Func.getReturnType()->isVoidTy())
3503       report_fatal_error(
3504           "Functions with the interrupt attribute must have void return type!");
3505 
3506     MachineFunction &MF = DAG.getMachineFunction();
3507     StringRef Kind =
3508       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
3509 
3510     unsigned RetOpc;
3511     if (Kind == "user")
3512       RetOpc = RISCVISD::URET_FLAG;
3513     else if (Kind == "supervisor")
3514       RetOpc = RISCVISD::SRET_FLAG;
3515     else
3516       RetOpc = RISCVISD::MRET_FLAG;
3517 
3518     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
3519   }
3520 
3521   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
3522 }
3523 
3524 void RISCVTargetLowering::validateCCReservedRegs(
3525     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
3526     MachineFunction &MF) const {
3527   const Function &F = MF.getFunction();
3528   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
3529 
3530   if (llvm::any_of(Regs, [&STI](auto Reg) {
3531         return STI.isRegisterReservedByUser(Reg.first);
3532       }))
3533     F.getContext().diagnose(DiagnosticInfoUnsupported{
3534         F, "Argument register required, but has been reserved."});
3535 }
3536 
3537 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3538   return CI->isTailCall();
3539 }
3540 
3541 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
3542 #define NODE_NAME_CASE(NODE)                                                   \
3543   case RISCVISD::NODE:                                                         \
3544     return "RISCVISD::" #NODE;
3545   // clang-format off
3546   switch ((RISCVISD::NodeType)Opcode) {
3547   case RISCVISD::FIRST_NUMBER:
3548     break;
3549   NODE_NAME_CASE(RET_FLAG)
3550   NODE_NAME_CASE(URET_FLAG)
3551   NODE_NAME_CASE(SRET_FLAG)
3552   NODE_NAME_CASE(MRET_FLAG)
3553   NODE_NAME_CASE(CALL)
3554   NODE_NAME_CASE(SELECT_CC)
3555   NODE_NAME_CASE(BuildPairF64)
3556   NODE_NAME_CASE(SplitF64)
3557   NODE_NAME_CASE(TAIL)
3558   NODE_NAME_CASE(SLLW)
3559   NODE_NAME_CASE(SRAW)
3560   NODE_NAME_CASE(SRLW)
3561   NODE_NAME_CASE(DIVW)
3562   NODE_NAME_CASE(DIVUW)
3563   NODE_NAME_CASE(REMUW)
3564   NODE_NAME_CASE(ROLW)
3565   NODE_NAME_CASE(RORW)
3566   NODE_NAME_CASE(FSLW)
3567   NODE_NAME_CASE(FSRW)
3568   NODE_NAME_CASE(FMV_H_X)
3569   NODE_NAME_CASE(FMV_X_ANYEXTH)
3570   NODE_NAME_CASE(FMV_W_X_RV64)
3571   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
3572   NODE_NAME_CASE(READ_CYCLE_WIDE)
3573   NODE_NAME_CASE(GREVI)
3574   NODE_NAME_CASE(GREVIW)
3575   NODE_NAME_CASE(GORCI)
3576   NODE_NAME_CASE(GORCIW)
3577   NODE_NAME_CASE(VMV_X_S)
3578   NODE_NAME_CASE(SPLAT_VECTOR_I64)
3579   NODE_NAME_CASE(READ_VLENB)
3580   }
3581   // clang-format on
3582   return nullptr;
3583 #undef NODE_NAME_CASE
3584 }
3585 
3586 /// getConstraintType - Given a constraint letter, return the type of
3587 /// constraint it is for this target.
3588 RISCVTargetLowering::ConstraintType
3589 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
3590   if (Constraint.size() == 1) {
3591     switch (Constraint[0]) {
3592     default:
3593       break;
3594     case 'f':
3595       return C_RegisterClass;
3596     case 'I':
3597     case 'J':
3598     case 'K':
3599       return C_Immediate;
3600     case 'A':
3601       return C_Memory;
3602     }
3603   }
3604   return TargetLowering::getConstraintType(Constraint);
3605 }
3606 
3607 std::pair<unsigned, const TargetRegisterClass *>
3608 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3609                                                   StringRef Constraint,
3610                                                   MVT VT) const {
3611   // First, see if this is a constraint that directly corresponds to a
3612   // RISCV register class.
3613   if (Constraint.size() == 1) {
3614     switch (Constraint[0]) {
3615     case 'r':
3616       return std::make_pair(0U, &RISCV::GPRRegClass);
3617     case 'f':
3618       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
3619         return std::make_pair(0U, &RISCV::FPR16RegClass);
3620       if (Subtarget.hasStdExtF() && VT == MVT::f32)
3621         return std::make_pair(0U, &RISCV::FPR32RegClass);
3622       if (Subtarget.hasStdExtD() && VT == MVT::f64)
3623         return std::make_pair(0U, &RISCV::FPR64RegClass);
3624       break;
3625     default:
3626       break;
3627     }
3628   }
3629 
3630   // Clang will correctly decode the usage of register name aliases into their
3631   // official names. However, other frontends like `rustc` do not. This allows
3632   // users of these frontends to use the ABI names for registers in LLVM-style
3633   // register constraints.
3634   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
3635                                .Case("{zero}", RISCV::X0)
3636                                .Case("{ra}", RISCV::X1)
3637                                .Case("{sp}", RISCV::X2)
3638                                .Case("{gp}", RISCV::X3)
3639                                .Case("{tp}", RISCV::X4)
3640                                .Case("{t0}", RISCV::X5)
3641                                .Case("{t1}", RISCV::X6)
3642                                .Case("{t2}", RISCV::X7)
3643                                .Cases("{s0}", "{fp}", RISCV::X8)
3644                                .Case("{s1}", RISCV::X9)
3645                                .Case("{a0}", RISCV::X10)
3646                                .Case("{a1}", RISCV::X11)
3647                                .Case("{a2}", RISCV::X12)
3648                                .Case("{a3}", RISCV::X13)
3649                                .Case("{a4}", RISCV::X14)
3650                                .Case("{a5}", RISCV::X15)
3651                                .Case("{a6}", RISCV::X16)
3652                                .Case("{a7}", RISCV::X17)
3653                                .Case("{s2}", RISCV::X18)
3654                                .Case("{s3}", RISCV::X19)
3655                                .Case("{s4}", RISCV::X20)
3656                                .Case("{s5}", RISCV::X21)
3657                                .Case("{s6}", RISCV::X22)
3658                                .Case("{s7}", RISCV::X23)
3659                                .Case("{s8}", RISCV::X24)
3660                                .Case("{s9}", RISCV::X25)
3661                                .Case("{s10}", RISCV::X26)
3662                                .Case("{s11}", RISCV::X27)
3663                                .Case("{t3}", RISCV::X28)
3664                                .Case("{t4}", RISCV::X29)
3665                                .Case("{t5}", RISCV::X30)
3666                                .Case("{t6}", RISCV::X31)
3667                                .Default(RISCV::NoRegister);
3668   if (XRegFromAlias != RISCV::NoRegister)
3669     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
3670 
3671   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
3672   // TableGen record rather than the AsmName to choose registers for InlineAsm
3673   // constraints, plus we want to match those names to the widest floating point
3674   // register type available, manually select floating point registers here.
3675   //
3676   // The second case is the ABI name of the register, so that frontends can also
3677   // use the ABI names in register constraint lists.
3678   if (Subtarget.hasStdExtF()) {
3679     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
3680                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
3681                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
3682                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
3683                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
3684                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
3685                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
3686                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
3687                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
3688                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
3689                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
3690                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
3691                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
3692                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
3693                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
3694                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
3695                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
3696                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
3697                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
3698                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
3699                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
3700                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
3701                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
3702                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
3703                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
3704                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
3705                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
3706                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
3707                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
3708                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
3709                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
3710                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
3711                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
3712                         .Default(RISCV::NoRegister);
3713     if (FReg != RISCV::NoRegister) {
3714       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
3715       if (Subtarget.hasStdExtD()) {
3716         unsigned RegNo = FReg - RISCV::F0_F;
3717         unsigned DReg = RISCV::F0_D + RegNo;
3718         return std::make_pair(DReg, &RISCV::FPR64RegClass);
3719       }
3720       return std::make_pair(FReg, &RISCV::FPR32RegClass);
3721     }
3722   }
3723 
3724   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3725 }
3726 
3727 unsigned
3728 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
3729   // Currently only support length 1 constraints.
3730   if (ConstraintCode.size() == 1) {
3731     switch (ConstraintCode[0]) {
3732     case 'A':
3733       return InlineAsm::Constraint_A;
3734     default:
3735       break;
3736     }
3737   }
3738 
3739   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
3740 }
3741 
3742 void RISCVTargetLowering::LowerAsmOperandForConstraint(
3743     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
3744     SelectionDAG &DAG) const {
3745   // Currently only support length 1 constraints.
3746   if (Constraint.length() == 1) {
3747     switch (Constraint[0]) {
3748     case 'I':
3749       // Validate & create a 12-bit signed immediate operand.
3750       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3751         uint64_t CVal = C->getSExtValue();
3752         if (isInt<12>(CVal))
3753           Ops.push_back(
3754               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
3755       }
3756       return;
3757     case 'J':
3758       // Validate & create an integer zero operand.
3759       if (auto *C = dyn_cast<ConstantSDNode>(Op))
3760         if (C->getZExtValue() == 0)
3761           Ops.push_back(
3762               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
3763       return;
3764     case 'K':
3765       // Validate & create a 5-bit unsigned immediate operand.
3766       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3767         uint64_t CVal = C->getZExtValue();
3768         if (isUInt<5>(CVal))
3769           Ops.push_back(
3770               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
3771       }
3772       return;
3773     default:
3774       break;
3775     }
3776   }
3777   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3778 }
3779 
3780 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
3781                                                    Instruction *Inst,
3782                                                    AtomicOrdering Ord) const {
3783   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
3784     return Builder.CreateFence(Ord);
3785   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
3786     return Builder.CreateFence(AtomicOrdering::Release);
3787   return nullptr;
3788 }
3789 
3790 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
3791                                                     Instruction *Inst,
3792                                                     AtomicOrdering Ord) const {
3793   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
3794     return Builder.CreateFence(AtomicOrdering::Acquire);
3795   return nullptr;
3796 }
3797 
3798 TargetLowering::AtomicExpansionKind
3799 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
3800   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
3801   // point operations can't be used in an lr/sc sequence without breaking the
3802   // forward-progress guarantee.
3803   if (AI->isFloatingPointOperation())
3804     return AtomicExpansionKind::CmpXChg;
3805 
3806   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
3807   if (Size == 8 || Size == 16)
3808     return AtomicExpansionKind::MaskedIntrinsic;
3809   return AtomicExpansionKind::None;
3810 }
3811 
3812 static Intrinsic::ID
3813 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
3814   if (XLen == 32) {
3815     switch (BinOp) {
3816     default:
3817       llvm_unreachable("Unexpected AtomicRMW BinOp");
3818     case AtomicRMWInst::Xchg:
3819       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
3820     case AtomicRMWInst::Add:
3821       return Intrinsic::riscv_masked_atomicrmw_add_i32;
3822     case AtomicRMWInst::Sub:
3823       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
3824     case AtomicRMWInst::Nand:
3825       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
3826     case AtomicRMWInst::Max:
3827       return Intrinsic::riscv_masked_atomicrmw_max_i32;
3828     case AtomicRMWInst::Min:
3829       return Intrinsic::riscv_masked_atomicrmw_min_i32;
3830     case AtomicRMWInst::UMax:
3831       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
3832     case AtomicRMWInst::UMin:
3833       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
3834     }
3835   }
3836 
3837   if (XLen == 64) {
3838     switch (BinOp) {
3839     default:
3840       llvm_unreachable("Unexpected AtomicRMW BinOp");
3841     case AtomicRMWInst::Xchg:
3842       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
3843     case AtomicRMWInst::Add:
3844       return Intrinsic::riscv_masked_atomicrmw_add_i64;
3845     case AtomicRMWInst::Sub:
3846       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
3847     case AtomicRMWInst::Nand:
3848       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
3849     case AtomicRMWInst::Max:
3850       return Intrinsic::riscv_masked_atomicrmw_max_i64;
3851     case AtomicRMWInst::Min:
3852       return Intrinsic::riscv_masked_atomicrmw_min_i64;
3853     case AtomicRMWInst::UMax:
3854       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
3855     case AtomicRMWInst::UMin:
3856       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
3857     }
3858   }
3859 
3860   llvm_unreachable("Unexpected XLen\n");
3861 }
3862 
3863 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
3864     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
3865     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
3866   unsigned XLen = Subtarget.getXLen();
3867   Value *Ordering =
3868       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
3869   Type *Tys[] = {AlignedAddr->getType()};
3870   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
3871       AI->getModule(),
3872       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
3873 
3874   if (XLen == 64) {
3875     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
3876     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
3877     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
3878   }
3879 
3880   Value *Result;
3881 
3882   // Must pass the shift amount needed to sign extend the loaded value prior
3883   // to performing a signed comparison for min/max. ShiftAmt is the number of
3884   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
3885   // is the number of bits to left+right shift the value in order to
3886   // sign-extend.
3887   if (AI->getOperation() == AtomicRMWInst::Min ||
3888       AI->getOperation() == AtomicRMWInst::Max) {
3889     const DataLayout &DL = AI->getModule()->getDataLayout();
3890     unsigned ValWidth =
3891         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
3892     Value *SextShamt =
3893         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
3894     Result = Builder.CreateCall(LrwOpScwLoop,
3895                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
3896   } else {
3897     Result =
3898         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
3899   }
3900 
3901   if (XLen == 64)
3902     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
3903   return Result;
3904 }
3905 
3906 TargetLowering::AtomicExpansionKind
3907 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
3908     AtomicCmpXchgInst *CI) const {
3909   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
3910   if (Size == 8 || Size == 16)
3911     return AtomicExpansionKind::MaskedIntrinsic;
3912   return AtomicExpansionKind::None;
3913 }
3914 
3915 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
3916     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
3917     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
3918   unsigned XLen = Subtarget.getXLen();
3919   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
3920   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
3921   if (XLen == 64) {
3922     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
3923     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
3924     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
3925     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
3926   }
3927   Type *Tys[] = {AlignedAddr->getType()};
3928   Function *MaskedCmpXchg =
3929       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
3930   Value *Result = Builder.CreateCall(
3931       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
3932   if (XLen == 64)
3933     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
3934   return Result;
3935 }
3936 
3937 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3938                                                      EVT VT) const {
3939   VT = VT.getScalarType();
3940 
3941   if (!VT.isSimple())
3942     return false;
3943 
3944   switch (VT.getSimpleVT().SimpleTy) {
3945   case MVT::f16:
3946     return Subtarget.hasStdExtZfh();
3947   case MVT::f32:
3948     return Subtarget.hasStdExtF();
3949   case MVT::f64:
3950     return Subtarget.hasStdExtD();
3951   default:
3952     break;
3953   }
3954 
3955   return false;
3956 }
3957 
3958 Register RISCVTargetLowering::getExceptionPointerRegister(
3959     const Constant *PersonalityFn) const {
3960   return RISCV::X10;
3961 }
3962 
3963 Register RISCVTargetLowering::getExceptionSelectorRegister(
3964     const Constant *PersonalityFn) const {
3965   return RISCV::X11;
3966 }
3967 
3968 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
3969   // Return false to suppress the unnecessary extensions if the LibCall
3970   // arguments or return value is f32 type for LP64 ABI.
3971   RISCVABI::ABI ABI = Subtarget.getTargetABI();
3972   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
3973     return false;
3974 
3975   return true;
3976 }
3977 
3978 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
3979                                                  SDValue C) const {
3980   // Check integral scalar types.
3981   if (VT.isScalarInteger()) {
3982     // Omit the optimization if the sub target has the M extension and the data
3983     // size exceeds XLen.
3984     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
3985       return false;
3986     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
3987       // Break the MUL to a SLLI and an ADD/SUB.
3988       const APInt &Imm = ConstNode->getAPIntValue();
3989       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
3990           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
3991         return true;
3992       // Omit the following optimization if the sub target has the M extension
3993       // and the data size >= XLen.
3994       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
3995         return false;
3996       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
3997       // a pair of LUI/ADDI.
3998       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
3999         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
4000         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
4001             (1 - ImmS).isPowerOf2())
4002         return true;
4003       }
4004     }
4005   }
4006 
4007   return false;
4008 }
4009 
4010 #define GET_REGISTER_MATCHER
4011 #include "RISCVGenAsmMatcher.inc"
4012 
4013 Register
4014 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
4015                                        const MachineFunction &MF) const {
4016   Register Reg = MatchRegisterAltName(RegName);
4017   if (Reg == RISCV::NoRegister)
4018     Reg = MatchRegisterName(RegName);
4019   if (Reg == RISCV::NoRegister)
4020     report_fatal_error(
4021         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
4022   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
4023   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
4024     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
4025                              StringRef(RegName) + "\"."));
4026   return Reg;
4027 }
4028 
4029 namespace llvm {
4030 namespace RISCVVIntrinsicsTable {
4031 
4032 #define GET_RISCVVIntrinsicsTable_IMPL
4033 #include "RISCVGenSearchableTables.inc"
4034 
4035 } // namespace RISCVVIntrinsicsTable
4036 } // namespace llvm
4037