//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  if (Subtarget.hasStdExtV()) {
    addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);

    addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);

    if (Subtarget.hasStdExtZfh()) {
      addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass);
    }

    if (Subtarget.hasStdExtF()) {
      addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);
    }

    if (Subtarget.hasStdExtD()) {
      addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::BSWAP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Legal);
    setOperationAction(ISD::FSHR, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    for (auto VT : MVT::integer_scalable_vector_valuetypes()) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);
    }

    // We must custom-lower SPLAT_VECTOR vXi64 on RV32
    if (!Subtarget.is64Bit())
      setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  // We can use any register for comparisons
  setHasMultipleConditionRegisters();

  if (Subtarget.hasStdExtZbp()) {
    setTargetDAGCombine(ISD::OR);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV())
    return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
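  // For example (illustrative): an access like (lw a0, 2047(a1)) has a legal
  // offset, while an offset of 2048 does not fit in 12 signed bits and would
  // first have to be materialised into a register.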
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
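// For example, (setcc a, b, setgt) becomes (setcc b, a, setlt), for which a
// BLT can be selected directly.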
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
            Subtarget.hasStdExtZfh()) &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::i16)
        return SDValue();
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::i32)
        return SDValue();
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
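    // For example (illustrative): an i32 BITREVERSE yields GREVI with
    // immediate 31, while an i32 BSWAP clears the low three bits to give
    // immediate 24 (on RV64, 63 and 56 respectively).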
    return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
  }
  case ISD::SPLAT_VECTOR:
    return lowerSPLATVECTOR(Op, DAG);
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
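      // As assembly this is roughly (register choice illustrative):
      //   auipc a0, %pcrel_hi(sym)
      //   addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
      // where .Lpcrel_hi0 labels the auipc.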
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
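    // As assembly this is roughly (register choice illustrative):
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)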
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2GiB range within
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
                                            SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT address.
  // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
  // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)
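  //
  // Worked example (illustrative), XLEN=32: for Shamt=4, Lo becomes Lo << 4
  // and Hi becomes (Hi << 4) | (Lo >>u 28); for Shamt=40, Lo becomes 0 and
  // Hi becomes Lo << 8.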

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;
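  //
  // Worked example (illustrative), XLEN=32, SRL: for Shamt=4, Lo becomes
  // (Lo >>u 4) | (Hi << 28) and Hi becomes Hi >>u 4; for Shamt=40, Lo becomes
  // Hi >>u 8 and Hi becomes 0.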

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

// Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
// illegal (currently only vXi64 RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to SPLAT_VECTOR_I64
SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VecVT = Op.getValueType();
  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
         "Unexpected SPLAT_VECTOR lowering");
  SDValue SplatVal = Op.getOperand(0);

  // If we can prove that the value is a sign-extended 32-bit value, lower this
  // as a custom node in order to try and match RVV vector/scalar instructions.
  if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
    if (isInt<32>(CVal->getSExtValue()))
      return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
                         DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
  }

  // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
  // to accidentally sign-extend the 32-bit halves to the e64 SEW:
  // vmv.v.x vX, hi
  // vsll.vx vX, vX, /*32*/
  // vmv.v.x vY, lo
  // vsll.vx vY, vY, /*32*/
  // vsrl.vx vY, vY, /*32*/
  // vor.vv vX, vX, vY
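  //
  // For example (illustrative), splatting 0x00000001FFFFFFFE: the high half
  // splats to 1 per element and is shifted left to 0x100000000; the low half
  // splats to 0xFFFFFFFE (sign-extended to 0xFFFFFFFFFFFFFFFE), and the
  // shl/srl pair by 32 clears the upper half back to 0x00000000FFFFFFFE; the
  // final OR reassembles 0x00000001FFFFFFFE in every element.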
  SDValue One = DAG.getConstant(1, DL, MVT::i32);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);

  Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
  Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
  Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);

  if (isNullConstant(Hi))
    return Lo;

  Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
  Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);

  return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);

  if (Subtarget.hasStdExtV()) {
    // Some RVV intrinsics may claim that they want an integer operand to be
    // extended.
    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
      if (II->ExtendedOperand) {
        assert(II->ExtendedOperand < Op.getNumOperands());
        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
        SDValue &ScalarOp = Operands[II->ExtendedOperand];
        EVT OpVT = ScalarOp.getValueType();
        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
          // If the operand is a constant, sign extend to increase our chances
          // of being able to use a .vi instruction. ANY_EXTEND would become a
          // zero extend and the simm5 check in isel would fail.
          // FIXME: Should we ignore the upper bits in isel instead?
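          // For example (illustrative): an i8 constant -1 any-extended to
          // XLenVT becomes 255, which fails the simm5 range check of
          // [-16, 15]; sign-extending keeps it at -1, which still matches a
          // .vi form such as vadd.vi.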
          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
                                                          : ISD::ANY_EXTEND;
          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
          return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                             Operands);
        }
      }
    }
  }

  switch (IntNo) {
  default:
    return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  case Intrinsic::riscv_vmv_x_s:
    assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
    return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
                       Op.getOperand(1));
  }
}

SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                    SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);

  if (Subtarget.hasStdExtV()) {
    // Some RVV intrinsics may claim that they want an integer operand to be
    // extended.
    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
      if (II->ExtendedOperand) {
        // The operands start from the second argument in INTRINSIC_W_CHAIN.
        unsigned ExtendOp = II->ExtendedOperand + 1;
        assert(ExtendOp < Op.getNumOperands());
        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
        SDValue &ScalarOp = Operands[ExtendOp];
        EVT OpVT = ScalarOp.getValueType();
        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
          // If the operand is a constant, sign extend to increase our chances
          // of being able to use a .vi instruction. ANY_EXTEND would become a
          // zero extend and the simm5 check in isel would fail.
          // FIXME: Should we ignore the upper bits in isel instead?
          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
                                                          : ISD::ANY_EXTEND;
          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
                             Operands);
        }
      }
    }
  }

  return SDValue();
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  case RISCVISD::GREVI:
    return RISCVISD::GREVIW;
  case RISCVISD::GORCI:
    return RISCVISD::GORCIW;
  }
}
// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// later on because the fact that the operation was originally of type i32 is
// lost.
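// For example (illustrative), an i32 (srl x, y) becomes
// (trunc (RISCVISD::SRLW (anyext x), (anyext y))), from which the srlw
// instruction can be selected.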
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics to reduce the number of sign-extension instructions needed.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization we'd end up with 'di'. If
    // the FP type doesn't need to be softened just let generic type
    // legalization promote the result type.
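    // For example (illustrative), an f32 -> i32 FP_TO_SINT on a soft-float
    // target should end up calling __fixsfsi rather than the __fixsfdi that
    // i64 promotion would give.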
1265     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
1266         TargetLowering::TypeSoftenFloat)
1267       return;
1268     RTLIB::Libcall LC;
1269     if (N->getOpcode() == ISD::FP_TO_SINT ||
1270         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
1271       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
1272     else
1273       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
1274     MakeLibCallOptions CallOptions;
1275     EVT OpVT = Op0.getValueType();
1276     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
1277     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
1278     SDValue Result;
1279     std::tie(Result, Chain) =
1280         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
1281     Results.push_back(Result);
1282     if (IsStrict)
1283       Results.push_back(Chain);
1284     break;
1285   }
1286   case ISD::READCYCLECOUNTER: {
1287     assert(!Subtarget.is64Bit() &&
1288            "READCYCLECOUNTER only has custom type legalization on riscv32");
1289 
1290     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
1291     SDValue RCW =
1292         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
1293 
1294     Results.push_back(
1295         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
1296     Results.push_back(RCW.getValue(2));
1297     break;
1298   }
1299   case ISD::ADD:
1300   case ISD::SUB:
1301   case ISD::MUL:
1302     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1303            "Unexpected custom legalisation");
1304     if (N->getOperand(1).getOpcode() == ISD::Constant)
1305       return;
1306     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
1307     break;
1308   case ISD::SHL:
1309   case ISD::SRA:
1310   case ISD::SRL:
1311     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1312            "Unexpected custom legalisation");
1313     if (N->getOperand(1).getOpcode() == ISD::Constant)
1314       return;
1315     Results.push_back(customLegalizeToWOp(N, DAG));
1316     break;
1317   case ISD::ROTL:
1318   case ISD::ROTR:
1319     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1320            "Unexpected custom legalisation");
1321     Results.push_back(customLegalizeToWOp(N, DAG));
1322     break;
1323   case ISD::SDIV:
1324   case ISD::UDIV:
1325   case ISD::UREM:
1326     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1327            Subtarget.hasStdExtM() && "Unexpected custom legalisation");
1328     if (N->getOperand(0).getOpcode() == ISD::Constant ||
1329         N->getOperand(1).getOpcode() == ISD::Constant)
1330       return;
1331     Results.push_back(customLegalizeToWOp(N, DAG));
1332     break;
1333   case ISD::BITCAST: {
1334     assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1335              Subtarget.hasStdExtF()) ||
1336             (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
1337            "Unexpected custom legalisation");
1338     SDValue Op0 = N->getOperand(0);
1339     if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
1340       if (Op0.getValueType() != MVT::f16)
1341         return;
1342       SDValue FPConv =
1343           DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
1344       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
1345     } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1346                Subtarget.hasStdExtF()) {
1347       if (Op0.getValueType() != MVT::f32)
1348         return;
1349       SDValue FPConv =
1350           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
1351       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
1352     }
1353     break;
1354   }
1355   case RISCVISD::GREVI:
1356   case RISCVISD::GORCI: {
1357     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1358            "Unexpected custom legalisation");
1359     // This is similar to customLegalizeToWOp, except that we pass the second
1360     // operand (a TargetConstant) straight through: it is already of type
1361     // XLenVT.
1362     SDLoc DL(N);
1363     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
1364     SDValue NewOp0 =
1365         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1366     SDValue NewRes =
1367         DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
1368     // ReplaceNodeResults requires we maintain the same type for the return
1369     // value.
1370     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
1371     break;
1372   }
1373   case ISD::BSWAP:
1374   case ISD::BITREVERSE: {
1375     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1376            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1377     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
1378                                  N->getOperand(0));
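    // Each set bit of the GREV shift amount enables one swap stage: 24
    // (0b11000) swaps bytes and halfwords (a 32-bit byte swap), while 31
    // (0b11111) enables every stage and reverses all 32 bits.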
1379     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
1380     SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
1381                                  DAG.getTargetConstant(Imm, DL,
1382                                                        Subtarget.getXLenVT()));
1383     // ReplaceNodeResults requires we maintain the same type for the return
1384     // value.
1385     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
1386     break;
1387   }
1388   case ISD::FSHL:
1389   case ISD::FSHR: {
1390     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1391            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
1392     SDValue NewOp0 =
1393         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1394     SDValue NewOp1 =
1395         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
1396     SDValue NewOp2 =
1397         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
    // FSLW/FSRW take a 6-bit shift amount but i32 FSHL/FSHR only use 5 bits.
1399     // Mask the shift amount to 5 bits.
1400     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
1401                          DAG.getConstant(0x1f, DL, MVT::i64));
1402     unsigned Opc =
1403         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
1404     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
1405     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
1406     break;
1407   }
1408   case ISD::INTRINSIC_WO_CHAIN: {
1409     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
1410     switch (IntNo) {
1411     default:
1412       llvm_unreachable(
1413           "Don't know how to custom type legalize this intrinsic!");
1414     case Intrinsic::riscv_vmv_x_s: {
1415       EVT VT = N->getValueType(0);
1416       assert((VT == MVT::i8 || VT == MVT::i16 ||
1417               (Subtarget.is64Bit() && VT == MVT::i32)) &&
1418              "Unexpected custom legalisation!");
1419       SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
1420                                     Subtarget.getXLenVT(), N->getOperand(1));
1421       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
1422       break;
1423     }
1424     }
1425     break;
1426   }
1427   }
1428 }
1429 
1430 // A structure to hold one of the bit-manipulation patterns below. Together, a
1431 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
1432 //   (or (and (shl x, 1), 0xAAAAAAAA),
1433 //       (and (srl x, 1), 0x55555555))
1434 struct RISCVBitmanipPat {
1435   SDValue Op;
1436   unsigned ShAmt;
1437   bool IsSHL;
1438 
1439   bool formsPairWith(const RISCVBitmanipPat &Other) const {
1440     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
1441   }
1442 };
1443 
1444 // Matches any of the following bit-manipulation patterns:
1445 //   (and (shl x, 1), (0x55555555 << 1))
1446 //   (and (srl x, 1), 0x55555555)
1447 //   (shl (and x, 0x55555555), 1)
1448 //   (srl (and x, (0x55555555 << 1)), 1)
1449 // where the shift amount and mask may vary thus:
1450 //   [1]  = 0x55555555 / 0xAAAAAAAA
1451 //   [2]  = 0x33333333 / 0xCCCCCCCC
1452 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
1453 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
1455 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
1456 static Optional<RISCVBitmanipPat> matchRISCVBitmanipPat(SDValue Op) {
1457   Optional<uint64_t> Mask;
1458   // Optionally consume a mask around the shift operation.
1459   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
1460     Mask = Op.getConstantOperandVal(1);
1461     Op = Op.getOperand(0);
1462   }
1463   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
1464     return None;
1465   bool IsSHL = Op.getOpcode() == ISD::SHL;
1466 
1467   if (!isa<ConstantSDNode>(Op.getOperand(1)))
1468     return None;
1469   auto ShAmt = Op.getConstantOperandVal(1);
1470 
1471   if (!isPowerOf2_64(ShAmt))
1472     return None;
1473 
1474   // These are the unshifted masks which we use to match bit-manipulation
1475   // patterns. They may be shifted left in certain circumstances.
1476   static const uint64_t BitmanipMasks[] = {
1477       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
1478       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
1479   };
1480 
1481   unsigned MaskIdx = Log2_64(ShAmt);
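  // ShAmt is a power of two, so Log2_64 maps 1/2/4/8/16/32 to indices 0-5 of
  // BitmanipMasks; anything larger is rejected below.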
1482   if (MaskIdx >= array_lengthof(BitmanipMasks))
1483     return None;
1484 
1485   auto Src = Op.getOperand(0);
1486 
1487   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
1488   auto ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
1489 
1490   // The expected mask is shifted left when the AND is found around SHL
1491   // patterns.
1492   //   ((x >> 1) & 0x55555555)
1493   //   ((x << 1) & 0xAAAAAAAA)
1494   bool SHLExpMask = IsSHL;
1495 
1496   if (!Mask) {
1497     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
1498     // the mask is all ones: consume that now.
1499     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
1500       Mask = Src.getConstantOperandVal(1);
1501       Src = Src.getOperand(0);
1502       // The expected mask is now in fact shifted left for SRL, so reverse the
1503       // decision.
1504       //   ((x & 0xAAAAAAAA) >> 1)
1505       //   ((x & 0x55555555) << 1)
1506       SHLExpMask = !SHLExpMask;
1507     } else {
1508       // Use a default shifted mask of all-ones if there's no AND, truncated
1509       // down to the expected width. This simplifies the logic later on.
1510       Mask = maskTrailingOnes<uint64_t>(Width);
1511       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
1512     }
1513   }
1514 
1515   if (SHLExpMask)
1516     ExpMask <<= ShAmt;
1517 
1518   if (Mask != ExpMask)
1519     return None;
1520 
1521   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
1522 }
1523 
// Matches the following pattern as a GREVI(W) operation:
1525 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
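// For example,
//   (or (and (shl x, 4), 0xF0F0F0F0), (and (srl x, 4), 0x0F0F0F0F))
// becomes (GREVI x, 4), which swaps the adjacent nibbles of x.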
1526 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
1527                                const RISCVSubtarget &Subtarget) {
1528   EVT VT = Op.getValueType();
1529 
1530   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1531     auto LHS = matchRISCVBitmanipPat(Op.getOperand(0));
1532     auto RHS = matchRISCVBitmanipPat(Op.getOperand(1));
1533     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
1534       SDLoc DL(Op);
1535       return DAG.getNode(
1536           RISCVISD::GREVI, DL, VT, LHS->Op,
1537           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1538     }
1539   }
1540   return SDValue();
1541 }
1542 
// Matches any of the following patterns as a GORCI(W) operation:
// 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
// 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
// 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
// 4.  (or (rotl/rotr x, bitwidth/2), x)
// Note that with the variant of 3.,
//     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
// the inner pattern will first be matched as GREVI and then the outer
// pattern will be matched to GORCI via the first rule above.
1552 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
1553                                const RISCVSubtarget &Subtarget) {
1554   EVT VT = Op.getValueType();
1555 
1556   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1557     SDLoc DL(Op);
1558     SDValue Op0 = Op.getOperand(0);
1559     SDValue Op1 = Op.getOperand(1);
1560 
1561     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
1562       if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
1563           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
1564         return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
1565       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
1566       if ((Reverse.getOpcode() == ISD::ROTL ||
1567            Reverse.getOpcode() == ISD::ROTR) &&
1568           Reverse.getOperand(0) == X &&
1569           isa<ConstantSDNode>(Reverse.getOperand(1))) {
1570         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
1571         if (RotAmt == (VT.getSizeInBits() / 2))
1572           return DAG.getNode(
1573               RISCVISD::GORCI, DL, VT, X,
1574               DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
1575       }
1576       return SDValue();
1577     };
1578 
1579     // Check for either commutable permutation of (or (GREVI x, shamt), x)
1580     if (SDValue V = MatchOROfReverse(Op0, Op1))
1581       return V;
1582     if (SDValue V = MatchOROfReverse(Op1, Op0))
1583       return V;
1584 
1585     // OR is commutable so canonicalize its OR operand to the left
1586     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
1587       std::swap(Op0, Op1);
1588     if (Op0.getOpcode() != ISD::OR)
1589       return SDValue();
1590     SDValue OrOp0 = Op0.getOperand(0);
1591     SDValue OrOp1 = Op0.getOperand(1);
1592     auto LHS = matchRISCVBitmanipPat(OrOp0);
1593     // OR is commutable so swap the operands and try again: x might have been
1594     // on the left
1595     if (!LHS) {
1596       std::swap(OrOp0, OrOp1);
1597       LHS = matchRISCVBitmanipPat(OrOp0);
1598     }
1599     auto RHS = matchRISCVBitmanipPat(Op1);
1600     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
1601       return DAG.getNode(
1602           RISCVISD::GORCI, DL, VT, LHS->Op,
1603           DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1604     }
1605   }
1606   return SDValue();
1607 }
1608 
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when C1^C2 is zero, since any repeated GREVI stage
// undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but is redundant.
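// For example, (GREVI (GREVI x, 1), 3) becomes (GREVI x, 2): the shared
// stage-1 swap cancels out and only the stage-2 swap remains.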
1613 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
1614   unsigned ShAmt1 = N->getConstantOperandVal(1);
1615   SDValue Src = N->getOperand(0);
1616 
1617   if (Src.getOpcode() != N->getOpcode())
1618     return SDValue();
1619 
1620   unsigned ShAmt2 = Src.getConstantOperandVal(1);
1621   Src = Src.getOperand(0);
1622 
1623   unsigned CombinedShAmt;
1624   if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
1625     CombinedShAmt = ShAmt1 | ShAmt2;
1626   else
1627     CombinedShAmt = ShAmt1 ^ ShAmt2;
1628 
1629   if (CombinedShAmt == 0)
1630     return Src;
1631 
1632   SDLoc DL(N);
1633   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
1634                      DAG.getTargetConstant(CombinedShAmt, DL,
1635                                            N->getOperand(1).getValueType()));
1636 }
1637 
1638 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1639                                                DAGCombinerInfo &DCI) const {
1640   SelectionDAG &DAG = DCI.DAG;
1641 
1642   switch (N->getOpcode()) {
1643   default:
1644     break;
1645   case RISCVISD::SplitF64: {
1646     SDValue Op0 = N->getOperand(0);
1647     // If the input to SplitF64 is just BuildPairF64 then the operation is
1648     // redundant. Instead, use BuildPairF64's operands directly.
1649     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
1650       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
1651 
1652     SDLoc DL(N);
1653 
1654     // It's cheaper to materialise two 32-bit integers than to load a double
1655     // from the constant pool and transfer it to integer registers through the
1656     // stack.
1657     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
1658       APInt V = C->getValueAPF().bitcastToAPInt();
1659       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
1660       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
1661       return DCI.CombineTo(N, Lo, Hi);
1662     }
1663 
1664     // This is a target-specific version of a DAGCombine performed in
1665     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1666     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1667     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1668     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1669         !Op0.getNode()->hasOneUse())
1670       break;
1671     SDValue NewSplitF64 =
1672         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
1673                     Op0.getOperand(0));
1674     SDValue Lo = NewSplitF64.getValue(0);
1675     SDValue Hi = NewSplitF64.getValue(1);
1676     APInt SignBit = APInt::getSignMask(32);
1677     if (Op0.getOpcode() == ISD::FNEG) {
1678       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
1679                                   DAG.getConstant(SignBit, DL, MVT::i32));
1680       return DCI.CombineTo(N, Lo, NewHi);
1681     }
1682     assert(Op0.getOpcode() == ISD::FABS);
1683     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
1684                                 DAG.getConstant(~SignBit, DL, MVT::i32));
1685     return DCI.CombineTo(N, Lo, NewHi);
1686   }
1687   case RISCVISD::SLLW:
1688   case RISCVISD::SRAW:
1689   case RISCVISD::SRLW:
1690   case RISCVISD::ROLW:
1691   case RISCVISD::RORW: {
1692     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
1693     SDValue LHS = N->getOperand(0);
1694     SDValue RHS = N->getOperand(1);
1695     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
1696     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
1697     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
1698         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
1699       if (N->getOpcode() != ISD::DELETED_NODE)
1700         DCI.AddToWorklist(N);
1701       return SDValue(N, 0);
1702     }
1703     break;
1704   }
1705   case RISCVISD::FSLW:
1706   case RISCVISD::FSRW: {
1707     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
1708     // read.
1709     SDValue Op0 = N->getOperand(0);
1710     SDValue Op1 = N->getOperand(1);
1711     SDValue ShAmt = N->getOperand(2);
1712     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1713     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
1714     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
1715         SimplifyDemandedBits(Op1, OpMask, DCI) ||
1716         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
1717       if (N->getOpcode() != ISD::DELETED_NODE)
1718         DCI.AddToWorklist(N);
1719       return SDValue(N, 0);
1720     }
1721     break;
1722   }
1723   case RISCVISD::GREVIW:
1724   case RISCVISD::GORCIW: {
1725     // Only the lower 32 bits of the first operand are read
1726     SDValue Op0 = N->getOperand(0);
1727     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1728     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
1729       if (N->getOpcode() != ISD::DELETED_NODE)
1730         DCI.AddToWorklist(N);
1731       return SDValue(N, 0);
1732     }
1733 
1734     return combineGREVI_GORCI(N, DCI.DAG);
1735   }
1736   case RISCVISD::FMV_X_ANYEXTW_RV64: {
1737     SDLoc DL(N);
1738     SDValue Op0 = N->getOperand(0);
1739     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1740     // conversion is unnecessary and can be replaced with an ANY_EXTEND
1741     // of the FMV_W_X_RV64 operand.
1742     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
1743       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
1744              "Unexpected value type!");
1745       return Op0.getOperand(0);
1746     }
1747 
1748     // This is a target-specific version of a DAGCombine performed in
1749     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1750     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1751     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1752     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1753         !Op0.getNode()->hasOneUse())
1754       break;
1755     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1756                                  Op0.getOperand(0));
1757     APInt SignBit = APInt::getSignMask(32).sext(64);
1758     if (Op0.getOpcode() == ISD::FNEG)
1759       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1760                          DAG.getConstant(SignBit, DL, MVT::i64));
1761 
1762     assert(Op0.getOpcode() == ISD::FABS);
1763     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1764                        DAG.getConstant(~SignBit, DL, MVT::i64));
1765   }
1766   case RISCVISD::GREVI:
1767   case RISCVISD::GORCI:
1768     return combineGREVI_GORCI(N, DCI.DAG);
1769   case ISD::OR:
1770     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
1771       return GREV;
1772     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
1773       return GORC;
1774     break;
1775   }
1776 
1777   return SDValue();
1778 }
1779 
1780 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1781     const SDNode *N, CombineLevel Level) const {
1782   // The following folds are only desirable if `(OP _, c1 << c2)` can be
1783   // materialised in fewer instructions than `(OP _, c1)`:
1784   //
1785   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1786   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
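  // For example, on RV32 c1 = 0xFFFF takes lui+addi to materialise, but
  // 0xFFFF << 16 == 0xFFFF0000 is a single lui, so the fold is profitable
  // there.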
1787   SDValue N0 = N->getOperand(0);
1788   EVT Ty = N0.getValueType();
1789   if (Ty.isScalarInteger() &&
1790       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1791     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1792     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1793     if (C1 && C2) {
1794       APInt C1Int = C1->getAPIntValue();
1795       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1796 
1797       // We can materialise `c1 << c2` into an add immediate, so it's "free",
1798       // and the combine should happen, to potentially allow further combines
1799       // later.
1800       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1801           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1802         return true;
1803 
1804       // We can materialise `c1` in an add immediate, so it's "free", and the
1805       // combine should be prevented.
1806       if (C1Int.getMinSignedBits() <= 64 &&
1807           isLegalAddImmediate(C1Int.getSExtValue()))
1808         return false;
1809 
1810       // Neither constant will fit into an immediate, so find materialisation
1811       // costs.
1812       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1813                                               Subtarget.is64Bit());
1814       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1815           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1816 
1817       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1818       // combine should be prevented.
1819       if (C1Cost < ShiftedC1Cost)
1820         return false;
1821     }
1822   }
1823   return true;
1824 }
1825 
1826 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1827     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1828     unsigned Depth) const {
1829   switch (Op.getOpcode()) {
1830   default:
1831     break;
1832   case RISCVISD::SLLW:
1833   case RISCVISD::SRAW:
1834   case RISCVISD::SRLW:
1835   case RISCVISD::DIVW:
1836   case RISCVISD::DIVUW:
1837   case RISCVISD::REMUW:
1838   case RISCVISD::ROLW:
1839   case RISCVISD::RORW:
1840   case RISCVISD::GREVIW:
1841   case RISCVISD::GORCIW:
1842   case RISCVISD::FSLW:
1843   case RISCVISD::FSRW:
1844     // TODO: As the result is sign-extended, this is conservatively correct. A
1845     // more precise answer could be calculated for SRAW depending on known
1846     // bits in the shift amount.
1847     return 33;
1848   case RISCVISD::VMV_X_S:
    // The number of sign bits of the scalar result is computed by obtaining
    // the element type of the input vector operand, subtracting its width
    // from the XLEN, and then adding one (sign bit within the element type).
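    // For example, extracting an i8 element on RV64 yields 64 - 8 + 1 = 57
    // known sign bits.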
1852     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
1853   }
1854 
1855   return 1;
1856 }
1857 
1858 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1859                                                   MachineBasicBlock *BB) {
1860   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1861 
1862   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1863   // Should the count have wrapped while it was being read, we need to try
1864   // again.
1865   // ...
1866   // read:
1867   // rdcycleh x3 # load high word of cycle
1868   // rdcycle  x2 # load low word of cycle
1869   // rdcycleh x4 # load high word of cycle
1870   // bne x3, x4, read # check if high word reads match, otherwise try again
1871   // ...
1872 
1873   MachineFunction &MF = *BB->getParent();
1874   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1875   MachineFunction::iterator It = ++BB->getIterator();
1876 
1877   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1878   MF.insert(It, LoopMBB);
1879 
1880   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1881   MF.insert(It, DoneMBB);
1882 
1883   // Transfer the remainder of BB and its successor edges to DoneMBB.
1884   DoneMBB->splice(DoneMBB->begin(), BB,
1885                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
1886   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1887 
1888   BB->addSuccessor(LoopMBB);
1889 
1890   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1891   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1892   Register LoReg = MI.getOperand(0).getReg();
1893   Register HiReg = MI.getOperand(1).getReg();
1894   DebugLoc DL = MI.getDebugLoc();
1895 
1896   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1897   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1898       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1899       .addReg(RISCV::X0);
1900   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1901       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1902       .addReg(RISCV::X0);
1903   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1904       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1905       .addReg(RISCV::X0);
1906 
1907   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1908       .addReg(HiReg)
1909       .addReg(ReadAgainReg)
1910       .addMBB(LoopMBB);
1911 
1912   LoopMBB->addSuccessor(LoopMBB);
1913   LoopMBB->addSuccessor(DoneMBB);
1914 
1915   MI.eraseFromParent();
1916 
1917   return DoneMBB;
1918 }
1919 
1920 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1921                                              MachineBasicBlock *BB) {
1922   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1923 
1924   MachineFunction &MF = *BB->getParent();
1925   DebugLoc DL = MI.getDebugLoc();
1926   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1927   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1928   Register LoReg = MI.getOperand(0).getReg();
1929   Register HiReg = MI.getOperand(1).getReg();
1930   Register SrcReg = MI.getOperand(2).getReg();
1931   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1932   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1933 
1934   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1935                           RI);
1936   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1937   MachineMemOperand *MMOLo =
1938       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
1939   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
1940       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
1941   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1942       .addFrameIndex(FI)
1943       .addImm(0)
1944       .addMemOperand(MMOLo);
1945   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1946       .addFrameIndex(FI)
1947       .addImm(4)
1948       .addMemOperand(MMOHi);
1949   MI.eraseFromParent(); // The pseudo instruction is gone now.
1950   return BB;
1951 }
1952 
1953 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1954                                                  MachineBasicBlock *BB) {
1955   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1956          "Unexpected instruction");
1957 
1958   MachineFunction &MF = *BB->getParent();
1959   DebugLoc DL = MI.getDebugLoc();
1960   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1961   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1962   Register DstReg = MI.getOperand(0).getReg();
1963   Register LoReg = MI.getOperand(1).getReg();
1964   Register HiReg = MI.getOperand(2).getReg();
1965   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
1966   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1967 
1968   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1969   MachineMemOperand *MMOLo =
1970       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
1971   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
1972       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
1973   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1974       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
1975       .addFrameIndex(FI)
1976       .addImm(0)
1977       .addMemOperand(MMOLo);
1978   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1979       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
1980       .addFrameIndex(FI)
1981       .addImm(4)
1982       .addMemOperand(MMOHi);
1983   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
1984   MI.eraseFromParent(); // The pseudo instruction is gone now.
1985   return BB;
1986 }
1987 
1988 static bool isSelectPseudo(MachineInstr &MI) {
1989   switch (MI.getOpcode()) {
1990   default:
1991     return false;
1992   case RISCV::Select_GPR_Using_CC_GPR:
1993   case RISCV::Select_FPR16_Using_CC_GPR:
1994   case RISCV::Select_FPR32_Using_CC_GPR:
1995   case RISCV::Select_FPR64_Using_CC_GPR:
1996     return true;
1997   }
1998 }
1999 
2000 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
2001                                            MachineBasicBlock *BB) {
2002   // To "insert" Select_* instructions, we actually have to insert the triangle
2003   // control-flow pattern.  The incoming instructions know the destination vreg
2004   // to set, the condition code register to branch on, the true/false values to
2005   // select between, and the condcode to use to select the appropriate branch.
2006   //
2007   // We produce the following control flow:
2008   //     HeadMBB
2009   //     |  \
2010   //     |  IfFalseMBB
2011   //     | /
2012   //    TailMBB
2013   //
2014   // When we find a sequence of selects we attempt to optimize their emission
2015   // by sharing the control flow. Currently we only handle cases where we have
2016   // multiple selects with the exact same condition (same LHS, RHS and CC).
2017   // The selects may be interleaved with other instructions if the other
2018   // instructions meet some requirements we deem safe:
2019   // - They are debug instructions. Otherwise,
2020   // - They do not have side-effects, do not access memory and their inputs do
2021   //   not depend on the results of the select pseudo-instructions.
2022   // The TrueV/FalseV operands of the selects cannot depend on the result of
2023   // previous selects in the sequence.
2024   // These conditions could be further relaxed. See the X86 target for a
2025   // related approach and more information.
2026   Register LHS = MI.getOperand(1).getReg();
2027   Register RHS = MI.getOperand(2).getReg();
2028   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
2029 
2030   SmallVector<MachineInstr *, 4> SelectDebugValues;
2031   SmallSet<Register, 4> SelectDests;
2032   SelectDests.insert(MI.getOperand(0).getReg());
2033 
2034   MachineInstr *LastSelectPseudo = &MI;
2035 
2036   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
2037        SequenceMBBI != E; ++SequenceMBBI) {
2038     if (SequenceMBBI->isDebugInstr())
2039       continue;
2040     else if (isSelectPseudo(*SequenceMBBI)) {
2041       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
2042           SequenceMBBI->getOperand(2).getReg() != RHS ||
2043           SequenceMBBI->getOperand(3).getImm() != CC ||
2044           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
2045           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
2046         break;
2047       LastSelectPseudo = &*SequenceMBBI;
2048       SequenceMBBI->collectDebugValues(SelectDebugValues);
2049       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
2050     } else {
2051       if (SequenceMBBI->hasUnmodeledSideEffects() ||
2052           SequenceMBBI->mayLoadOrStore())
2053         break;
2054       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
2055             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
2056           }))
2057         break;
2058     }
2059   }
2060 
2061   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
2062   const BasicBlock *LLVM_BB = BB->getBasicBlock();
2063   DebugLoc DL = MI.getDebugLoc();
2064   MachineFunction::iterator I = ++BB->getIterator();
2065 
2066   MachineBasicBlock *HeadMBB = BB;
2067   MachineFunction *F = BB->getParent();
2068   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
2069   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
2070 
2071   F->insert(I, IfFalseMBB);
2072   F->insert(I, TailMBB);
2073 
2074   // Transfer debug instructions associated with the selects to TailMBB.
2075   for (MachineInstr *DebugInstr : SelectDebugValues) {
2076     TailMBB->push_back(DebugInstr->removeFromParent());
2077   }
2078 
2079   // Move all instructions after the sequence to TailMBB.
2080   TailMBB->splice(TailMBB->end(), HeadMBB,
2081                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
2082   // Update machine-CFG edges by transferring all successors of the current
2083   // block to the new block which will contain the Phi nodes for the selects.
2084   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
2085   // Set the successors for HeadMBB.
2086   HeadMBB->addSuccessor(IfFalseMBB);
2087   HeadMBB->addSuccessor(TailMBB);
2088 
2089   // Insert appropriate branch.
2090   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
2091 
2092   BuildMI(HeadMBB, DL, TII.get(Opcode))
2093     .addReg(LHS)
2094     .addReg(RHS)
2095     .addMBB(TailMBB);
2096 
2097   // IfFalseMBB just falls through to TailMBB.
2098   IfFalseMBB->addSuccessor(TailMBB);
2099 
2100   // Create PHIs for all of the select pseudo-instructions.
2101   auto SelectMBBI = MI.getIterator();
2102   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
2103   auto InsertionPoint = TailMBB->begin();
2104   while (SelectMBBI != SelectEnd) {
2105     auto Next = std::next(SelectMBBI);
2106     if (isSelectPseudo(*SelectMBBI)) {
2107       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
2108       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
2109               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
2110           .addReg(SelectMBBI->getOperand(4).getReg())
2111           .addMBB(HeadMBB)
2112           .addReg(SelectMBBI->getOperand(5).getReg())
2113           .addMBB(IfFalseMBB);
2114       SelectMBBI->eraseFromParent();
2115     }
2116     SelectMBBI = Next;
2117   }
2118 
2119   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
2120   return TailMBB;
2121 }
2122 
2123 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
2124                                     int VLIndex, unsigned SEWIndex,
2125                                     unsigned VLMul, bool WritesElement0) {
2126   MachineFunction &MF = *BB->getParent();
2127   DebugLoc DL = MI.getDebugLoc();
2128   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2129 
2130   unsigned SEW = MI.getOperand(SEWIndex).getImm();
2131   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2132   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
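  // For example, SEW=8 encodes as 0 and SEW=64 as 3 (log2(SEW / 8)).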
2133 
2134   // LMUL should already be encoded correctly.
2135   RISCVVLMUL Multiplier = static_cast<RISCVVLMUL>(VLMul);
2136 
2137   MachineRegisterInfo &MRI = MF.getRegInfo();
2138 
2139   // VL and VTYPE are alive here.
2140   MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
2141 
2142   if (VLIndex >= 0) {
2143     // Set VL (rs1 != X0).
2144     Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2145     MIB.addReg(DestReg, RegState::Define | RegState::Dead)
2146         .addReg(MI.getOperand(VLIndex).getReg());
2147   } else
    // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
2149     MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
2150         .addReg(RISCV::X0, RegState::Kill);
2151 
2152   // Default to tail agnostic unless the destination is tied to a source. In
2153   // that case the user would have some control over the tail values. The tail
  // policy is also ignored on instructions that only update element 0, like
  // vmv.s.x or reductions, so use agnostic there to match the common case.
2156   // FIXME: This is conservatively correct, but we might want to detect that
2157   // the input is undefined.
2158   bool TailAgnostic = true;
2159   if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
2160     TailAgnostic = false;
2161 
2162   // For simplicity we reuse the vtype representation here.
2163   MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
2164                                      /*TailAgnostic*/ TailAgnostic,
2165                                      /*MaskAgnostic*/ false));
2166 
2167   // Remove (now) redundant operands from pseudo
2168   MI.getOperand(SEWIndex).setImm(-1);
2169   if (VLIndex >= 0) {
2170     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
2171     MI.getOperand(VLIndex).setIsKill(false);
2172   }
2173 
2174   return BB;
2175 }
2176 
2177 MachineBasicBlock *
2178 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2179                                                  MachineBasicBlock *BB) const {
2180 
2181   if (const RISCVVPseudosTable::PseudoInfo *RVV =
2182           RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
2183     int VLIndex = RVV->getVLIndex();
2184     int SEWIndex = RVV->getSEWIndex();
2185     bool WritesElement0 = RVV->writesElement0();
2186 
2187     assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
2188     return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul, WritesElement0);
2189   }
2190 
2191   switch (MI.getOpcode()) {
2192   default:
2193     llvm_unreachable("Unexpected instr type to insert");
2194   case RISCV::ReadCycleWide:
2195     assert(!Subtarget.is64Bit() &&
2196            "ReadCycleWrite is only to be used on riscv32");
2197     return emitReadCycleWidePseudo(MI, BB);
2198   case RISCV::Select_GPR_Using_CC_GPR:
2199   case RISCV::Select_FPR16_Using_CC_GPR:
2200   case RISCV::Select_FPR32_Using_CC_GPR:
2201   case RISCV::Select_FPR64_Using_CC_GPR:
2202     return emitSelectPseudo(MI, BB);
2203   case RISCV::BuildPairF64Pseudo:
2204     return emitBuildPairF64Pseudo(MI, BB);
2205   case RISCV::SplitF64Pseudo:
2206     return emitSplitF64Pseudo(MI, BB);
2207   }
2208 }
2209 
2210 // Calling Convention Implementation.
2211 // The expectations for frontend ABI lowering vary from target to target.
2212 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
2213 // details, but this is a longer term goal. For now, we simply try to keep the
2214 // role of the frontend as simple and well-defined as possible. The rules can
2215 // be summarised as:
2216 // * Never split up large scalar arguments. We handle them here.
2217 // * If a hardfloat calling convention is being used, and the struct may be
2218 // passed in a pair of registers (fp+fp, int+fp), and both registers are
2219 // available, then pass as two separate arguments. If either the GPRs or FPRs
2220 // are exhausted, then pass according to the rule below.
2221 // * If a struct could never be passed in registers or directly in a stack
2222 // slot (as it is larger than 2*XLEN and the floating point rules don't
2223 // apply), then pass it using a pointer with the byval attribute.
2224 // * If a struct is less than 2*XLEN, then coerce to either a two-element
2225 // word-sized array or a 2*XLEN scalar (depending on alignment).
2226 // * The frontend can determine whether a struct is returned by reference or
2227 // not based on its size and fields. If it will be returned by reference, the
2228 // frontend must modify the prototype so a pointer with the sret annotation is
2229 // passed as the first argument. This is not necessary for large scalar
2230 // returns.
2231 // * Struct return values and varargs should be coerced to structs containing
2232 // register-size fields in the same situations they would be for fixed
2233 // arguments.
2234 
2235 static const MCPhysReg ArgGPRs[] = {
2236   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
2237   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
2238 };
2239 static const MCPhysReg ArgFPR16s[] = {
2240   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
2241   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
2242 };
2243 static const MCPhysReg ArgFPR32s[] = {
2244   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
2245   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
2246 };
2247 static const MCPhysReg ArgFPR64s[] = {
2248   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
2249   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
2250 };
2251 // This is an interim calling convention and it may be changed in the future.
2252 static const MCPhysReg ArgVRs[] = {
2253   RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
2254   RISCV::V21, RISCV::V22, RISCV::V23
2255 };
2256 static const MCPhysReg ArgVRM2s[] = {
2257   RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
2258 };
2259 static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
2260 static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
2261 
2262 // Pass a 2*XLEN argument that has been split into two XLEN values through
2263 // registers or the stack as necessary.
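// For example, an i64 on RV32 is split into two i32 halves that may land in
// a GPR pair, be split between the last free GPR and the stack, or be placed
// entirely on the stack.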
2264 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
2265                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
2266                                 MVT ValVT2, MVT LocVT2,
2267                                 ISD::ArgFlagsTy ArgFlags2) {
2268   unsigned XLenInBytes = XLen / 8;
2269   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2270     // At least one half can be passed via register.
2271     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
2272                                      VA1.getLocVT(), CCValAssign::Full));
2273   } else {
2274     // Both halves must be passed on the stack, with proper alignment.
2275     Align StackAlign =
2276         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
2277     State.addLoc(
2278         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
2279                             State.AllocateStack(XLenInBytes, StackAlign),
2280                             VA1.getLocVT(), CCValAssign::Full));
2281     State.addLoc(CCValAssign::getMem(
2282         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2283         LocVT2, CCValAssign::Full));
2284     return false;
2285   }
2286 
2287   if (Register Reg = State.AllocateReg(ArgGPRs)) {
2288     // The second half can also be passed via register.
2289     State.addLoc(
2290         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
2291   } else {
2292     // The second half is passed via the stack, without additional alignment.
2293     State.addLoc(CCValAssign::getMem(
2294         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2295         LocVT2, CCValAssign::Full));
2296   }
2297 
2298   return false;
2299 }
2300 
2301 // Implements the RISC-V calling convention. Returns true upon failure.
2302 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
2303                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
2304                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
2305                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
2306                      Optional<unsigned> FirstMaskArgument) {
2307   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
2308   assert(XLen == 32 || XLen == 64);
2309   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
2310 
  // Any return value split into more than two values can't be returned
2312   // directly.
2313   if (IsRet && ValNo > 1)
2314     return true;
2315 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
  bool UseGPRForF64 = true;
2322 
2323   switch (ABI) {
2324   default:
2325     llvm_unreachable("Unexpected ABI");
2326   case RISCVABI::ABI_ILP32:
2327   case RISCVABI::ABI_LP64:
2328     break;
2329   case RISCVABI::ABI_ILP32F:
2330   case RISCVABI::ABI_LP64F:
2331     UseGPRForF16_F32 = !IsFixed;
2332     break;
2333   case RISCVABI::ABI_ILP32D:
2334   case RISCVABI::ABI_LP64D:
2335     UseGPRForF16_F32 = !IsFixed;
2336     UseGPRForF64 = !IsFixed;
2337     break;
2338   }
2339 
2340   // FPR16, FPR32, and FPR64 alias each other.
2341   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
2342     UseGPRForF16_F32 = true;
2343     UseGPRForF64 = true;
2344   }
2345 
2346   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
2347   // similar local variables rather than directly checking against the target
2348   // ABI.
2349 
2350   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
2351     LocVT = XLenVT;
2352     LocInfo = CCValAssign::BCvt;
2353   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
2354     LocVT = MVT::i64;
2355     LocInfo = CCValAssign::BCvt;
2356   }
2357 
2358   // If this is a variadic argument, the RISC-V calling convention requires
2359   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
2360   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
2361   // be used regardless of whether the original argument was split during
2362   // legalisation or not. The argument will not be passed by registers if the
2363   // original type is larger than 2*XLEN, so the register alignment rule does
2364   // not apply.
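  // For example, a variadic double on RV32 that would otherwise start in a1
  // skips a1 and is passed in the aligned pair a2/a3.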
2365   unsigned TwoXLenInBytes = (2 * XLen) / 8;
2366   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
2367       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
2368     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
2369     // Skip 'odd' register if necessary.
2370     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
2371       State.AllocateReg(ArgGPRs);
2372   }
2373 
2374   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
2375   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
2376       State.getPendingArgFlags();
2377 
2378   assert(PendingLocs.size() == PendingArgFlags.size() &&
2379          "PendingLocs and PendingArgFlags out of sync");
2380 
2381   // Handle passing f64 on RV32D with a soft float ABI or when floating point
2382   // registers are exhausted.
2383   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
2384     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
2385            "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
2387     // GPRs, split between a GPR and the stack, or passed completely on the
2388     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
2389     // cases.
2390     Register Reg = State.AllocateReg(ArgGPRs);
2391     LocVT = MVT::i32;
2392     if (!Reg) {
2393       unsigned StackOffset = State.AllocateStack(8, Align(8));
2394       State.addLoc(
2395           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2396       return false;
2397     }
2398     if (!State.AllocateReg(ArgGPRs))
2399       State.AllocateStack(4, Align(4));
2400     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2401     return false;
2402   }
2403 
2404   // Split arguments might be passed indirectly, so keep track of the pending
2405   // values.
2406   if (ArgFlags.isSplit() || !PendingLocs.empty()) {
2407     LocVT = XLenVT;
2408     LocInfo = CCValAssign::Indirect;
2409     PendingLocs.push_back(
2410         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
2411     PendingArgFlags.push_back(ArgFlags);
2412     if (!ArgFlags.isSplitEnd()) {
2413       return false;
2414     }
2415   }
2416 
2417   // If the split argument only had two elements, it should be passed directly
2418   // in registers or on the stack.
2419   if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
2420     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
2421     // Apply the normal calling convention rules to the first half of the
2422     // split argument.
2423     CCValAssign VA = PendingLocs[0];
2424     ISD::ArgFlagsTy AF = PendingArgFlags[0];
2425     PendingLocs.clear();
2426     PendingArgFlags.clear();
2427     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
2428                                ArgFlags);
2429   }
2430 
2431   // Allocate to a register if possible, or else a stack slot.
2432   Register Reg;
2433   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
2434     Reg = State.AllocateReg(ArgFPR16s);
2435   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
2436     Reg = State.AllocateReg(ArgFPR32s);
2437   else if (ValVT == MVT::f64 && !UseGPRForF64)
2438     Reg = State.AllocateReg(ArgFPR64s);
2439   else if (ValVT.isScalableVector()) {
2440     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
2441     if (RC == &RISCV::VRRegClass) {
2442       // Assign the first mask argument to V0.
2443       // This is an interim calling convention and it may be changed in the
2444       // future.
2445       if (FirstMaskArgument.hasValue() &&
2446           ValNo == FirstMaskArgument.getValue()) {
2447         Reg = State.AllocateReg(RISCV::V0);
2448       } else {
2449         Reg = State.AllocateReg(ArgVRs);
2450       }
2451     } else if (RC == &RISCV::VRM2RegClass) {
2452       Reg = State.AllocateReg(ArgVRM2s);
2453     } else if (RC == &RISCV::VRM4RegClass) {
2454       Reg = State.AllocateReg(ArgVRM4s);
2455     } else if (RC == &RISCV::VRM8RegClass) {
2456       Reg = State.AllocateReg(ArgVRM8s);
2457     } else {
2458       llvm_unreachable("Unhandled class register for ValueType");
2459     }
2460     if (!Reg) {
2461       LocInfo = CCValAssign::Indirect;
2462       // Try using a GPR to pass the address
2463       Reg = State.AllocateReg(ArgGPRs);
2464       LocVT = XLenVT;
2465     }
2466   } else
2467     Reg = State.AllocateReg(ArgGPRs);
2468   unsigned StackOffset =
2469       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
2470 
2471   // If we reach this point and PendingLocs is non-empty, we must be at the
2472   // end of a split argument that must be passed indirectly.
2473   if (!PendingLocs.empty()) {
2474     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
2475     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
2476 
2477     for (auto &It : PendingLocs) {
2478       if (Reg)
2479         It.convertToReg(Reg);
2480       else
2481         It.convertToMem(StackOffset);
2482       State.addLoc(It);
2483     }
2484     PendingLocs.clear();
2485     PendingArgFlags.clear();
2486     return false;
2487   }
2488 
  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
         "Expected an XLenVT or scalable vector type at this stage");
2492 
2493   if (Reg) {
2494     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2495     return false;
2496   }
2497 
2498   // When a floating-point value is passed on the stack, no bit-conversion is
2499   // needed.
2500   if (ValVT.isFloatingPoint()) {
2501     LocVT = ValVT;
2502     LocInfo = CCValAssign::Full;
2503   }
2504   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2505   return false;
2506 }
2507 
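// Returns the index of the first argument that is a scalable vector of i1
// (i.e. a mask), if any. CC_RISCV pre-assigns such an argument to V0.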
2508 template <typename ArgTy>
2509 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
2510   for (const auto &ArgIdx : enumerate(Args)) {
2511     MVT ArgVT = ArgIdx.value().VT;
2512     if (ArgVT.isScalableVector() &&
2513         ArgVT.getVectorElementType().SimpleTy == MVT::i1)
2514       return ArgIdx.index();
2515   }
2516   return None;
2517 }
2518 
2519 void RISCVTargetLowering::analyzeInputArgs(
2520     MachineFunction &MF, CCState &CCInfo,
2521     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
2522   unsigned NumArgs = Ins.size();
2523   FunctionType *FType = MF.getFunction().getFunctionType();
2524 
2525   Optional<unsigned> FirstMaskArgument;
2526   if (Subtarget.hasStdExtV())
2527     FirstMaskArgument = preAssignMask(Ins);
2528 
2529   for (unsigned i = 0; i != NumArgs; ++i) {
2530     MVT ArgVT = Ins[i].VT;
2531     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
2532 
2533     Type *ArgTy = nullptr;
2534     if (IsRet)
2535       ArgTy = FType->getReturnType();
2536     else if (Ins[i].isOrigArg())
2537       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
2538 
2539     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2540     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2541                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
2542                  FirstMaskArgument)) {
2543       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
2544                         << EVT(ArgVT).getEVTString() << '\n');
2545       llvm_unreachable(nullptr);
2546     }
2547   }
2548 }
2549 
2550 void RISCVTargetLowering::analyzeOutputArgs(
2551     MachineFunction &MF, CCState &CCInfo,
2552     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
2553     CallLoweringInfo *CLI) const {
2554   unsigned NumArgs = Outs.size();
2555 
2556   Optional<unsigned> FirstMaskArgument;
2557   if (Subtarget.hasStdExtV())
2558     FirstMaskArgument = preAssignMask(Outs);
2559 
2560   for (unsigned i = 0; i != NumArgs; i++) {
2561     MVT ArgVT = Outs[i].VT;
2562     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2563     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
2564 
2565     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2566     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
2567                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
2568                  FirstMaskArgument)) {
2569       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
2570                         << EVT(ArgVT).getEVTString() << "\n");
2571       llvm_unreachable(nullptr);
2572     }
2573   }
2574 }
2575 
2576 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
2577 // values.
2578 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
2579                                    const CCValAssign &VA, const SDLoc &DL) {
2580   switch (VA.getLocInfo()) {
2581   default:
2582     llvm_unreachable("Unexpected CCValAssign::LocInfo");
2583   case CCValAssign::Full:
2584     break;
  case CCValAssign::BCvt:
    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
    else
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL,
                                const RISCVTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;
  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
  Register VReg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  if (VA.getLocInfo() == CCValAssign::Indirect)
    return Val;

  return convertLocVTToValVT(DAG, Val, VA, DL);
}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {
  EVT LocVT = VA.getLocVT();

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    break;
  case CCValAssign::BCvt:
    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
    else
      Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
  case CCValAssign::BCvt:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
                                       const CCValAssign &VA, const SDLoc &DL) {
  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
         "Unexpected VA");
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  if (VA.isMemLoc()) {
    // f64 is passed on the stack.
    int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    return DAG.getLoad(MVT::f64, DL, Chain, FIN,
                       MachinePointerInfo::getFixedStack(MF, FI));
  }

  assert(VA.isRegLoc() && "Expected register VA assignment");

  Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
  SDValue Hi;
  if (VA.getLocReg() == RISCV::X17) {
    // Second half of f64 is passed on the stack.
    int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
  } else {
    // Second half of f64 is passed in another GPR.
    Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
    Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
  }
  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}

// FastCC shows less than 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit other cases.
static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // X5 and X6 might be used by the save-restore libcalls, so they are
    // omitted from this list.
    static const MCPhysReg GPRList[] = {
        RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
        RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
        RISCV::X29, RISCV::X30, RISCV::X31};
    if (unsigned Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f16) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (unsigned Reg = State.AllocateReg(FPR16List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    unsigned Offset4 = State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    unsigned Offset5 = State.AllocateStack(8, Align(8));
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
    return false;
  }

  return true; // CC didn't match.
}

static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                         CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
    // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
    //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    static const MCPhysReg GPRList[] = {
        RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
        RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
    if (unsigned Reg = State.AllocateReg(GPRList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f32) {
    // Pass in STG registers: F1, ..., F6
    //                        fs0 ... fs5
    static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  if (LocVT == MVT::f64) {
    // Pass in STG registers: D1, ..., D6
    //                        fs6 ... fs11
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  report_fatal_error("No registers left in GHC calling convention");
  return true;
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  case CallingConv::GHC:
    if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
        !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
      report_fatal_error(
        "GHC calling convention requires the F and D instruction set extensions");
  }

  const Function &Func = MF.getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.arg_empty())
      report_fatal_error(
        "Functions with the interrupt attribute cannot have arguments!");

    StringRef Kind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
      report_fatal_error(
        "Function interrupt attribute argument not supported!");
  }

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (CallConv == CallingConv::Fast)
    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
  else if (CallConv == CallingConv::GHC)
    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
  else
    analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue;
    // Passing f64 on RV32D with a soft float ABI must be handled as a special
    // case.
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
    else if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }
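    // For example, on RV64 with Idx == 3 (a0-a2 consumed by fixed arguments),
    // a3-a7 still need saving: VarArgsSaveSize is 5 * 8 = 40 bytes and the
    // save area sits at offset -40, directly below any incoming stack
    // arguments.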

    // Record the frame index of the first variable argument, which is needed
    // for VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
      VarArgsSaveSize += XLenInBytes;
    }
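    // E.g. with Idx == 3 above, five registers are saved; the extra
    // XLen-sized slot pads the save area to six slots, keeping it
    // 2*XLEN-aligned.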

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const Register Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node so that the number of entries in
  // InVals matches the number of entries in Ins. This only happens for
  // vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
bool RISCVTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVector<CCValAssign, 16> &ArgLocs) const {

  auto &Callee = CLI.Callee;
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();
  auto CallerCC = Caller.getCallingConv();

  // Exception-handling functions need a special set of instructions to
  // indicate a return to the hardware. Tail-calling another function would
  // probably break this.
  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
  // should be expanded as new function attributes are introduced.
  if (Caller.hasFnAttribute("interrupt"))
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  if (CCInfo.getNextStackOffset() != 0)
    return false;

  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register, or
  // if no register is available, on the stack. Passing indirectly often
  // requires allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check above is not enough; we must also
  // check whether any entry in ArgLocs is passed CCValAssign::Indirect.
  for (auto &VA : ArgLocs)
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;

  // Do not tail call opt if either caller or callee uses struct return
  // semantics.
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called. The behaviour of branch instructions in this situation (as
  // used for tail calls) is implementation-defined, so we cannot rely on the
  // linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    if (GV->hasExternalWeakLinkage())
      return false;
  }

  // The callee has to preserve all registers the caller needs to preserve.
  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // but less efficient and uglier in LowerCall.
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
      return false;

  return true;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (CallConv == CallingConv::Fast)
    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
  else if (CallConv == CallingConv::GHC)
    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
  else
    analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Check if it's really possible to do a tail call.
  if (IsTailCall)
    IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);

  if (IsTailCall)
    ++NumTailCalls;
  else if (CLI.CB && CLI.CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();

    int FI =
        MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false, IsTailCall,
                          MachinePointerInfo(), MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Handle passing f64 on RV32D with a soft float ABI as a special case.
    bool IsF64OnRV32DSoftABI =
        VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
    if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
      SDValue SplitF64 = DAG.getNode(
          RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);

      Register RegLo = VA.getLocReg();
      RegsToPass.push_back(std::make_pair(RegLo, Lo));

      if (RegLo == RISCV::X17) {
        // Second half of f64 is passed on the stack.
        // Work out the address of the stack slot.
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
        // Emit the store.
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
      } else {
        // Second half of f64 is passed in another GPR.
        assert(RegLo < RISCV::X31 && "Invalid register pair");
        Register RegHigh = RegLo + 1;
        RegsToPass.push_back(std::make_pair(RegHigh, Hi));
      }
      continue;
    }

    // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
    // as any other MemLoc.

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
    } else {
      ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  // Validate that none of the argument registers have been marked as
  // reserved; if any have, report an error. Do the same for the return
  // address register if this is not a tail call.
  validateCCReservedRegs(RegsToPass, MF);
  if (!IsTailCall &&
      MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
    MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
        MF.getFunction(),
        "Return address register required, but has been reserved."});

  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so that the direct call can then be matched by PseudoCALL.
  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = S->getGlobal();

    unsigned OpFlags = RISCVII::MO_CALL;
    if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned OpFlags = RISCVII::MO_CALL;

    if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
                                                 nullptr))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

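    // On RV32 with a soft-float ABI, an f64 return value comes back split
    // across the a0+a1 pair and has to be reassembled with BuildPairF64.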
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                             RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);

    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  Optional<unsigned> FirstMaskArgument;
  if (Subtarget.hasStdExtV())
    FirstMaskArgument = preAssignMask(Outs);

  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
                 *this, FirstMaskArgument))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  if (CallConv == CallingConv::GHC && !RVLocs.empty())
    report_fatal_error("GHC functions return void only");

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      // Handle returning f64 on RV32D with a soft float ABI.
      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);
      Register RegLo = VA.getLocReg();
      assert(RegLo < RISCV::X31 && "Invalid register pair");
      Register RegHi = RegLo + 1;

      if (STI.isRegisterReservedByUser(RegLo) ||
          STI.isRegisterReservedByUser(RegHi))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      if (STI.isRegisterReservedByUser(VA.getLocReg()))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
  const Function &Func = DAG.getMachineFunction().getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
      report_fatal_error(
          "Functions with the interrupt attribute must have void return type!");

    MachineFunction &MF = DAG.getMachineFunction();
    StringRef Kind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    unsigned RetOpc;
    if (Kind == "user")
      RetOpc = RISCVISD::URET_FLAG;
    else if (Kind == "supervisor")
      RetOpc = RISCVISD::SRET_FLAG;
    else
      RetOpc = RISCVISD::MRET_FLAG;

    return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
    MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  if (llvm::any_of(Regs, [&STI](auto Reg) {
        return STI.isRegisterReservedByUser(Reg.first);
      }))
    F.getContext().diagnose(DiagnosticInfoUnsupported{
        F, "Argument register required, but has been reserved."});
}

bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE_NAME_CASE(NODE)                                                   \
  case RISCVISD::NODE:                                                         \
    return "RISCVISD::" #NODE;
  // clang-format off
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(URET_FLAG)
  NODE_NAME_CASE(SRET_FLAG)
  NODE_NAME_CASE(MRET_FLAG)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(SELECT_CC)
  NODE_NAME_CASE(BuildPairF64)
  NODE_NAME_CASE(SplitF64)
  NODE_NAME_CASE(TAIL)
  NODE_NAME_CASE(SLLW)
  NODE_NAME_CASE(SRAW)
  NODE_NAME_CASE(SRLW)
  NODE_NAME_CASE(DIVW)
  NODE_NAME_CASE(DIVUW)
  NODE_NAME_CASE(REMUW)
  NODE_NAME_CASE(ROLW)
  NODE_NAME_CASE(RORW)
  NODE_NAME_CASE(FSLW)
  NODE_NAME_CASE(FSRW)
  NODE_NAME_CASE(FMV_H_X)
  NODE_NAME_CASE(FMV_X_ANYEXTH)
  NODE_NAME_CASE(FMV_W_X_RV64)
  NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
  NODE_NAME_CASE(READ_CYCLE_WIDE)
  NODE_NAME_CASE(GREVI)
  NODE_NAME_CASE(GREVIW)
  NODE_NAME_CASE(GORCI)
  NODE_NAME_CASE(GORCIW)
  NODE_NAME_CASE(VMV_X_S)
  NODE_NAME_CASE(SPLAT_VECTOR_I64)
  }
  // clang-format on
  return nullptr;
#undef NODE_NAME_CASE
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'f':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
      return C_Immediate;
    case 'A':
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'f':
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
        return std::make_pair(0U, &RISCV::FPR16RegClass);
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
  unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
  //
  // The second case is the ABI name of the register, so that frontends can also
  // use the ABI names in register constraint lists.
  if (Subtarget.hasStdExtF()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
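      // The index arithmetic below assumes the FPR32 and FPR64 register
      // enums are laid out in the same order, so the matching 64-bit
      // register can be found by offset from F0_D.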
      if (Subtarget.hasStdExtD()) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      return std::make_pair(FReg, &RISCV::FPR32RegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

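// These two hooks implement the fence scheme RISC-V uses for atomic loads
// and stores: a seq_cst load gets a leading seq_cst fence plus a trailing
// acquire fence, any other acquire-or-stronger load gets just the trailing
// acquire fence, and a release-or-stronger store gets a leading release
// fence.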
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
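  // For example, with XLen == 32 and an i8 field at byte offset 2, ShiftAmt
  // is 16 and SextShamt is 32 - 16 - 8 == 8: shifting the loaded word left
  // by 8 moves the field's sign bit to bit 31, and an arithmetic shift right
  // by 8 puts the field back with everything above it sign-filled.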
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is an f32 under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Do not perform the transformation on riscv32 with the M extension.
    if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
        return false;
      int64_t Imm = ConstNode->getSExtValue();
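      // I.e. Imm is one of 2^k - 1, 2^k + 1, 1 - 2^k or -(2^k + 1), so
      // x * Imm can be lowered to a shift plus one add or subtract
      // (possibly negated).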
      if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
          isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
        return true;
    }
  }

  return false;
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable
} // namespace llvm