1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVRegisterInfo.h"
18 #include "RISCVSubtarget.h"
19 #include "RISCVTargetMachine.h"
20 #include "Utils/RISCVMatInt.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "riscv-lower"
41 
42 STATISTIC(NumTailCalls, "Number of tail calls");
43 
44 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
45                                          const RISCVSubtarget &STI)
46     : TargetLowering(TM), Subtarget(STI) {
47 
48   if (Subtarget.isRV32E())
49     report_fatal_error("Codegen not yet implemented for RV32E");
50 
51   RISCVABI::ABI ABI = Subtarget.getTargetABI();
52   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
53 
54   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
55       !Subtarget.hasStdExtF()) {
56     errs() << "Hard-float 'f' ABI can't be used for a target that "
57                 "doesn't support the F instruction set extension (ignoring "
58                           "target-abi)\n";
59     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
60   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
61              !Subtarget.hasStdExtD()) {
62     errs() << "Hard-float 'd' ABI can't be used for a target that "
63               "doesn't support the D instruction set extension (ignoring "
64               "target-abi)\n";
65     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
66   }
67 
68   switch (ABI) {
69   default:
70     report_fatal_error("Don't know how to lower this ABI");
71   case RISCVABI::ABI_ILP32:
72   case RISCVABI::ABI_ILP32F:
73   case RISCVABI::ABI_ILP32D:
74   case RISCVABI::ABI_LP64:
75   case RISCVABI::ABI_LP64F:
76   case RISCVABI::ABI_LP64D:
77     break;
78   }
79 
80   MVT XLenVT = Subtarget.getXLenVT();
81 
82   // Set up the register classes.
83   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
84 
85   if (Subtarget.hasStdExtF())
86     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
87   if (Subtarget.hasStdExtD())
88     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
89 
90   // Compute derived properties from the register classes.
91   computeRegisterProperties(STI.getRegisterInfo());
92 
93   setStackPointerRegisterToSaveRestore(RISCV::X2);
94 
95   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
96     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
97 
98   // TODO: add all necessary setOperationAction calls.
99   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
100 
101   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
102   setOperationAction(ISD::BR_CC, XLenVT, Expand);
103   setOperationAction(ISD::SELECT, XLenVT, Custom);
104   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
105 
106   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
107   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
108 
109   setOperationAction(ISD::VASTART, MVT::Other, Custom);
110   setOperationAction(ISD::VAARG, MVT::Other, Expand);
111   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
112   setOperationAction(ISD::VAEND, MVT::Other, Expand);
113 
114   for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
115     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
116 
117   if (Subtarget.is64Bit()) {
118     setOperationAction(ISD::ADD, MVT::i32, Custom);
119     setOperationAction(ISD::SUB, MVT::i32, Custom);
120     setOperationAction(ISD::SHL, MVT::i32, Custom);
121     setOperationAction(ISD::SRA, MVT::i32, Custom);
122     setOperationAction(ISD::SRL, MVT::i32, Custom);
123   }
124 
125   if (!Subtarget.hasStdExtM()) {
126     setOperationAction(ISD::MUL, XLenVT, Expand);
127     setOperationAction(ISD::MULHS, XLenVT, Expand);
128     setOperationAction(ISD::MULHU, XLenVT, Expand);
129     setOperationAction(ISD::SDIV, XLenVT, Expand);
130     setOperationAction(ISD::UDIV, XLenVT, Expand);
131     setOperationAction(ISD::SREM, XLenVT, Expand);
132     setOperationAction(ISD::UREM, XLenVT, Expand);
133   }
134 
135   if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
136     setOperationAction(ISD::MUL, MVT::i32, Custom);
137     setOperationAction(ISD::SDIV, MVT::i32, Custom);
138     setOperationAction(ISD::UDIV, MVT::i32, Custom);
139     setOperationAction(ISD::UREM, MVT::i32, Custom);
140   }
141 
142   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
143   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
144   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
145   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
146 
147   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
148   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
149   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
150 
151   if (!(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp())) {
152     setOperationAction(ISD::ROTL, XLenVT, Expand);
153     setOperationAction(ISD::ROTR, XLenVT, Expand);
154   }
155 
156   if (!Subtarget.hasStdExtZbp())
157     setOperationAction(ISD::BSWAP, XLenVT, Expand);
158 
159   if (!Subtarget.hasStdExtZbb()) {
160     setOperationAction(ISD::CTTZ, XLenVT, Expand);
161     setOperationAction(ISD::CTLZ, XLenVT, Expand);
162     setOperationAction(ISD::CTPOP, XLenVT, Expand);
163   }
164 
165   if (Subtarget.hasStdExtZbp())
166     setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
167 
168   if (Subtarget.hasStdExtZbt()) {
169     setOperationAction(ISD::FSHL, XLenVT, Legal);
170     setOperationAction(ISD::FSHR, XLenVT, Legal);
171   }
172 
173   ISD::CondCode FPCCToExtend[] = {
174       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
175       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
176       ISD::SETGE,  ISD::SETNE};
177 
178   ISD::NodeType FPOpToExtend[] = {
179       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
180       ISD::FP_TO_FP16};
181 
182   if (Subtarget.hasStdExtF()) {
183     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
184     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
185     for (auto CC : FPCCToExtend)
186       setCondCodeAction(CC, MVT::f32, Expand);
187     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
188     setOperationAction(ISD::SELECT, MVT::f32, Custom);
189     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
190     for (auto Op : FPOpToExtend)
191       setOperationAction(Op, MVT::f32, Expand);
192     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
193     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
194   }
195 
196   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
197     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
198 
199   if (Subtarget.hasStdExtD()) {
200     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
201     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
202     for (auto CC : FPCCToExtend)
203       setCondCodeAction(CC, MVT::f64, Expand);
204     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
205     setOperationAction(ISD::SELECT, MVT::f64, Custom);
206     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
207     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
208     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
209     for (auto Op : FPOpToExtend)
210       setOperationAction(Op, MVT::f64, Expand);
211     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
212     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
213   }
214 
215   if (Subtarget.is64Bit() &&
216       !(Subtarget.hasStdExtD() || Subtarget.hasStdExtF())) {
217     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
218     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
219     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
220     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
221   }
222 
223   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
224   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
225   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
226 
227   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
228 
229   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
230   // Unfortunately this can't be determined just from the ISA naming string.
231   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
232                      Subtarget.is64Bit() ? Legal : Custom);
233 
234   setOperationAction(ISD::TRAP, MVT::Other, Legal);
235   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
236   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
237 
238   if (Subtarget.hasStdExtA()) {
239     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
240     setMinCmpXchgSizeInBits(32);
241   } else {
242     setMaxAtomicSizeInBitsSupported(0);
243   }
244 
245   setBooleanContents(ZeroOrOneBooleanContent);
246 
247   // Function alignments.
248   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
249   setMinFunctionAlignment(FunctionAlignment);
250   setPrefFunctionAlignment(FunctionAlignment);
251 
252   // Effectively disable jump table generation.
253   setMinimumJumpTableEntries(INT_MAX);
254 
255   // Jumps are expensive, compared to logic
256   setJumpIsExpensive();
257 
258   // We can use any register for comparisons
259   setHasMultipleConditionRegisters();
260 }
261 
262 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
263                                             EVT VT) const {
264   if (!VT.isVector())
265     return getPointerTy(DL);
266   return VT.changeVectorElementTypeToInteger();
267 }
268 
269 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
270                                              const CallInst &I,
271                                              MachineFunction &MF,
272                                              unsigned Intrinsic) const {
273   switch (Intrinsic) {
274   default:
275     return false;
276   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
277   case Intrinsic::riscv_masked_atomicrmw_add_i32:
278   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
279   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
280   case Intrinsic::riscv_masked_atomicrmw_max_i32:
281   case Intrinsic::riscv_masked_atomicrmw_min_i32:
282   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
283   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
284   case Intrinsic::riscv_masked_cmpxchg_i32:
285     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
286     Info.opc = ISD::INTRINSIC_W_CHAIN;
287     Info.memVT = MVT::getVT(PtrTy->getElementType());
288     Info.ptrVal = I.getArgOperand(0);
289     Info.offset = 0;
290     Info.align = Align(4);
291     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
292                  MachineMemOperand::MOVolatile;
293     return true;
294   }
295 }
296 
297 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
298                                                 const AddrMode &AM, Type *Ty,
299                                                 unsigned AS,
300                                                 Instruction *I) const {
301   // No global is ever allowed as a base.
302   if (AM.BaseGV)
303     return false;
304 
305   // Require a 12-bit signed offset.
306   if (!isInt<12>(AM.BaseOffs))
307     return false;
308 
309   switch (AM.Scale) {
310   case 0: // "r+i" or just "i", depending on HasBaseReg.
311     break;
312   case 1:
313     if (!AM.HasBaseReg) // allow "r+i".
314       break;
315     return false; // disallow "r+r" or "r+r+i".
316   default:
317     return false;
318   }
319 
320   return true;
321 }
322 
323 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
324   return isInt<12>(Imm);
325 }
326 
327 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
328   return isInt<12>(Imm);
329 }
330 
331 // On RV32, 64-bit integers are split into their high and low parts and held
332 // in two different registers, so the trunc is free since the low register can
333 // just be used.
334 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
335   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
336     return false;
337   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
338   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
339   return (SrcBits == 64 && DestBits == 32);
340 }
341 
342 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
343   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
344       !SrcVT.isInteger() || !DstVT.isInteger())
345     return false;
346   unsigned SrcBits = SrcVT.getSizeInBits();
347   unsigned DestBits = DstVT.getSizeInBits();
348   return (SrcBits == 64 && DestBits == 32);
349 }
350 
351 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
352   // Zexts are free if they can be combined with a load.
353   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
354     EVT MemVT = LD->getMemoryVT();
355     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
356          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
357         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
358          LD->getExtensionType() == ISD::ZEXTLOAD))
359       return true;
360   }
361 
362   return TargetLowering::isZExtFree(Val, VT2);
363 }
364 
365 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
366   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
367 }
368 
369 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
370                                        bool ForCodeSize) const {
371   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
372     return false;
373   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
374     return false;
375   if (Imm.isNegZero())
376     return false;
377   return Imm.isZero();
378 }
379 
380 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
381   return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
382          (VT == MVT::f64 && Subtarget.hasStdExtD());
383 }
384 
385 // Changes the condition code and swaps operands if necessary, so the SetCC
386 // operation matches one of the comparisons supported directly in the RISC-V
387 // ISA.
388 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
389   switch (CC) {
390   default:
391     break;
392   case ISD::SETGT:
393   case ISD::SETLE:
394   case ISD::SETUGT:
395   case ISD::SETULE:
396     CC = ISD::getSetCCSwappedOperands(CC);
397     std::swap(LHS, RHS);
398     break;
399   }
400 }
401 
402 // Return the RISC-V branch opcode that matches the given DAG integer
403 // condition code. The CondCode must be one of those supported by the RISC-V
404 // ISA (see normaliseSetCC).
405 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
406   switch (CC) {
407   default:
408     llvm_unreachable("Unsupported CondCode");
409   case ISD::SETEQ:
410     return RISCV::BEQ;
411   case ISD::SETNE:
412     return RISCV::BNE;
413   case ISD::SETLT:
414     return RISCV::BLT;
415   case ISD::SETGE:
416     return RISCV::BGE;
417   case ISD::SETULT:
418     return RISCV::BLTU;
419   case ISD::SETUGE:
420     return RISCV::BGEU;
421   }
422 }
423 
424 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
425                                             SelectionDAG &DAG) const {
426   switch (Op.getOpcode()) {
427   default:
428     report_fatal_error("unimplemented operand");
429   case ISD::GlobalAddress:
430     return lowerGlobalAddress(Op, DAG);
431   case ISD::BlockAddress:
432     return lowerBlockAddress(Op, DAG);
433   case ISD::ConstantPool:
434     return lowerConstantPool(Op, DAG);
435   case ISD::GlobalTLSAddress:
436     return lowerGlobalTLSAddress(Op, DAG);
437   case ISD::SELECT:
438     return lowerSELECT(Op, DAG);
439   case ISD::VASTART:
440     return lowerVASTART(Op, DAG);
441   case ISD::FRAMEADDR:
442     return lowerFRAMEADDR(Op, DAG);
443   case ISD::RETURNADDR:
444     return lowerRETURNADDR(Op, DAG);
445   case ISD::SHL_PARTS:
446     return lowerShiftLeftParts(Op, DAG);
447   case ISD::SRA_PARTS:
448     return lowerShiftRightParts(Op, DAG, true);
449   case ISD::SRL_PARTS:
450     return lowerShiftRightParts(Op, DAG, false);
451   case ISD::BITCAST: {
452     assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
453            "Unexpected custom legalisation");
454     SDLoc DL(Op);
455     SDValue Op0 = Op.getOperand(0);
456     if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
457       return SDValue();
458     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
459     SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
460     return FPConv;
461   }
462   case ISD::INTRINSIC_WO_CHAIN:
463     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
464   }
465 }
466 
467 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
468                              SelectionDAG &DAG, unsigned Flags) {
469   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
470 }
471 
472 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
473                              SelectionDAG &DAG, unsigned Flags) {
474   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
475                                    Flags);
476 }
477 
478 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
479                              SelectionDAG &DAG, unsigned Flags) {
480   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
481                                    N->getOffset(), Flags);
482 }
483 
484 template <class NodeTy>
485 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
486                                      bool IsLocal) const {
487   SDLoc DL(N);
488   EVT Ty = getPointerTy(DAG.getDataLayout());
489 
490   if (isPositionIndependent()) {
491     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
492     if (IsLocal)
493       // Use PC-relative addressing to access the symbol. This generates the
494       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
495       // %pcrel_lo(auipc)).
496       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
497 
498     // Use PC-relative addressing to access the GOT for this symbol, then load
499     // the address from the GOT. This generates the pattern (PseudoLA sym),
500     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
501     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
502   }
503 
504   switch (getTargetMachine().getCodeModel()) {
505   default:
506     report_fatal_error("Unsupported code model for lowering");
507   case CodeModel::Small: {
508     // Generate a sequence for accessing addresses within the first 2 GiB of
509     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
510     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
511     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
512     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
513     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
514   }
515   case CodeModel::Medium: {
516     // Generate a sequence for accessing addresses within any 2GiB range within
517     // the address space. This generates the pattern (PseudoLLA sym), which
518     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
519     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
520     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
521   }
522   }
523 }
524 
525 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
526                                                 SelectionDAG &DAG) const {
527   SDLoc DL(Op);
528   EVT Ty = Op.getValueType();
529   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
530   int64_t Offset = N->getOffset();
531   MVT XLenVT = Subtarget.getXLenVT();
532 
533   const GlobalValue *GV = N->getGlobal();
534   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
535   SDValue Addr = getAddr(N, DAG, IsLocal);
536 
537   // In order to maximise the opportunity for common subexpression elimination,
538   // emit a separate ADD node for the global address offset instead of folding
539   // it in the global address node. Later peephole optimisations may choose to
540   // fold it back in when profitable.
541   if (Offset != 0)
542     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
543                        DAG.getConstant(Offset, DL, XLenVT));
544   return Addr;
545 }
546 
547 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
548                                                SelectionDAG &DAG) const {
549   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
550 
551   return getAddr(N, DAG);
552 }
553 
554 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
555                                                SelectionDAG &DAG) const {
556   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
557 
558   return getAddr(N, DAG);
559 }
560 
561 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
562                                               SelectionDAG &DAG,
563                                               bool UseGOT) const {
564   SDLoc DL(N);
565   EVT Ty = getPointerTy(DAG.getDataLayout());
566   const GlobalValue *GV = N->getGlobal();
567   MVT XLenVT = Subtarget.getXLenVT();
568 
569   if (UseGOT) {
570     // Use PC-relative addressing to access the GOT for this TLS symbol, then
571     // load the address from the GOT and add the thread pointer. This generates
572     // the pattern (PseudoLA_TLS_IE sym), which expands to
573     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
574     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
575     SDValue Load =
576         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
577 
578     // Add the thread pointer.
579     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
580     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
581   }
582 
583   // Generate a sequence for accessing the address relative to the thread
584   // pointer, with the appropriate adjustment for the thread pointer offset.
585   // This generates the pattern
586   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
587   SDValue AddrHi =
588       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
589   SDValue AddrAdd =
590       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
591   SDValue AddrLo =
592       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
593 
594   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
595   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
596   SDValue MNAdd = SDValue(
597       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
598       0);
599   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
600 }
601 
602 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
603                                                SelectionDAG &DAG) const {
604   SDLoc DL(N);
605   EVT Ty = getPointerTy(DAG.getDataLayout());
606   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
607   const GlobalValue *GV = N->getGlobal();
608 
609   // Use a PC-relative addressing mode to access the global dynamic GOT address.
610   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
611   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
612   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
613   SDValue Load =
614       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
615 
616   // Prepare argument list to generate call.
617   ArgListTy Args;
618   ArgListEntry Entry;
619   Entry.Node = Load;
620   Entry.Ty = CallTy;
621   Args.push_back(Entry);
622 
623   // Setup call to __tls_get_addr.
624   TargetLowering::CallLoweringInfo CLI(DAG);
625   CLI.setDebugLoc(DL)
626       .setChain(DAG.getEntryNode())
627       .setLibCallee(CallingConv::C, CallTy,
628                     DAG.getExternalSymbol("__tls_get_addr", Ty),
629                     std::move(Args));
630 
631   return LowerCallTo(CLI).first;
632 }
633 
634 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
635                                                    SelectionDAG &DAG) const {
636   SDLoc DL(Op);
637   EVT Ty = Op.getValueType();
638   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
639   int64_t Offset = N->getOffset();
640   MVT XLenVT = Subtarget.getXLenVT();
641 
642   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
643 
644   SDValue Addr;
645   switch (Model) {
646   case TLSModel::LocalExec:
647     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
648     break;
649   case TLSModel::InitialExec:
650     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
651     break;
652   case TLSModel::LocalDynamic:
653   case TLSModel::GeneralDynamic:
654     Addr = getDynamicTLSAddr(N, DAG);
655     break;
656   }
657 
658   // In order to maximise the opportunity for common subexpression elimination,
659   // emit a separate ADD node for the global address offset instead of folding
660   // it in the global address node. Later peephole optimisations may choose to
661   // fold it back in when profitable.
662   if (Offset != 0)
663     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
664                        DAG.getConstant(Offset, DL, XLenVT));
665   return Addr;
666 }
667 
668 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
669   SDValue CondV = Op.getOperand(0);
670   SDValue TrueV = Op.getOperand(1);
671   SDValue FalseV = Op.getOperand(2);
672   SDLoc DL(Op);
673   MVT XLenVT = Subtarget.getXLenVT();
674 
675   // If the result type is XLenVT and CondV is the output of a SETCC node
676   // which also operated on XLenVT inputs, then merge the SETCC node into the
677   // lowered RISCVISD::SELECT_CC to take advantage of the integer
678   // compare+branch instructions. i.e.:
679   // (select (setcc lhs, rhs, cc), truev, falsev)
680   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
681   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
682       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
683     SDValue LHS = CondV.getOperand(0);
684     SDValue RHS = CondV.getOperand(1);
685     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
686     ISD::CondCode CCVal = CC->get();
687 
688     normaliseSetCC(LHS, RHS, CCVal);
689 
690     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
691     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
692     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
693     return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
694   }
695 
696   // Otherwise:
697   // (select condv, truev, falsev)
698   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
699   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
700   SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);
701 
702   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
703   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
704 
705   return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
706 }
707 
708 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
709   MachineFunction &MF = DAG.getMachineFunction();
710   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
711 
712   SDLoc DL(Op);
713   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
714                                  getPointerTy(MF.getDataLayout()));
715 
716   // vastart just stores the address of the VarArgsFrameIndex slot into the
717   // memory location argument.
718   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
719   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
720                       MachinePointerInfo(SV));
721 }
722 
723 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
724                                             SelectionDAG &DAG) const {
725   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
726   MachineFunction &MF = DAG.getMachineFunction();
727   MachineFrameInfo &MFI = MF.getFrameInfo();
728   MFI.setFrameAddressIsTaken(true);
729   Register FrameReg = RI.getFrameRegister(MF);
730   int XLenInBytes = Subtarget.getXLen() / 8;
731 
732   EVT VT = Op.getValueType();
733   SDLoc DL(Op);
734   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
735   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
736   while (Depth--) {
737     int Offset = -(XLenInBytes * 2);
738     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
739                               DAG.getIntPtrConstant(Offset, DL));
740     FrameAddr =
741         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
742   }
743   return FrameAddr;
744 }
745 
746 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
747                                              SelectionDAG &DAG) const {
748   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
749   MachineFunction &MF = DAG.getMachineFunction();
750   MachineFrameInfo &MFI = MF.getFrameInfo();
751   MFI.setReturnAddressIsTaken(true);
752   MVT XLenVT = Subtarget.getXLenVT();
753   int XLenInBytes = Subtarget.getXLen() / 8;
754 
755   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
756     return SDValue();
757 
758   EVT VT = Op.getValueType();
759   SDLoc DL(Op);
760   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
761   if (Depth) {
762     int Off = -XLenInBytes;
763     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
764     SDValue Offset = DAG.getConstant(Off, DL, VT);
765     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
766                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
767                        MachinePointerInfo());
768   }
769 
770   // Return the value of the return address register, marking it an implicit
771   // live-in.
772   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
773   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
774 }
775 
776 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
777                                                  SelectionDAG &DAG) const {
778   SDLoc DL(Op);
779   SDValue Lo = Op.getOperand(0);
780   SDValue Hi = Op.getOperand(1);
781   SDValue Shamt = Op.getOperand(2);
782   EVT VT = Lo.getValueType();
783 
784   // if Shamt-XLEN < 0: // Shamt < XLEN
785   //   Lo = Lo << Shamt
786   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
787   // else:
788   //   Lo = 0
789   //   Hi = Lo << (Shamt-XLEN)
790 
791   SDValue Zero = DAG.getConstant(0, DL, VT);
792   SDValue One = DAG.getConstant(1, DL, VT);
793   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
794   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
795   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
796   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
797 
798   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
799   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
800   SDValue ShiftRightLo =
801       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
802   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
803   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
804   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
805 
806   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
807 
808   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
809   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
810 
811   SDValue Parts[2] = {Lo, Hi};
812   return DAG.getMergeValues(Parts, DL);
813 }
814 
815 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
816                                                   bool IsSRA) const {
817   SDLoc DL(Op);
818   SDValue Lo = Op.getOperand(0);
819   SDValue Hi = Op.getOperand(1);
820   SDValue Shamt = Op.getOperand(2);
821   EVT VT = Lo.getValueType();
822 
823   // SRA expansion:
824   //   if Shamt-XLEN < 0: // Shamt < XLEN
825   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
826   //     Hi = Hi >>s Shamt
827   //   else:
828   //     Lo = Hi >>s (Shamt-XLEN);
829   //     Hi = Hi >>s (XLEN-1)
830   //
831   // SRL expansion:
832   //   if Shamt-XLEN < 0: // Shamt < XLEN
833   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
834   //     Hi = Hi >>u Shamt
835   //   else:
836   //     Lo = Hi >>u (Shamt-XLEN);
837   //     Hi = 0;
838 
839   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
840 
841   SDValue Zero = DAG.getConstant(0, DL, VT);
842   SDValue One = DAG.getConstant(1, DL, VT);
843   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
844   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
845   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
846   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
847 
848   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
849   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
850   SDValue ShiftLeftHi =
851       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
852   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
853   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
854   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
855   SDValue HiFalse =
856       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
857 
858   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
859 
860   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
861   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
862 
863   SDValue Parts[2] = {Lo, Hi};
864   return DAG.getMergeValues(Parts, DL);
865 }
866 
867 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
868                                                      SelectionDAG &DAG) const {
869   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
870   SDLoc DL(Op);
871   switch (IntNo) {
872   default:
873     return SDValue();    // Don't custom lower most intrinsics.
874   case Intrinsic::thread_pointer: {
875     EVT PtrVT = getPointerTy(DAG.getDataLayout());
876     return DAG.getRegister(RISCV::X4, PtrVT);
877   }
878   }
879 }
880 
881 // Returns the opcode of the target-specific SDNode that implements the 32-bit
882 // form of the given Opcode.
883 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
884   switch (Opcode) {
885   default:
886     llvm_unreachable("Unexpected opcode");
887   case ISD::SHL:
888     return RISCVISD::SLLW;
889   case ISD::SRA:
890     return RISCVISD::SRAW;
891   case ISD::SRL:
892     return RISCVISD::SRLW;
893   case ISD::SDIV:
894     return RISCVISD::DIVW;
895   case ISD::UDIV:
896     return RISCVISD::DIVUW;
897   case ISD::UREM:
898     return RISCVISD::REMUW;
899   }
900 }
901 
902 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
903 // Because i32 isn't a legal type for RV64, these operations would otherwise
904 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
905 // later one because the fact the operation was originally of type i32 is
906 // lost.
907 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
908   SDLoc DL(N);
909   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
910   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
911   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
912   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
913   // ReplaceNodeResults requires we maintain the same type for the return value.
914   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
915 }
916 
917 // Converts the given 32-bit operation to a i64 operation with signed extension
918 // semantic to reduce the signed extension instructions.
919 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
920   SDLoc DL(N);
921   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
922   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
923   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
924   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
925                                DAG.getValueType(MVT::i32));
926   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
927 }
928 
929 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
930                                              SmallVectorImpl<SDValue> &Results,
931                                              SelectionDAG &DAG) const {
932   SDLoc DL(N);
933   switch (N->getOpcode()) {
934   default:
935     llvm_unreachable("Don't know how to custom type legalize this operation!");
936   case ISD::STRICT_FP_TO_SINT:
937   case ISD::STRICT_FP_TO_UINT:
938   case ISD::FP_TO_SINT:
939   case ISD::FP_TO_UINT: {
940     bool IsStrict = N->isStrictFPOpcode();
941     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
942            "Unexpected custom legalisation");
943     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
944     RTLIB::Libcall LC;
945     if (N->getOpcode() == ISD::FP_TO_SINT ||
946         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
947       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
948     else
949       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
950     MakeLibCallOptions CallOptions;
951     EVT OpVT = Op0.getValueType();
952     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
953     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
954     SDValue Result;
955     std::tie(Result, Chain) =
956         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
957     Results.push_back(Result);
958     if (IsStrict)
959       Results.push_back(Chain);
960     break;
961   }
962   case ISD::READCYCLECOUNTER: {
963     assert(!Subtarget.is64Bit() &&
964            "READCYCLECOUNTER only has custom type legalization on riscv32");
965 
966     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
967     SDValue RCW =
968         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
969 
970     Results.push_back(
971         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
972     Results.push_back(RCW.getValue(2));
973     break;
974   }
975   case ISD::ADD:
976   case ISD::SUB:
977   case ISD::MUL:
978     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
979            "Unexpected custom legalisation");
980     if (N->getOperand(1).getOpcode() == ISD::Constant)
981       return;
982     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
983     break;
984   case ISD::SHL:
985   case ISD::SRA:
986   case ISD::SRL:
987     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
988            "Unexpected custom legalisation");
989     if (N->getOperand(1).getOpcode() == ISD::Constant)
990       return;
991     Results.push_back(customLegalizeToWOp(N, DAG));
992     break;
993   case ISD::SDIV:
994   case ISD::UDIV:
995   case ISD::UREM:
996     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
997            Subtarget.hasStdExtM() && "Unexpected custom legalisation");
998     if (N->getOperand(0).getOpcode() == ISD::Constant ||
999         N->getOperand(1).getOpcode() == ISD::Constant)
1000       return;
1001     Results.push_back(customLegalizeToWOp(N, DAG));
1002     break;
1003   case ISD::BITCAST: {
1004     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
1005            Subtarget.hasStdExtF() && "Unexpected custom legalisation");
1006     SDValue Op0 = N->getOperand(0);
1007     if (Op0.getValueType() != MVT::f32)
1008       return;
1009     SDValue FPConv =
1010         DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
1011     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
1012     break;
1013   }
1014   }
1015 }
1016 
1017 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1018                                                DAGCombinerInfo &DCI) const {
1019   SelectionDAG &DAG = DCI.DAG;
1020 
1021   switch (N->getOpcode()) {
1022   default:
1023     break;
1024   case RISCVISD::SplitF64: {
1025     SDValue Op0 = N->getOperand(0);
1026     // If the input to SplitF64 is just BuildPairF64 then the operation is
1027     // redundant. Instead, use BuildPairF64's operands directly.
1028     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
1029       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
1030 
1031     SDLoc DL(N);
1032 
1033     // It's cheaper to materialise two 32-bit integers than to load a double
1034     // from the constant pool and transfer it to integer registers through the
1035     // stack.
1036     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
1037       APInt V = C->getValueAPF().bitcastToAPInt();
1038       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
1039       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
1040       return DCI.CombineTo(N, Lo, Hi);
1041     }
1042 
1043     // This is a target-specific version of a DAGCombine performed in
1044     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1045     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1046     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1047     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1048         !Op0.getNode()->hasOneUse())
1049       break;
1050     SDValue NewSplitF64 =
1051         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
1052                     Op0.getOperand(0));
1053     SDValue Lo = NewSplitF64.getValue(0);
1054     SDValue Hi = NewSplitF64.getValue(1);
1055     APInt SignBit = APInt::getSignMask(32);
1056     if (Op0.getOpcode() == ISD::FNEG) {
1057       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
1058                                   DAG.getConstant(SignBit, DL, MVT::i32));
1059       return DCI.CombineTo(N, Lo, NewHi);
1060     }
1061     assert(Op0.getOpcode() == ISD::FABS);
1062     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
1063                                 DAG.getConstant(~SignBit, DL, MVT::i32));
1064     return DCI.CombineTo(N, Lo, NewHi);
1065   }
1066   case RISCVISD::SLLW:
1067   case RISCVISD::SRAW:
1068   case RISCVISD::SRLW: {
1069     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
1070     SDValue LHS = N->getOperand(0);
1071     SDValue RHS = N->getOperand(1);
1072     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
1073     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
1074     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
1075         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
1076       if (N->getOpcode() != ISD::DELETED_NODE)
1077         DCI.AddToWorklist(N);
1078       return SDValue(N, 0);
1079     }
1080     break;
1081   }
1082   case RISCVISD::FMV_X_ANYEXTW_RV64: {
1083     SDLoc DL(N);
1084     SDValue Op0 = N->getOperand(0);
1085     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1086     // conversion is unnecessary and can be replaced with an ANY_EXTEND
1087     // of the FMV_W_X_RV64 operand.
1088     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
1089       SDValue AExtOp =
1090           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
1091       return DCI.CombineTo(N, AExtOp);
1092     }
1093 
1094     // This is a target-specific version of a DAGCombine performed in
1095     // DAGCombiner::visitBITCAST. It performs the equivalent of:
1096     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1097     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1098     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1099         !Op0.getNode()->hasOneUse())
1100       break;
1101     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
1102                                  Op0.getOperand(0));
1103     APInt SignBit = APInt::getSignMask(32).sext(64);
1104     if (Op0.getOpcode() == ISD::FNEG) {
1105       return DCI.CombineTo(N,
1106                            DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
1107                                        DAG.getConstant(SignBit, DL, MVT::i64)));
1108     }
1109     assert(Op0.getOpcode() == ISD::FABS);
1110     return DCI.CombineTo(N,
1111                          DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
1112                                      DAG.getConstant(~SignBit, DL, MVT::i64)));
1113   }
1114   }
1115 
1116   return SDValue();
1117 }
1118 
1119 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
1120     const SDNode *N, CombineLevel Level) const {
1121   // The following folds are only desirable if `(OP _, c1 << c2)` can be
1122   // materialised in fewer instructions than `(OP _, c1)`:
1123   //
1124   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1125   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1126   SDValue N0 = N->getOperand(0);
1127   EVT Ty = N0.getValueType();
1128   if (Ty.isScalarInteger() &&
1129       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1130     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1131     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1132     if (C1 && C2) {
1133       APInt C1Int = C1->getAPIntValue();
1134       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1135 
1136       // We can materialise `c1 << c2` into an add immediate, so it's "free",
1137       // and the combine should happen, to potentially allow further combines
1138       // later.
1139       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1140           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1141         return true;
1142 
1143       // We can materialise `c1` in an add immediate, so it's "free", and the
1144       // combine should be prevented.
1145       if (C1Int.getMinSignedBits() <= 64 &&
1146           isLegalAddImmediate(C1Int.getSExtValue()))
1147         return false;
1148 
1149       // Neither constant will fit into an immediate, so find materialisation
1150       // costs.
1151       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1152                                               Subtarget.is64Bit());
1153       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1154           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1155 
1156       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1157       // combine should be prevented.
1158       if (C1Cost < ShiftedC1Cost)
1159         return false;
1160     }
1161   }
1162   return true;
1163 }
1164 
1165 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1166     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1167     unsigned Depth) const {
1168   switch (Op.getOpcode()) {
1169   default:
1170     break;
1171   case RISCVISD::SLLW:
1172   case RISCVISD::SRAW:
1173   case RISCVISD::SRLW:
1174   case RISCVISD::DIVW:
1175   case RISCVISD::DIVUW:
1176   case RISCVISD::REMUW:
1177     // TODO: As the result is sign-extended, this is conservatively correct. A
1178     // more precise answer could be calculated for SRAW depending on known
1179     // bits in the shift amount.
1180     return 33;
1181   }
1182 
1183   return 1;
1184 }
1185 
1186 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1187                                                   MachineBasicBlock *BB) {
1188   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1189 
1190   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1191   // Should the count have wrapped while it was being read, we need to try
1192   // again.
1193   // ...
1194   // read:
1195   // rdcycleh x3 # load high word of cycle
1196   // rdcycle  x2 # load low word of cycle
1197   // rdcycleh x4 # load high word of cycle
1198   // bne x3, x4, read # check if high word reads match, otherwise try again
1199   // ...
1200 
1201   MachineFunction &MF = *BB->getParent();
1202   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1203   MachineFunction::iterator It = ++BB->getIterator();
1204 
1205   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1206   MF.insert(It, LoopMBB);
1207 
1208   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1209   MF.insert(It, DoneMBB);
1210 
1211   // Transfer the remainder of BB and its successor edges to DoneMBB.
1212   DoneMBB->splice(DoneMBB->begin(), BB,
1213                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
1214   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1215 
1216   BB->addSuccessor(LoopMBB);
1217 
1218   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1219   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1220   Register LoReg = MI.getOperand(0).getReg();
1221   Register HiReg = MI.getOperand(1).getReg();
1222   DebugLoc DL = MI.getDebugLoc();
1223 
1224   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1225   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1226       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1227       .addReg(RISCV::X0);
1228   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1229       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1230       .addReg(RISCV::X0);
1231   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1232       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1233       .addReg(RISCV::X0);
1234 
1235   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1236       .addReg(HiReg)
1237       .addReg(ReadAgainReg)
1238       .addMBB(LoopMBB);
1239 
1240   LoopMBB->addSuccessor(LoopMBB);
1241   LoopMBB->addSuccessor(DoneMBB);
1242 
1243   MI.eraseFromParent();
1244 
1245   return DoneMBB;
1246 }
1247 
1248 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1249                                              MachineBasicBlock *BB) {
1250   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1251 
1252   MachineFunction &MF = *BB->getParent();
1253   DebugLoc DL = MI.getDebugLoc();
1254   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1255   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1256   Register LoReg = MI.getOperand(0).getReg();
1257   Register HiReg = MI.getOperand(1).getReg();
1258   Register SrcReg = MI.getOperand(2).getReg();
1259   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1260   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1261 
1262   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1263                           RI);
1264   MachineMemOperand *MMO =
1265       MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1266                               MachineMemOperand::MOLoad, 8, Align(8));
1267   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1268       .addFrameIndex(FI)
1269       .addImm(0)
1270       .addMemOperand(MMO);
1271   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1272       .addFrameIndex(FI)
1273       .addImm(4)
1274       .addMemOperand(MMO);
1275   MI.eraseFromParent(); // The pseudo instruction is gone now.
1276   return BB;
1277 }
1278 
1279 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1280                                                  MachineBasicBlock *BB) {
1281   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1282          "Unexpected instruction");
1283 
1284   MachineFunction &MF = *BB->getParent();
1285   DebugLoc DL = MI.getDebugLoc();
1286   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1287   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1288   Register DstReg = MI.getOperand(0).getReg();
1289   Register LoReg = MI.getOperand(1).getReg();
1290   Register HiReg = MI.getOperand(2).getReg();
1291   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
1292   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1293 
1294   MachineMemOperand *MMO =
1295       MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1296                               MachineMemOperand::MOStore, 8, Align(8));
1297   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1298       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
1299       .addFrameIndex(FI)
1300       .addImm(0)
1301       .addMemOperand(MMO);
1302   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1303       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
1304       .addFrameIndex(FI)
1305       .addImm(4)
1306       .addMemOperand(MMO);
1307   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
1308   MI.eraseFromParent(); // The pseudo instruction is gone now.
1309   return BB;
1310 }
1311 
1312 static bool isSelectPseudo(MachineInstr &MI) {
1313   switch (MI.getOpcode()) {
1314   default:
1315     return false;
1316   case RISCV::Select_GPR_Using_CC_GPR:
1317   case RISCV::Select_FPR32_Using_CC_GPR:
1318   case RISCV::Select_FPR64_Using_CC_GPR:
1319     return true;
1320   }
1321 }
1322 
1323 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
1324                                            MachineBasicBlock *BB) {
1325   // To "insert" Select_* instructions, we actually have to insert the triangle
1326   // control-flow pattern.  The incoming instructions know the destination vreg
1327   // to set, the condition code register to branch on, the true/false values to
1328   // select between, and the condcode to use to select the appropriate branch.
1329   //
1330   // We produce the following control flow:
1331   //     HeadMBB
1332   //     |  \
1333   //     |  IfFalseMBB
1334   //     | /
1335   //    TailMBB
1336   //
1337   // When we find a sequence of selects we attempt to optimize their emission
1338   // by sharing the control flow. Currently we only handle cases where we have
1339   // multiple selects with the exact same condition (same LHS, RHS and CC).
1340   // The selects may be interleaved with other instructions if the other
1341   // instructions meet some requirements we deem safe:
1342   // - They are debug instructions. Otherwise,
1343   // - They do not have side-effects, do not access memory and their inputs do
1344   //   not depend on the results of the select pseudo-instructions.
1345   // The TrueV/FalseV operands of the selects cannot depend on the result of
1346   // previous selects in the sequence.
1347   // These conditions could be further relaxed. See the X86 target for a
1348   // related approach and more information.
1349   Register LHS = MI.getOperand(1).getReg();
1350   Register RHS = MI.getOperand(2).getReg();
1351   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
1352 
1353   SmallVector<MachineInstr *, 4> SelectDebugValues;
1354   SmallSet<Register, 4> SelectDests;
1355   SelectDests.insert(MI.getOperand(0).getReg());
1356 
1357   MachineInstr *LastSelectPseudo = &MI;
1358 
1359   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
1360        SequenceMBBI != E; ++SequenceMBBI) {
1361     if (SequenceMBBI->isDebugInstr())
1362       continue;
1363     else if (isSelectPseudo(*SequenceMBBI)) {
1364       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
1365           SequenceMBBI->getOperand(2).getReg() != RHS ||
1366           SequenceMBBI->getOperand(3).getImm() != CC ||
1367           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
1368           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
1369         break;
1370       LastSelectPseudo = &*SequenceMBBI;
1371       SequenceMBBI->collectDebugValues(SelectDebugValues);
1372       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
1373     } else {
1374       if (SequenceMBBI->hasUnmodeledSideEffects() ||
1375           SequenceMBBI->mayLoadOrStore())
1376         break;
1377       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
1378             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
1379           }))
1380         break;
1381     }
1382   }
1383 
1384   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1385   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1386   DebugLoc DL = MI.getDebugLoc();
1387   MachineFunction::iterator I = ++BB->getIterator();
1388 
1389   MachineBasicBlock *HeadMBB = BB;
1390   MachineFunction *F = BB->getParent();
1391   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
1392   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
1393 
1394   F->insert(I, IfFalseMBB);
1395   F->insert(I, TailMBB);
1396 
1397   // Transfer debug instructions associated with the selects to TailMBB.
1398   for (MachineInstr *DebugInstr : SelectDebugValues) {
1399     TailMBB->push_back(DebugInstr->removeFromParent());
1400   }
1401 
1402   // Move all instructions after the sequence to TailMBB.
1403   TailMBB->splice(TailMBB->end(), HeadMBB,
1404                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
1405   // Update machine-CFG edges by transferring all successors of the current
1406   // block to the new block which will contain the Phi nodes for the selects.
1407   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
1408   // Set the successors for HeadMBB.
1409   HeadMBB->addSuccessor(IfFalseMBB);
1410   HeadMBB->addSuccessor(TailMBB);
1411 
1412   // Insert appropriate branch.
1413   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
1414 
1415   BuildMI(HeadMBB, DL, TII.get(Opcode))
1416     .addReg(LHS)
1417     .addReg(RHS)
1418     .addMBB(TailMBB);
1419 
1420   // IfFalseMBB just falls through to TailMBB.
1421   IfFalseMBB->addSuccessor(TailMBB);
1422 
1423   // Create PHIs for all of the select pseudo-instructions.
1424   auto SelectMBBI = MI.getIterator();
1425   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
1426   auto InsertionPoint = TailMBB->begin();
1427   while (SelectMBBI != SelectEnd) {
1428     auto Next = std::next(SelectMBBI);
1429     if (isSelectPseudo(*SelectMBBI)) {
1430       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
1431       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
1432               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
1433           .addReg(SelectMBBI->getOperand(4).getReg())
1434           .addMBB(HeadMBB)
1435           .addReg(SelectMBBI->getOperand(5).getReg())
1436           .addMBB(IfFalseMBB);
1437       SelectMBBI->eraseFromParent();
1438     }
1439     SelectMBBI = Next;
1440   }
1441 
1442   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
1443   return TailMBB;
1444 }
1445 
1446 MachineBasicBlock *
1447 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1448                                                  MachineBasicBlock *BB) const {
1449   switch (MI.getOpcode()) {
1450   default:
1451     llvm_unreachable("Unexpected instr type to insert");
1452   case RISCV::ReadCycleWide:
1453     assert(!Subtarget.is64Bit() &&
1454            "ReadCycleWrite is only to be used on riscv32");
1455     return emitReadCycleWidePseudo(MI, BB);
1456   case RISCV::Select_GPR_Using_CC_GPR:
1457   case RISCV::Select_FPR32_Using_CC_GPR:
1458   case RISCV::Select_FPR64_Using_CC_GPR:
1459     return emitSelectPseudo(MI, BB);
1460   case RISCV::BuildPairF64Pseudo:
1461     return emitBuildPairF64Pseudo(MI, BB);
1462   case RISCV::SplitF64Pseudo:
1463     return emitSplitF64Pseudo(MI, BB);
1464   }
1465 }
1466 
1467 // Calling Convention Implementation.
1468 // The expectations for frontend ABI lowering vary from target to target.
1469 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
1470 // details, but this is a longer term goal. For now, we simply try to keep the
1471 // role of the frontend as simple and well-defined as possible. The rules can
1472 // be summarised as:
1473 // * Never split up large scalar arguments. We handle them here.
1474 // * If a hardfloat calling convention is being used, and the struct may be
1475 // passed in a pair of registers (fp+fp, int+fp), and both registers are
1476 // available, then pass as two separate arguments. If either the GPRs or FPRs
1477 // are exhausted, then pass according to the rule below.
1478 // * If a struct could never be passed in registers or directly in a stack
1479 // slot (as it is larger than 2*XLEN and the floating point rules don't
1480 // apply), then pass it using a pointer with the byval attribute.
1481 // * If a struct is less than 2*XLEN, then coerce to either a two-element
1482 // word-sized array or a 2*XLEN scalar (depending on alignment).
1483 // * The frontend can determine whether a struct is returned by reference or
1484 // not based on its size and fields. If it will be returned by reference, the
1485 // frontend must modify the prototype so a pointer with the sret annotation is
1486 // passed as the first argument. This is not necessary for large scalar
1487 // returns.
1488 // * Struct return values and varargs should be coerced to structs containing
1489 // register-size fields in the same situations they would be for fixed
1490 // arguments.
1491 
1492 static const MCPhysReg ArgGPRs[] = {
1493   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
1494   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
1495 };
1496 static const MCPhysReg ArgFPR32s[] = {
1497   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
1498   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
1499 };
1500 static const MCPhysReg ArgFPR64s[] = {
1501   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
1502   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
1503 };
1504 
1505 // Pass a 2*XLEN argument that has been split into two XLEN values through
1506 // registers or the stack as necessary.
1507 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
1508                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
1509                                 MVT ValVT2, MVT LocVT2,
1510                                 ISD::ArgFlagsTy ArgFlags2) {
1511   unsigned XLenInBytes = XLen / 8;
1512   if (Register Reg = State.AllocateReg(ArgGPRs)) {
1513     // At least one half can be passed via register.
1514     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
1515                                      VA1.getLocVT(), CCValAssign::Full));
1516   } else {
1517     // Both halves must be passed on the stack, with proper alignment.
1518     Align StackAlign =
1519         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
1520     State.addLoc(
1521         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
1522                             State.AllocateStack(XLenInBytes, StackAlign),
1523                             VA1.getLocVT(), CCValAssign::Full));
1524     State.addLoc(CCValAssign::getMem(
1525         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
1526         LocVT2, CCValAssign::Full));
1527     return false;
1528   }
1529 
1530   if (Register Reg = State.AllocateReg(ArgGPRs)) {
1531     // The second half can also be passed via register.
1532     State.addLoc(
1533         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
1534   } else {
1535     // The second half is passed via the stack, without additional alignment.
1536     State.addLoc(CCValAssign::getMem(
1537         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
1538         LocVT2, CCValAssign::Full));
1539   }
1540 
1541   return false;
1542 }
1543 
1544 // Implements the RISC-V calling convention. Returns true upon failure.
1545 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
1546                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
1547                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
1548                      bool IsRet, Type *OrigTy) {
1549   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
1550   assert(XLen == 32 || XLen == 64);
1551   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
1552 
1553   // Any return value split in to more than two values can't be returned
1554   // directly.
1555   if (IsRet && ValNo > 1)
1556     return true;
1557 
1558   // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
1559   // variadic argument, or if no F32 argument registers are available.
1560   bool UseGPRForF32 = true;
1561   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
1562   // variadic argument, or if no F64 argument registers are available.
1563   bool UseGPRForF64 = true;
1564 
1565   switch (ABI) {
1566   default:
1567     llvm_unreachable("Unexpected ABI");
1568   case RISCVABI::ABI_ILP32:
1569   case RISCVABI::ABI_LP64:
1570     break;
1571   case RISCVABI::ABI_ILP32F:
1572   case RISCVABI::ABI_LP64F:
1573     UseGPRForF32 = !IsFixed;
1574     break;
1575   case RISCVABI::ABI_ILP32D:
1576   case RISCVABI::ABI_LP64D:
1577     UseGPRForF32 = !IsFixed;
1578     UseGPRForF64 = !IsFixed;
1579     break;
1580   }
1581 
1582   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1583     UseGPRForF32 = true;
1584   if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1585     UseGPRForF64 = true;
1586 
1587   // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1588   // variables rather than directly checking against the target ABI.
1589 
1590   if (UseGPRForF32 && ValVT == MVT::f32) {
1591     LocVT = XLenVT;
1592     LocInfo = CCValAssign::BCvt;
1593   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1594     LocVT = MVT::i64;
1595     LocInfo = CCValAssign::BCvt;
1596   }
1597 
1598   // If this is a variadic argument, the RISC-V calling convention requires
1599   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1600   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1601   // be used regardless of whether the original argument was split during
1602   // legalisation or not. The argument will not be passed by registers if the
1603   // original type is larger than 2*XLEN, so the register alignment rule does
1604   // not apply.
1605   unsigned TwoXLenInBytes = (2 * XLen) / 8;
1606   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
1607       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1608     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1609     // Skip 'odd' register if necessary.
1610     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1611       State.AllocateReg(ArgGPRs);
1612   }
1613 
1614   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1615   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1616       State.getPendingArgFlags();
1617 
1618   assert(PendingLocs.size() == PendingArgFlags.size() &&
1619          "PendingLocs and PendingArgFlags out of sync");
1620 
1621   // Handle passing f64 on RV32D with a soft float ABI or when floating point
1622   // registers are exhausted.
1623   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
1624     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1625            "Can't lower f64 if it is split");
1626     // Depending on available argument GPRS, f64 may be passed in a pair of
1627     // GPRs, split between a GPR and the stack, or passed completely on the
1628     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1629     // cases.
1630     Register Reg = State.AllocateReg(ArgGPRs);
1631     LocVT = MVT::i32;
1632     if (!Reg) {
1633       unsigned StackOffset = State.AllocateStack(8, Align(8));
1634       State.addLoc(
1635           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1636       return false;
1637     }
1638     if (!State.AllocateReg(ArgGPRs))
1639       State.AllocateStack(4, Align(4));
1640     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1641     return false;
1642   }
1643 
1644   // Split arguments might be passed indirectly, so keep track of the pending
1645   // values.
1646   if (ArgFlags.isSplit() || !PendingLocs.empty()) {
1647     LocVT = XLenVT;
1648     LocInfo = CCValAssign::Indirect;
1649     PendingLocs.push_back(
1650         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
1651     PendingArgFlags.push_back(ArgFlags);
1652     if (!ArgFlags.isSplitEnd()) {
1653       return false;
1654     }
1655   }
1656 
1657   // If the split argument only had two elements, it should be passed directly
1658   // in registers or on the stack.
1659   if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
1660     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
1661     // Apply the normal calling convention rules to the first half of the
1662     // split argument.
1663     CCValAssign VA = PendingLocs[0];
1664     ISD::ArgFlagsTy AF = PendingArgFlags[0];
1665     PendingLocs.clear();
1666     PendingArgFlags.clear();
1667     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
1668                                ArgFlags);
1669   }
1670 
1671   // Allocate to a register if possible, or else a stack slot.
1672   Register Reg;
1673   if (ValVT == MVT::f32 && !UseGPRForF32)
1674     Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
1675   else if (ValVT == MVT::f64 && !UseGPRForF64)
1676     Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
1677   else
1678     Reg = State.AllocateReg(ArgGPRs);
1679   unsigned StackOffset =
1680       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
1681 
1682   // If we reach this point and PendingLocs is non-empty, we must be at the
1683   // end of a split argument that must be passed indirectly.
1684   if (!PendingLocs.empty()) {
1685     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
1686     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
1687 
1688     for (auto &It : PendingLocs) {
1689       if (Reg)
1690         It.convertToReg(Reg);
1691       else
1692         It.convertToMem(StackOffset);
1693       State.addLoc(It);
1694     }
1695     PendingLocs.clear();
1696     PendingArgFlags.clear();
1697     return false;
1698   }
1699 
1700   assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
1701          "Expected an XLenVT at this stage");
1702 
1703   if (Reg) {
1704     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1705     return false;
1706   }
1707 
1708   // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
1709   if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1710     LocVT = ValVT;
1711     LocInfo = CCValAssign::Full;
1712   }
1713   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1714   return false;
1715 }
1716 
1717 void RISCVTargetLowering::analyzeInputArgs(
1718     MachineFunction &MF, CCState &CCInfo,
1719     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1720   unsigned NumArgs = Ins.size();
1721   FunctionType *FType = MF.getFunction().getFunctionType();
1722 
1723   for (unsigned i = 0; i != NumArgs; ++i) {
1724     MVT ArgVT = Ins[i].VT;
1725     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1726 
1727     Type *ArgTy = nullptr;
1728     if (IsRet)
1729       ArgTy = FType->getReturnType();
1730     else if (Ins[i].isOrigArg())
1731       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1732 
1733     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1734     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1735                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
1736       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1737                         << EVT(ArgVT).getEVTString() << '\n');
1738       llvm_unreachable(nullptr);
1739     }
1740   }
1741 }
1742 
1743 void RISCVTargetLowering::analyzeOutputArgs(
1744     MachineFunction &MF, CCState &CCInfo,
1745     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1746     CallLoweringInfo *CLI) const {
1747   unsigned NumArgs = Outs.size();
1748 
1749   for (unsigned i = 0; i != NumArgs; i++) {
1750     MVT ArgVT = Outs[i].VT;
1751     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1752     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1753 
1754     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1755     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1756                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1757       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1758                         << EVT(ArgVT).getEVTString() << "\n");
1759       llvm_unreachable(nullptr);
1760     }
1761   }
1762 }
1763 
1764 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1765 // values.
1766 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1767                                    const CCValAssign &VA, const SDLoc &DL) {
1768   switch (VA.getLocInfo()) {
1769   default:
1770     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1771   case CCValAssign::Full:
1772     break;
1773   case CCValAssign::BCvt:
1774     if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1775       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1776       break;
1777     }
1778     Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1779     break;
1780   }
1781   return Val;
1782 }
1783 
1784 // The caller is responsible for loading the full value if the argument is
1785 // passed with CCValAssign::Indirect.
1786 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
1787                                 const CCValAssign &VA, const SDLoc &DL) {
1788   MachineFunction &MF = DAG.getMachineFunction();
1789   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1790   EVT LocVT = VA.getLocVT();
1791   SDValue Val;
1792   const TargetRegisterClass *RC;
1793 
1794   switch (LocVT.getSimpleVT().SimpleTy) {
1795   default:
1796     llvm_unreachable("Unexpected register type");
1797   case MVT::i32:
1798   case MVT::i64:
1799     RC = &RISCV::GPRRegClass;
1800     break;
1801   case MVT::f32:
1802     RC = &RISCV::FPR32RegClass;
1803     break;
1804   case MVT::f64:
1805     RC = &RISCV::FPR64RegClass;
1806     break;
1807   }
1808 
1809   Register VReg = RegInfo.createVirtualRegister(RC);
1810   RegInfo.addLiveIn(VA.getLocReg(), VReg);
1811   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1812 
1813   if (VA.getLocInfo() == CCValAssign::Indirect)
1814     return Val;
1815 
1816   return convertLocVTToValVT(DAG, Val, VA, DL);
1817 }
1818 
1819 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
1820                                    const CCValAssign &VA, const SDLoc &DL) {
1821   EVT LocVT = VA.getLocVT();
1822 
1823   switch (VA.getLocInfo()) {
1824   default:
1825     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1826   case CCValAssign::Full:
1827     break;
1828   case CCValAssign::BCvt:
1829     if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1830       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
1831       break;
1832     }
1833     Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1834     break;
1835   }
1836   return Val;
1837 }
1838 
1839 // The caller is responsible for loading the full value if the argument is
1840 // passed with CCValAssign::Indirect.
1841 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
1842                                 const CCValAssign &VA, const SDLoc &DL) {
1843   MachineFunction &MF = DAG.getMachineFunction();
1844   MachineFrameInfo &MFI = MF.getFrameInfo();
1845   EVT LocVT = VA.getLocVT();
1846   EVT ValVT = VA.getValVT();
1847   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
1848   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
1849                                  VA.getLocMemOffset(), /*Immutable=*/true);
1850   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1851   SDValue Val;
1852 
1853   ISD::LoadExtType ExtType;
1854   switch (VA.getLocInfo()) {
1855   default:
1856     llvm_unreachable("Unexpected CCValAssign::LocInfo");
1857   case CCValAssign::Full:
1858   case CCValAssign::Indirect:
1859   case CCValAssign::BCvt:
1860     ExtType = ISD::NON_EXTLOAD;
1861     break;
1862   }
1863   Val = DAG.getExtLoad(
1864       ExtType, DL, LocVT, Chain, FIN,
1865       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
1866   return Val;
1867 }
1868 
1869 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
1870                                        const CCValAssign &VA, const SDLoc &DL) {
1871   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
1872          "Unexpected VA");
1873   MachineFunction &MF = DAG.getMachineFunction();
1874   MachineFrameInfo &MFI = MF.getFrameInfo();
1875   MachineRegisterInfo &RegInfo = MF.getRegInfo();
1876 
1877   if (VA.isMemLoc()) {
1878     // f64 is passed on the stack.
1879     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1880     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1881     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1882                        MachinePointerInfo::getFixedStack(MF, FI));
1883   }
1884 
1885   assert(VA.isRegLoc() && "Expected register VA assignment");
1886 
1887   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1888   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1889   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1890   SDValue Hi;
1891   if (VA.getLocReg() == RISCV::X17) {
1892     // Second half of f64 is passed on the stack.
1893     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1894     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1895     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1896                      MachinePointerInfo::getFixedStack(MF, FI));
1897   } else {
1898     // Second half of f64 is passed in another GPR.
1899     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1900     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1901     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1902   }
1903   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1904 }
1905 
1906 // FastCC has less than 1% performance improvement for some particular
1907 // benchmark. But theoretically, it may has benenfit for some cases.
1908 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1909                             CCValAssign::LocInfo LocInfo,
1910                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
1911 
1912   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1913     // X5 and X6 might be used for save-restore libcall.
1914     static const MCPhysReg GPRList[] = {
1915         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1916         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
1917         RISCV::X29, RISCV::X30, RISCV::X31};
1918     if (unsigned Reg = State.AllocateReg(GPRList)) {
1919       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1920       return false;
1921     }
1922   }
1923 
1924   if (LocVT == MVT::f32) {
1925     static const MCPhysReg FPR32List[] = {
1926         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1927         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
1928         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
1929         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1930     if (unsigned Reg = State.AllocateReg(FPR32List)) {
1931       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1932       return false;
1933     }
1934   }
1935 
1936   if (LocVT == MVT::f64) {
1937     static const MCPhysReg FPR64List[] = {
1938         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1939         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
1940         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
1941         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1942     if (unsigned Reg = State.AllocateReg(FPR64List)) {
1943       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1944       return false;
1945     }
1946   }
1947 
1948   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1949     unsigned Offset4 = State.AllocateStack(4, Align(4));
1950     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1951     return false;
1952   }
1953 
1954   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1955     unsigned Offset5 = State.AllocateStack(8, Align(8));
1956     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1957     return false;
1958   }
1959 
1960   return true; // CC didn't match.
1961 }
1962 
1963 // Transform physical registers into virtual registers.
1964 SDValue RISCVTargetLowering::LowerFormalArguments(
1965     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1966     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1967     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1968 
1969   switch (CallConv) {
1970   default:
1971     report_fatal_error("Unsupported calling convention");
1972   case CallingConv::C:
1973   case CallingConv::Fast:
1974     break;
1975   }
1976 
1977   MachineFunction &MF = DAG.getMachineFunction();
1978 
1979   const Function &Func = MF.getFunction();
1980   if (Func.hasFnAttribute("interrupt")) {
1981     if (!Func.arg_empty())
1982       report_fatal_error(
1983         "Functions with the interrupt attribute cannot have arguments!");
1984 
1985     StringRef Kind =
1986       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1987 
1988     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1989       report_fatal_error(
1990         "Function interrupt attribute argument not supported!");
1991   }
1992 
1993   EVT PtrVT = getPointerTy(DAG.getDataLayout());
1994   MVT XLenVT = Subtarget.getXLenVT();
1995   unsigned XLenInBytes = Subtarget.getXLen() / 8;
1996   // Used with vargs to acumulate store chains.
1997   std::vector<SDValue> OutChains;
1998 
1999   // Assign locations to all of the incoming arguments.
2000   SmallVector<CCValAssign, 16> ArgLocs;
2001   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2002 
2003   if (CallConv == CallingConv::Fast)
2004     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2005   else
2006     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2007 
2008   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2009     CCValAssign &VA = ArgLocs[i];
2010     SDValue ArgValue;
2011     // Passing f64 on RV32D with a soft float ABI must be handled as a special
2012     // case.
2013     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2014       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2015     else if (VA.isRegLoc())
2016       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
2017     else
2018       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2019 
2020     if (VA.getLocInfo() == CCValAssign::Indirect) {
2021       // If the original argument was split and passed by reference (e.g. i128
2022       // on RV32), we need to load all parts of it here (using the same
2023       // address).
2024       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2025                                    MachinePointerInfo()));
2026       unsigned ArgIndex = Ins[i].OrigArgIndex;
2027       assert(Ins[i].PartOffset == 0);
2028       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2029         CCValAssign &PartVA = ArgLocs[i + 1];
2030         unsigned PartOffset = Ins[i + 1].PartOffset;
2031         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2032                                       DAG.getIntPtrConstant(PartOffset, DL));
2033         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2034                                      MachinePointerInfo()));
2035         ++i;
2036       }
2037       continue;
2038     }
2039     InVals.push_back(ArgValue);
2040   }
2041 
2042   if (IsVarArg) {
2043     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2044     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2045     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2046     MachineFrameInfo &MFI = MF.getFrameInfo();
2047     MachineRegisterInfo &RegInfo = MF.getRegInfo();
2048     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2049 
2050     // Offset of the first variable argument from stack pointer, and size of
2051     // the vararg save area. For now, the varargs save area is either zero or
2052     // large enough to hold a0-a7.
2053     int VaArgOffset, VarArgsSaveSize;
2054 
2055     // If all registers are allocated, then all varargs must be passed on the
2056     // stack and we don't need to save any argregs.
2057     if (ArgRegs.size() == Idx) {
2058       VaArgOffset = CCInfo.getNextStackOffset();
2059       VarArgsSaveSize = 0;
2060     } else {
2061       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2062       VaArgOffset = -VarArgsSaveSize;
2063     }
2064 
2065     // Record the frame index of the first variable argument
2066     // which is a value necessary to VASTART.
2067     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2068     RVFI->setVarArgsFrameIndex(FI);
2069 
2070     // If saving an odd number of registers then create an extra stack slot to
2071     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2072     // offsets to even-numbered registered remain 2*XLEN-aligned.
2073     if (Idx % 2) {
2074       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2075       VarArgsSaveSize += XLenInBytes;
2076     }
2077 
2078     // Copy the integer registers that may have been used for passing varargs
2079     // to the vararg save area.
2080     for (unsigned I = Idx; I < ArgRegs.size();
2081          ++I, VaArgOffset += XLenInBytes) {
2082       const Register Reg = RegInfo.createVirtualRegister(RC);
2083       RegInfo.addLiveIn(ArgRegs[I], Reg);
2084       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2085       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2086       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2087       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2088                                    MachinePointerInfo::getFixedStack(MF, FI));
2089       cast<StoreSDNode>(Store.getNode())
2090           ->getMemOperand()
2091           ->setValue((Value *)nullptr);
2092       OutChains.push_back(Store);
2093     }
2094     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2095   }
2096 
2097   // All stores are grouped in one node to allow the matching between
2098   // the size of Ins and InVals. This only happens for vararg functions.
2099   if (!OutChains.empty()) {
2100     OutChains.push_back(Chain);
2101     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2102   }
2103 
2104   return Chain;
2105 }
2106 
2107 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2108 /// for tail call optimization.
2109 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2110 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2111     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2112     const SmallVector<CCValAssign, 16> &ArgLocs) const {
2113 
2114   auto &Callee = CLI.Callee;
2115   auto CalleeCC = CLI.CallConv;
2116   auto &Outs = CLI.Outs;
2117   auto &Caller = MF.getFunction();
2118   auto CallerCC = Caller.getCallingConv();
2119 
2120   // Exception-handling functions need a special set of instructions to
2121   // indicate a return to the hardware. Tail-calling another function would
2122   // probably break this.
2123   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2124   // should be expanded as new function attributes are introduced.
2125   if (Caller.hasFnAttribute("interrupt"))
2126     return false;
2127 
2128   // Do not tail call opt if the stack is used to pass parameters.
2129   if (CCInfo.getNextStackOffset() != 0)
2130     return false;
2131 
2132   // Do not tail call opt if any parameters need to be passed indirectly.
2133   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2134   // passed indirectly. So the address of the value will be passed in a
2135   // register, or if not available, then the address is put on the stack. In
2136   // order to pass indirectly, space on the stack often needs to be allocated
2137   // in order to store the value. In this case the CCInfo.getNextStackOffset()
2138   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
2139   // are passed CCValAssign::Indirect.
2140   for (auto &VA : ArgLocs)
2141     if (VA.getLocInfo() == CCValAssign::Indirect)
2142       return false;
2143 
2144   // Do not tail call opt if either caller or callee uses struct return
2145   // semantics.
2146   auto IsCallerStructRet = Caller.hasStructRetAttr();
2147   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2148   if (IsCallerStructRet || IsCalleeStructRet)
2149     return false;
2150 
2151   // Externally-defined functions with weak linkage should not be
2152   // tail-called. The behaviour of branch instructions in this situation (as
2153   // used for tail calls) is implementation-defined, so we cannot rely on the
2154   // linker replacing the tail call with a return.
2155   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2156     const GlobalValue *GV = G->getGlobal();
2157     if (GV->hasExternalWeakLinkage())
2158       return false;
2159   }
2160 
2161   // The callee has to preserve all registers the caller needs to preserve.
2162   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2163   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2164   if (CalleeCC != CallerCC) {
2165     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2166     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2167       return false;
2168   }
2169 
2170   // Byval parameters hand the function a pointer directly into the stack area
2171   // we want to reuse during a tail call. Working around this *is* possible
2172   // but less efficient and uglier in LowerCall.
2173   for (auto &Arg : Outs)
2174     if (Arg.Flags.isByVal())
2175       return false;
2176 
2177   return true;
2178 }
2179 
2180 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2181 // and output parameter nodes.
2182 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2183                                        SmallVectorImpl<SDValue> &InVals) const {
2184   SelectionDAG &DAG = CLI.DAG;
2185   SDLoc &DL = CLI.DL;
2186   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2187   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2188   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2189   SDValue Chain = CLI.Chain;
2190   SDValue Callee = CLI.Callee;
2191   bool &IsTailCall = CLI.IsTailCall;
2192   CallingConv::ID CallConv = CLI.CallConv;
2193   bool IsVarArg = CLI.IsVarArg;
2194   EVT PtrVT = getPointerTy(DAG.getDataLayout());
2195   MVT XLenVT = Subtarget.getXLenVT();
2196 
2197   MachineFunction &MF = DAG.getMachineFunction();
2198 
2199   // Analyze the operands of the call, assigning locations to each operand.
2200   SmallVector<CCValAssign, 16> ArgLocs;
2201   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2202 
2203   if (CallConv == CallingConv::Fast)
2204     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2205   else
2206     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2207 
2208   // Check if it's really possible to do a tail call.
2209   if (IsTailCall)
2210     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
2211 
2212   if (IsTailCall)
2213     ++NumTailCalls;
2214   else if (CLI.CB && CLI.CB->isMustTailCall())
2215     report_fatal_error("failed to perform tail call elimination on a call "
2216                        "site marked musttail");
2217 
2218   // Get a count of how many bytes are to be pushed on the stack.
2219   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
2220 
2221   // Create local copies for byval args
2222   SmallVector<SDValue, 8> ByValArgs;
2223   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2224     ISD::ArgFlagsTy Flags = Outs[i].Flags;
2225     if (!Flags.isByVal())
2226       continue;
2227 
2228     SDValue Arg = OutVals[i];
2229     unsigned Size = Flags.getByValSize();
2230     Align Alignment = Flags.getNonZeroByValAlign();
2231 
2232     int FI =
2233         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
2234     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2235     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
2236 
2237     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
2238                           /*IsVolatile=*/false,
2239                           /*AlwaysInline=*/false, IsTailCall,
2240                           MachinePointerInfo(), MachinePointerInfo());
2241     ByValArgs.push_back(FIPtr);
2242   }
2243 
2244   if (!IsTailCall)
2245     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
2246 
2247   // Copy argument values to their designated locations.
2248   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
2249   SmallVector<SDValue, 8> MemOpChains;
2250   SDValue StackPtr;
2251   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
2252     CCValAssign &VA = ArgLocs[i];
2253     SDValue ArgValue = OutVals[i];
2254     ISD::ArgFlagsTy Flags = Outs[i].Flags;
2255 
2256     // Handle passing f64 on RV32D with a soft float ABI as a special case.
2257     bool IsF64OnRV32DSoftABI =
2258         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
2259     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
2260       SDValue SplitF64 = DAG.getNode(
2261           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
2262       SDValue Lo = SplitF64.getValue(0);
2263       SDValue Hi = SplitF64.getValue(1);
2264 
2265       Register RegLo = VA.getLocReg();
2266       RegsToPass.push_back(std::make_pair(RegLo, Lo));
2267 
2268       if (RegLo == RISCV::X17) {
2269         // Second half of f64 is passed on the stack.
2270         // Work out the address of the stack slot.
2271         if (!StackPtr.getNode())
2272           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2273         // Emit the store.
2274         MemOpChains.push_back(
2275             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
2276       } else {
2277         // Second half of f64 is passed in another GPR.
2278         assert(RegLo < RISCV::X31 && "Invalid register pair");
2279         Register RegHigh = RegLo + 1;
2280         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
2281       }
2282       continue;
2283     }
2284 
2285     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
2286     // as any other MemLoc.
2287 
2288     // Promote the value if needed.
2289     // For now, only handle fully promoted and indirect arguments.
2290     if (VA.getLocInfo() == CCValAssign::Indirect) {
2291       // Store the argument in a stack slot and pass its address.
2292       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
2293       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2294       MemOpChains.push_back(
2295           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
2296                        MachinePointerInfo::getFixedStack(MF, FI)));
2297       // If the original argument was split (e.g. i128), we need
2298       // to store all parts of it here (and pass just one address).
2299       unsigned ArgIndex = Outs[i].OrigArgIndex;
2300       assert(Outs[i].PartOffset == 0);
2301       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2302         SDValue PartValue = OutVals[i + 1];
2303         unsigned PartOffset = Outs[i + 1].PartOffset;
2304         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2305                                       DAG.getIntPtrConstant(PartOffset, DL));
2306         MemOpChains.push_back(
2307             DAG.getStore(Chain, DL, PartValue, Address,
2308                          MachinePointerInfo::getFixedStack(MF, FI)));
2309         ++i;
2310       }
2311       ArgValue = SpillSlot;
2312     } else {
2313       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2314     }
2315 
2316     // Use local copy if it is a byval arg.
2317     if (Flags.isByVal())
2318       ArgValue = ByValArgs[j++];
2319 
2320     if (VA.isRegLoc()) {
2321       // Queue up the argument copies and emit them at the end.
2322       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2323     } else {
2324       assert(VA.isMemLoc() && "Argument not register or memory");
2325       assert(!IsTailCall && "Tail call not allowed if stack is used "
2326                             "for passing parameters");
2327 
2328       // Work out the address of the stack slot.
2329       if (!StackPtr.getNode())
2330         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2331       SDValue Address =
2332           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2333                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2334 
2335       // Emit the store.
2336       MemOpChains.push_back(
2337           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2338     }
2339   }
2340 
2341   // Join the stores, which are independent of one another.
2342   if (!MemOpChains.empty())
2343     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2344 
2345   SDValue Glue;
2346 
2347   // Build a sequence of copy-to-reg nodes, chained and glued together.
2348   for (auto &Reg : RegsToPass) {
2349     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2350     Glue = Chain.getValue(1);
2351   }
2352 
2353   // Validate that none of the argument registers have been marked as
2354   // reserved, if so report an error. Do the same for the return address if this
2355   // is not a tailcall.
2356   validateCCReservedRegs(RegsToPass, MF);
2357   if (!IsTailCall &&
2358       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2359     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2360         MF.getFunction(),
2361         "Return address register required, but has been reserved."});
2362 
2363   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2364   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2365   // split it and then direct call can be matched by PseudoCALL.
2366   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2367     const GlobalValue *GV = S->getGlobal();
2368 
2369     unsigned OpFlags = RISCVII::MO_CALL;
2370     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2371       OpFlags = RISCVII::MO_PLT;
2372 
2373     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2374   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2375     unsigned OpFlags = RISCVII::MO_CALL;
2376 
2377     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2378                                                  nullptr))
2379       OpFlags = RISCVII::MO_PLT;
2380 
2381     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2382   }
2383 
2384   // The first call operand is the chain and the second is the target address.
2385   SmallVector<SDValue, 8> Ops;
2386   Ops.push_back(Chain);
2387   Ops.push_back(Callee);
2388 
2389   // Add argument registers to the end of the list so that they are
2390   // known live into the call.
2391   for (auto &Reg : RegsToPass)
2392     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2393 
2394   if (!IsTailCall) {
2395     // Add a register mask operand representing the call-preserved registers.
2396     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2397     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2398     assert(Mask && "Missing call preserved mask for calling convention");
2399     Ops.push_back(DAG.getRegisterMask(Mask));
2400   }
2401 
2402   // Glue the call to the argument copies, if any.
2403   if (Glue.getNode())
2404     Ops.push_back(Glue);
2405 
2406   // Emit the call.
2407   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2408 
2409   if (IsTailCall) {
2410     MF.getFrameInfo().setHasTailCall();
2411     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
2412   }
2413 
2414   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
2415   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
2416   Glue = Chain.getValue(1);
2417 
2418   // Mark the end of the call, which is glued to the call itself.
2419   Chain = DAG.getCALLSEQ_END(Chain,
2420                              DAG.getConstant(NumBytes, DL, PtrVT, true),
2421                              DAG.getConstant(0, DL, PtrVT, true),
2422                              Glue, DL);
2423   Glue = Chain.getValue(1);
2424 
2425   // Assign locations to each value returned by this call.
2426   SmallVector<CCValAssign, 16> RVLocs;
2427   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
2428   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
2429 
2430   // Copy all of the result registers out of their specified physreg.
2431   for (auto &VA : RVLocs) {
2432     // Copy the value out
2433     SDValue RetValue =
2434         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
2435     // Glue the RetValue to the end of the call sequence
2436     Chain = RetValue.getValue(1);
2437     Glue = RetValue.getValue(2);
2438 
2439     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2440       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
2441       SDValue RetValue2 =
2442           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
2443       Chain = RetValue2.getValue(1);
2444       Glue = RetValue2.getValue(2);
2445       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
2446                              RetValue2);
2447     }
2448 
2449     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
2450 
2451     InVals.push_back(RetValue);
2452   }
2453 
2454   return Chain;
2455 }
2456 
2457 bool RISCVTargetLowering::CanLowerReturn(
2458     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
2459     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2460   SmallVector<CCValAssign, 16> RVLocs;
2461   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2462   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2463     MVT VT = Outs[i].VT;
2464     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2465     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2466     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
2467                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
2468       return false;
2469   }
2470   return true;
2471 }
2472 
2473 SDValue
2474 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2475                                  bool IsVarArg,
2476                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
2477                                  const SmallVectorImpl<SDValue> &OutVals,
2478                                  const SDLoc &DL, SelectionDAG &DAG) const {
2479   const MachineFunction &MF = DAG.getMachineFunction();
2480   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2481 
2482   // Stores the assignment of the return value to a location.
2483   SmallVector<CCValAssign, 16> RVLocs;
2484 
2485   // Info about the registers and stack slot.
2486   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2487                  *DAG.getContext());
2488 
2489   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
2490                     nullptr);
2491 
2492   SDValue Glue;
2493   SmallVector<SDValue, 4> RetOps(1, Chain);
2494 
2495   // Copy the result values into the output registers.
2496   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
2497     SDValue Val = OutVals[i];
2498     CCValAssign &VA = RVLocs[i];
2499     assert(VA.isRegLoc() && "Can only return in registers!");
2500 
2501     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2502       // Handle returning f64 on RV32D with a soft float ABI.
2503       assert(VA.isRegLoc() && "Expected return via registers");
2504       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
2505                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
2506       SDValue Lo = SplitF64.getValue(0);
2507       SDValue Hi = SplitF64.getValue(1);
2508       Register RegLo = VA.getLocReg();
2509       assert(RegLo < RISCV::X31 && "Invalid register pair");
2510       Register RegHi = RegLo + 1;
2511 
2512       if (STI.isRegisterReservedByUser(RegLo) ||
2513           STI.isRegisterReservedByUser(RegHi))
2514         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2515             MF.getFunction(),
2516             "Return value register required, but has been reserved."});
2517 
2518       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
2519       Glue = Chain.getValue(1);
2520       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
2521       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
2522       Glue = Chain.getValue(1);
2523       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
2524     } else {
2525       // Handle a 'normal' return.
2526       Val = convertValVTToLocVT(DAG, Val, VA, DL);
2527       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
2528 
2529       if (STI.isRegisterReservedByUser(VA.getLocReg()))
2530         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2531             MF.getFunction(),
2532             "Return value register required, but has been reserved."});
2533 
2534       // Guarantee that all emitted copies are stuck together.
2535       Glue = Chain.getValue(1);
2536       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2537     }
2538   }
2539 
2540   RetOps[0] = Chain; // Update chain.
2541 
2542   // Add the glue node if we have it.
2543   if (Glue.getNode()) {
2544     RetOps.push_back(Glue);
2545   }
2546 
2547   // Interrupt service routines use different return instructions.
2548   const Function &Func = DAG.getMachineFunction().getFunction();
2549   if (Func.hasFnAttribute("interrupt")) {
2550     if (!Func.getReturnType()->isVoidTy())
2551       report_fatal_error(
2552           "Functions with the interrupt attribute must have void return type!");
2553 
2554     MachineFunction &MF = DAG.getMachineFunction();
2555     StringRef Kind =
2556       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2557 
2558     unsigned RetOpc;
2559     if (Kind == "user")
2560       RetOpc = RISCVISD::URET_FLAG;
2561     else if (Kind == "supervisor")
2562       RetOpc = RISCVISD::SRET_FLAG;
2563     else
2564       RetOpc = RISCVISD::MRET_FLAG;
2565 
2566     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
2567   }
2568 
2569   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
2570 }
2571 
2572 void RISCVTargetLowering::validateCCReservedRegs(
2573     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
2574     MachineFunction &MF) const {
2575   const Function &F = MF.getFunction();
2576   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2577 
2578   if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
2579         return STI.isRegisterReservedByUser(Reg.first);
2580       }))
2581     F.getContext().diagnose(DiagnosticInfoUnsupported{
2582         F, "Argument register required, but has been reserved."});
2583 }
2584 
2585 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2586   return CI->isTailCall();
2587 }
2588 
2589 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
2590   switch ((RISCVISD::NodeType)Opcode) {
2591   case RISCVISD::FIRST_NUMBER:
2592     break;
2593   case RISCVISD::RET_FLAG:
2594     return "RISCVISD::RET_FLAG";
2595   case RISCVISD::URET_FLAG:
2596     return "RISCVISD::URET_FLAG";
2597   case RISCVISD::SRET_FLAG:
2598     return "RISCVISD::SRET_FLAG";
2599   case RISCVISD::MRET_FLAG:
2600     return "RISCVISD::MRET_FLAG";
2601   case RISCVISD::CALL:
2602     return "RISCVISD::CALL";
2603   case RISCVISD::SELECT_CC:
2604     return "RISCVISD::SELECT_CC";
2605   case RISCVISD::BuildPairF64:
2606     return "RISCVISD::BuildPairF64";
2607   case RISCVISD::SplitF64:
2608     return "RISCVISD::SplitF64";
2609   case RISCVISD::TAIL:
2610     return "RISCVISD::TAIL";
2611   case RISCVISD::SLLW:
2612     return "RISCVISD::SLLW";
2613   case RISCVISD::SRAW:
2614     return "RISCVISD::SRAW";
2615   case RISCVISD::SRLW:
2616     return "RISCVISD::SRLW";
2617   case RISCVISD::DIVW:
2618     return "RISCVISD::DIVW";
2619   case RISCVISD::DIVUW:
2620     return "RISCVISD::DIVUW";
2621   case RISCVISD::REMUW:
2622     return "RISCVISD::REMUW";
2623   case RISCVISD::FMV_W_X_RV64:
2624     return "RISCVISD::FMV_W_X_RV64";
2625   case RISCVISD::FMV_X_ANYEXTW_RV64:
2626     return "RISCVISD::FMV_X_ANYEXTW_RV64";
2627   case RISCVISD::READ_CYCLE_WIDE:
2628     return "RISCVISD::READ_CYCLE_WIDE";
2629   }
2630   return nullptr;
2631 }
2632 
2633 /// getConstraintType - Given a constraint letter, return the type of
2634 /// constraint it is for this target.
2635 RISCVTargetLowering::ConstraintType
2636 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
2637   if (Constraint.size() == 1) {
2638     switch (Constraint[0]) {
2639     default:
2640       break;
2641     case 'f':
2642       return C_RegisterClass;
2643     case 'I':
2644     case 'J':
2645     case 'K':
2646       return C_Immediate;
2647     case 'A':
2648       return C_Memory;
2649     }
2650   }
2651   return TargetLowering::getConstraintType(Constraint);
2652 }
2653 
2654 std::pair<unsigned, const TargetRegisterClass *>
2655 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2656                                                   StringRef Constraint,
2657                                                   MVT VT) const {
2658   // First, see if this is a constraint that directly corresponds to a
2659   // RISCV register class.
2660   if (Constraint.size() == 1) {
2661     switch (Constraint[0]) {
2662     case 'r':
2663       return std::make_pair(0U, &RISCV::GPRRegClass);
2664     case 'f':
2665       if (Subtarget.hasStdExtF() && VT == MVT::f32)
2666         return std::make_pair(0U, &RISCV::FPR32RegClass);
2667       if (Subtarget.hasStdExtD() && VT == MVT::f64)
2668         return std::make_pair(0U, &RISCV::FPR64RegClass);
2669       break;
2670     default:
2671       break;
2672     }
2673   }
2674 
2675   // Clang will correctly decode the usage of register name aliases into their
2676   // official names. However, other frontends like `rustc` do not. This allows
2677   // users of these frontends to use the ABI names for registers in LLVM-style
2678   // register constraints.
2679   Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
2680                                .Case("{zero}", RISCV::X0)
2681                                .Case("{ra}", RISCV::X1)
2682                                .Case("{sp}", RISCV::X2)
2683                                .Case("{gp}", RISCV::X3)
2684                                .Case("{tp}", RISCV::X4)
2685                                .Case("{t0}", RISCV::X5)
2686                                .Case("{t1}", RISCV::X6)
2687                                .Case("{t2}", RISCV::X7)
2688                                .Cases("{s0}", "{fp}", RISCV::X8)
2689                                .Case("{s1}", RISCV::X9)
2690                                .Case("{a0}", RISCV::X10)
2691                                .Case("{a1}", RISCV::X11)
2692                                .Case("{a2}", RISCV::X12)
2693                                .Case("{a3}", RISCV::X13)
2694                                .Case("{a4}", RISCV::X14)
2695                                .Case("{a5}", RISCV::X15)
2696                                .Case("{a6}", RISCV::X16)
2697                                .Case("{a7}", RISCV::X17)
2698                                .Case("{s2}", RISCV::X18)
2699                                .Case("{s3}", RISCV::X19)
2700                                .Case("{s4}", RISCV::X20)
2701                                .Case("{s5}", RISCV::X21)
2702                                .Case("{s6}", RISCV::X22)
2703                                .Case("{s7}", RISCV::X23)
2704                                .Case("{s8}", RISCV::X24)
2705                                .Case("{s9}", RISCV::X25)
2706                                .Case("{s10}", RISCV::X26)
2707                                .Case("{s11}", RISCV::X27)
2708                                .Case("{t3}", RISCV::X28)
2709                                .Case("{t4}", RISCV::X29)
2710                                .Case("{t5}", RISCV::X30)
2711                                .Case("{t6}", RISCV::X31)
2712                                .Default(RISCV::NoRegister);
2713   if (XRegFromAlias != RISCV::NoRegister)
2714     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
2715 
2716   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
2717   // TableGen record rather than the AsmName to choose registers for InlineAsm
2718   // constraints, plus we want to match those names to the widest floating point
2719   // register type available, manually select floating point registers here.
2720   //
2721   // The second case is the ABI name of the register, so that frontends can also
2722   // use the ABI names in register constraint lists.
2723   if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
2724     std::pair<Register, Register> FReg =
2725         StringSwitch<std::pair<Register, Register>>(Constraint.lower())
2726             .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
2727             .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
2728             .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
2729             .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
2730             .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
2731             .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
2732             .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
2733             .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
2734             .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
2735             .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
2736             .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
2737             .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
2738             .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
2739             .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
2740             .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
2741             .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
2742             .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
2743             .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
2744             .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
2745             .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
2746             .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
2747             .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
2748             .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
2749             .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
2750             .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
2751             .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
2752             .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
2753             .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
2754             .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
2755             .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
2756             .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
2757             .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
2758             .Default({RISCV::NoRegister, RISCV::NoRegister});
2759     if (FReg.first != RISCV::NoRegister)
2760       return Subtarget.hasStdExtD()
2761                  ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
2762                  : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
2763   }
2764 
2765   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2766 }
2767 
2768 unsigned
2769 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2770   // Currently only support length 1 constraints.
2771   if (ConstraintCode.size() == 1) {
2772     switch (ConstraintCode[0]) {
2773     case 'A':
2774       return InlineAsm::Constraint_A;
2775     default:
2776       break;
2777     }
2778   }
2779 
2780   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2781 }
2782 
2783 void RISCVTargetLowering::LowerAsmOperandForConstraint(
2784     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2785     SelectionDAG &DAG) const {
2786   // Currently only support length 1 constraints.
2787   if (Constraint.length() == 1) {
2788     switch (Constraint[0]) {
2789     case 'I':
2790       // Validate & create a 12-bit signed immediate operand.
2791       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2792         uint64_t CVal = C->getSExtValue();
2793         if (isInt<12>(CVal))
2794           Ops.push_back(
2795               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2796       }
2797       return;
2798     case 'J':
2799       // Validate & create an integer zero operand.
2800       if (auto *C = dyn_cast<ConstantSDNode>(Op))
2801         if (C->getZExtValue() == 0)
2802           Ops.push_back(
2803               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
2804       return;
2805     case 'K':
2806       // Validate & create a 5-bit unsigned immediate operand.
2807       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2808         uint64_t CVal = C->getZExtValue();
2809         if (isUInt<5>(CVal))
2810           Ops.push_back(
2811               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2812       }
2813       return;
2814     default:
2815       break;
2816     }
2817   }
2818   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2819 }
2820 
2821 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
2822                                                    Instruction *Inst,
2823                                                    AtomicOrdering Ord) const {
2824   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
2825     return Builder.CreateFence(Ord);
2826   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
2827     return Builder.CreateFence(AtomicOrdering::Release);
2828   return nullptr;
2829 }
2830 
2831 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
2832                                                     Instruction *Inst,
2833                                                     AtomicOrdering Ord) const {
2834   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
2835     return Builder.CreateFence(AtomicOrdering::Acquire);
2836   return nullptr;
2837 }
2838 
2839 TargetLowering::AtomicExpansionKind
2840 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
2841   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
2842   // point operations can't be used in an lr/sc sequence without breaking the
2843   // forward-progress guarantee.
2844   if (AI->isFloatingPointOperation())
2845     return AtomicExpansionKind::CmpXChg;
2846 
2847   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
2848   if (Size == 8 || Size == 16)
2849     return AtomicExpansionKind::MaskedIntrinsic;
2850   return AtomicExpansionKind::None;
2851 }
2852 
2853 static Intrinsic::ID
2854 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
2855   if (XLen == 32) {
2856     switch (BinOp) {
2857     default:
2858       llvm_unreachable("Unexpected AtomicRMW BinOp");
2859     case AtomicRMWInst::Xchg:
2860       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
2861     case AtomicRMWInst::Add:
2862       return Intrinsic::riscv_masked_atomicrmw_add_i32;
2863     case AtomicRMWInst::Sub:
2864       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
2865     case AtomicRMWInst::Nand:
2866       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
2867     case AtomicRMWInst::Max:
2868       return Intrinsic::riscv_masked_atomicrmw_max_i32;
2869     case AtomicRMWInst::Min:
2870       return Intrinsic::riscv_masked_atomicrmw_min_i32;
2871     case AtomicRMWInst::UMax:
2872       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
2873     case AtomicRMWInst::UMin:
2874       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
2875     }
2876   }
2877 
2878   if (XLen == 64) {
2879     switch (BinOp) {
2880     default:
2881       llvm_unreachable("Unexpected AtomicRMW BinOp");
2882     case AtomicRMWInst::Xchg:
2883       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
2884     case AtomicRMWInst::Add:
2885       return Intrinsic::riscv_masked_atomicrmw_add_i64;
2886     case AtomicRMWInst::Sub:
2887       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
2888     case AtomicRMWInst::Nand:
2889       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
2890     case AtomicRMWInst::Max:
2891       return Intrinsic::riscv_masked_atomicrmw_max_i64;
2892     case AtomicRMWInst::Min:
2893       return Intrinsic::riscv_masked_atomicrmw_min_i64;
2894     case AtomicRMWInst::UMax:
2895       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
2896     case AtomicRMWInst::UMin:
2897       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
2898     }
2899   }
2900 
2901   llvm_unreachable("Unexpected XLen\n");
2902 }
2903 
2904 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
2905     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
2906     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
2907   unsigned XLen = Subtarget.getXLen();
2908   Value *Ordering =
2909       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
2910   Type *Tys[] = {AlignedAddr->getType()};
2911   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
2912       AI->getModule(),
2913       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
2914 
2915   if (XLen == 64) {
2916     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
2917     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2918     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
2919   }
2920 
2921   Value *Result;
2922 
2923   // Must pass the shift amount needed to sign extend the loaded value prior
2924   // to performing a signed comparison for min/max. ShiftAmt is the number of
2925   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
2926   // is the number of bits to left+right shift the value in order to
2927   // sign-extend.
2928   if (AI->getOperation() == AtomicRMWInst::Min ||
2929       AI->getOperation() == AtomicRMWInst::Max) {
2930     const DataLayout &DL = AI->getModule()->getDataLayout();
2931     unsigned ValWidth =
2932         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
2933     Value *SextShamt =
2934         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
2935     Result = Builder.CreateCall(LrwOpScwLoop,
2936                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
2937   } else {
2938     Result =
2939         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
2940   }
2941 
2942   if (XLen == 64)
2943     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2944   return Result;
2945 }
2946 
2947 TargetLowering::AtomicExpansionKind
2948 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
2949     AtomicCmpXchgInst *CI) const {
2950   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
2951   if (Size == 8 || Size == 16)
2952     return AtomicExpansionKind::MaskedIntrinsic;
2953   return AtomicExpansionKind::None;
2954 }
2955 
2956 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
2957     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2958     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2959   unsigned XLen = Subtarget.getXLen();
2960   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
2961   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
2962   if (XLen == 64) {
2963     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
2964     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
2965     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2966     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
2967   }
2968   Type *Tys[] = {AlignedAddr->getType()};
2969   Function *MaskedCmpXchg =
2970       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
2971   Value *Result = Builder.CreateCall(
2972       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
2973   if (XLen == 64)
2974     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2975   return Result;
2976 }
2977 
2978 Register RISCVTargetLowering::getExceptionPointerRegister(
2979     const Constant *PersonalityFn) const {
2980   return RISCV::X10;
2981 }
2982 
2983 Register RISCVTargetLowering::getExceptionSelectorRegister(
2984     const Constant *PersonalityFn) const {
2985   return RISCV::X11;
2986 }
2987 
2988 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2989   // Return false to suppress the unnecessary extensions if the LibCall
2990   // arguments or return value is f32 type for LP64 ABI.
2991   RISCVABI::ABI ABI = Subtarget.getTargetABI();
2992   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2993     return false;
2994 
2995   return true;
2996 }
2997 
2998 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
2999                                                  SDValue C) const {
3000   // Check integral scalar types.
3001   if (VT.isScalarInteger()) {
3002     // Do not perform the transformation on riscv32 with the M extension.
3003     if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
3004       return false;
3005     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
3006       if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
3007         return false;
3008       int64_t Imm = ConstNode->getSExtValue();
3009       if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
3010           isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
3011         return true;
3012     }
3013   }
3014 
3015   return false;
3016 }
3017 
3018 #define GET_REGISTER_MATCHER
3019 #include "RISCVGenAsmMatcher.inc"
3020 
3021 Register
3022 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
3023                                        const MachineFunction &MF) const {
3024   Register Reg = MatchRegisterAltName(RegName);
3025   if (Reg == RISCV::NoRegister)
3026     Reg = MatchRegisterName(RegName);
3027   if (Reg == RISCV::NoRegister)
3028     report_fatal_error(
3029         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
3030   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
3031   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
3032     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
3033                              StringRef(RegName) + "\"."));
3034   return Reg;
3035 }
3036