1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interfaces that RISCV uses to lower LLVM code into a 10 // selection DAG. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "RISCVISelLowering.h" 15 #include "RISCV.h" 16 #include "RISCVMachineFunctionInfo.h" 17 #include "RISCVRegisterInfo.h" 18 #include "RISCVSubtarget.h" 19 #include "RISCVTargetMachine.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/CodeGen/CallingConvLower.h" 22 #include "llvm/CodeGen/MachineFrameInfo.h" 23 #include "llvm/CodeGen/MachineFunction.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineRegisterInfo.h" 26 #include "llvm/CodeGen/SelectionDAGISel.h" 27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 28 #include "llvm/CodeGen/ValueTypes.h" 29 #include "llvm/IR/DiagnosticInfo.h" 30 #include "llvm/IR/DiagnosticPrinter.h" 31 #include "llvm/Support/Debug.h" 32 #include "llvm/Support/ErrorHandling.h" 33 #include "llvm/Support/raw_ostream.h" 34 35 using namespace llvm; 36 37 #define DEBUG_TYPE "riscv-lower" 38 39 STATISTIC(NumTailCalls, "Number of tail calls"); 40 41 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, 42 const RISCVSubtarget &STI) 43 : TargetLowering(TM), Subtarget(STI) { 44 45 MVT XLenVT = Subtarget.getXLenVT(); 46 47 // Set up the register classes. 48 addRegisterClass(XLenVT, &RISCV::GPRRegClass); 49 50 if (Subtarget.hasStdExtF()) 51 addRegisterClass(MVT::f32, &RISCV::FPR32RegClass); 52 if (Subtarget.hasStdExtD()) 53 addRegisterClass(MVT::f64, &RISCV::FPR64RegClass); 54 55 // Compute derived properties from the register classes. 56 computeRegisterProperties(STI.getRegisterInfo()); 57 58 setStackPointerRegisterToSaveRestore(RISCV::X2); 59 60 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) 61 setLoadExtAction(N, XLenVT, MVT::i1, Promote); 62 63 // TODO: add all necessary setOperationAction calls. 
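// For reference: `Legal` means the operation maps directly to an ISA
// instruction, `Expand` asks the legalizer to rewrite the node in terms of
// other nodes or a libcall, `Custom` routes it through LowerOperation() or
// ReplaceNodeResults() in this file, and `Promote` widens the type first.
// For example, without the M extension ISD::MUL on XLenVT is marked Expand
// below, which typically ends up as a __mulsi3/__muldi3 libcall.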
64 setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); 65 66 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 67 setOperationAction(ISD::BR_CC, XLenVT, Expand); 68 setOperationAction(ISD::SELECT, XLenVT, Custom); 69 setOperationAction(ISD::SELECT_CC, XLenVT, Expand); 70 71 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 72 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 73 74 setOperationAction(ISD::VASTART, MVT::Other, Custom); 75 setOperationAction(ISD::VAARG, MVT::Other, Expand); 76 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 77 setOperationAction(ISD::VAEND, MVT::Other, Expand); 78 79 for (auto VT : {MVT::i1, MVT::i8, MVT::i16}) 80 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); 81 82 if (Subtarget.is64Bit()) { 83 setOperationAction(ISD::SHL, MVT::i32, Custom); 84 setOperationAction(ISD::SRA, MVT::i32, Custom); 85 setOperationAction(ISD::SRL, MVT::i32, Custom); 86 } 87 88 if (!Subtarget.hasStdExtM()) { 89 setOperationAction(ISD::MUL, XLenVT, Expand); 90 setOperationAction(ISD::MULHS, XLenVT, Expand); 91 setOperationAction(ISD::MULHU, XLenVT, Expand); 92 setOperationAction(ISD::SDIV, XLenVT, Expand); 93 setOperationAction(ISD::UDIV, XLenVT, Expand); 94 setOperationAction(ISD::SREM, XLenVT, Expand); 95 setOperationAction(ISD::UREM, XLenVT, Expand); 96 } 97 98 if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) { 99 setOperationAction(ISD::SDIV, MVT::i32, Custom); 100 setOperationAction(ISD::UDIV, MVT::i32, Custom); 101 setOperationAction(ISD::UREM, MVT::i32, Custom); 102 } 103 104 setOperationAction(ISD::SDIVREM, XLenVT, Expand); 105 setOperationAction(ISD::UDIVREM, XLenVT, Expand); 106 setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); 107 setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); 108 109 setOperationAction(ISD::SHL_PARTS, XLenVT, Expand); 110 setOperationAction(ISD::SRL_PARTS, XLenVT, Expand); 111 setOperationAction(ISD::SRA_PARTS, XLenVT, Expand); 112 113 setOperationAction(ISD::ROTL, XLenVT, Expand); 114 setOperationAction(ISD::ROTR, XLenVT, Expand); 115 setOperationAction(ISD::BSWAP, XLenVT, Expand); 116 setOperationAction(ISD::CTTZ, XLenVT, Expand); 117 setOperationAction(ISD::CTLZ, XLenVT, Expand); 118 setOperationAction(ISD::CTPOP, XLenVT, Expand); 119 120 ISD::CondCode FPCCToExtend[] = { 121 ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETO, ISD::SETUEQ, 122 ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, 123 ISD::SETGT, ISD::SETGE, ISD::SETNE}; 124 125 ISD::NodeType FPOpToExtend[] = { 126 ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM}; 127 128 if (Subtarget.hasStdExtF()) { 129 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 130 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 131 for (auto CC : FPCCToExtend) 132 setCondCodeAction(CC, MVT::f32, Expand); 133 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 134 setOperationAction(ISD::SELECT, MVT::f32, Custom); 135 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 136 for (auto Op : FPOpToExtend) 137 setOperationAction(Op, MVT::f32, Expand); 138 } 139 140 if (Subtarget.hasStdExtD()) { 141 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 142 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 143 for (auto CC : FPCCToExtend) 144 setCondCodeAction(CC, MVT::f64, Expand); 145 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 146 setOperationAction(ISD::SELECT, MVT::f64, Custom); 147 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 148 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); 149 setTruncStoreAction(MVT::f64, 
MVT::f32, Expand); 150 for (auto Op : FPOpToExtend) 151 setOperationAction(Op, MVT::f64, Expand); 152 } 153 154 setOperationAction(ISD::GlobalAddress, XLenVT, Custom); 155 setOperationAction(ISD::BlockAddress, XLenVT, Custom); 156 setOperationAction(ISD::ConstantPool, XLenVT, Custom); 157 158 if (Subtarget.hasStdExtA()) { 159 setMaxAtomicSizeInBitsSupported(Subtarget.getXLen()); 160 setMinCmpXchgSizeInBits(32); 161 } else { 162 setMaxAtomicSizeInBitsSupported(0); 163 } 164 165 setBooleanContents(ZeroOrOneBooleanContent); 166 167 // Function alignments (log2). 168 unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2; 169 setMinFunctionAlignment(FunctionAlignment); 170 setPrefFunctionAlignment(FunctionAlignment); 171 172 // Effectively disable jump table generation. 173 setMinimumJumpTableEntries(INT_MAX); 174 } 175 176 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, 177 EVT VT) const { 178 if (!VT.isVector()) 179 return getPointerTy(DL); 180 return VT.changeVectorElementTypeToInteger(); 181 } 182 183 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 184 const CallInst &I, 185 MachineFunction &MF, 186 unsigned Intrinsic) const { 187 switch (Intrinsic) { 188 default: 189 return false; 190 case Intrinsic::riscv_masked_atomicrmw_xchg_i32: 191 case Intrinsic::riscv_masked_atomicrmw_add_i32: 192 case Intrinsic::riscv_masked_atomicrmw_sub_i32: 193 case Intrinsic::riscv_masked_atomicrmw_nand_i32: 194 case Intrinsic::riscv_masked_atomicrmw_max_i32: 195 case Intrinsic::riscv_masked_atomicrmw_min_i32: 196 case Intrinsic::riscv_masked_atomicrmw_umax_i32: 197 case Intrinsic::riscv_masked_atomicrmw_umin_i32: 198 case Intrinsic::riscv_masked_cmpxchg_i32: 199 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 200 Info.opc = ISD::INTRINSIC_W_CHAIN; 201 Info.memVT = MVT::getVT(PtrTy->getElementType()); 202 Info.ptrVal = I.getArgOperand(0); 203 Info.offset = 0; 204 Info.align = 4; 205 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | 206 MachineMemOperand::MOVolatile; 207 return true; 208 } 209 } 210 211 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL, 212 const AddrMode &AM, Type *Ty, 213 unsigned AS, 214 Instruction *I) const { 215 // No global is ever allowed as a base. 216 if (AM.BaseGV) 217 return false; 218 219 // Require a 12-bit signed offset. 220 if (!isInt<12>(AM.BaseOffs)) 221 return false; 222 223 switch (AM.Scale) { 224 case 0: // "r+i" or just "i", depending on HasBaseReg. 225 break; 226 case 1: 227 if (!AM.HasBaseReg) // allow "r+i". 228 break; 229 return false; // disallow "r+r" or "r+r+i". 230 default: 231 return false; 232 } 233 234 return true; 235 } 236 237 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 238 return isInt<12>(Imm); 239 } 240 241 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const { 242 return isInt<12>(Imm); 243 } 244 245 // On RV32, 64-bit integers are split into their high and low parts and held 246 // in two different registers, so the trunc is free since the low register can 247 // just be used. 
248 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { 249 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) 250 return false; 251 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); 252 unsigned DestBits = DstTy->getPrimitiveSizeInBits(); 253 return (SrcBits == 64 && DestBits == 32); 254 } 255 256 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { 257 if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() || 258 !SrcVT.isInteger() || !DstVT.isInteger()) 259 return false; 260 unsigned SrcBits = SrcVT.getSizeInBits(); 261 unsigned DestBits = DstVT.getSizeInBits(); 262 return (SrcBits == 64 && DestBits == 32); 263 } 264 265 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 266 // Zexts are free if they can be combined with a load. 267 if (auto *LD = dyn_cast<LoadSDNode>(Val)) { 268 EVT MemVT = LD->getMemoryVT(); 269 if ((MemVT == MVT::i8 || MemVT == MVT::i16 || 270 (Subtarget.is64Bit() && MemVT == MVT::i32)) && 271 (LD->getExtensionType() == ISD::NON_EXTLOAD || 272 LD->getExtensionType() == ISD::ZEXTLOAD)) 273 return true; 274 } 275 276 return TargetLowering::isZExtFree(Val, VT2); 277 } 278 279 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const { 280 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64; 281 } 282 283 // Changes the condition code and swaps operands if necessary, so the SetCC 284 // operation matches one of the comparisons supported directly in the RISC-V 285 // ISA. 286 static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) { 287 switch (CC) { 288 default: 289 break; 290 case ISD::SETGT: 291 case ISD::SETLE: 292 case ISD::SETUGT: 293 case ISD::SETULE: 294 CC = ISD::getSetCCSwappedOperands(CC); 295 std::swap(LHS, RHS); 296 break; 297 } 298 } 299 300 // Return the RISC-V branch opcode that matches the given DAG integer 301 // condition code. The CondCode must be one of those supported by the RISC-V 302 // ISA (see normaliseSetCC). 
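// For example, there is no BGT instruction, so normaliseSetCC turns
// (setcc a, b, setgt) into (setcc b, a, setlt), which this function can then
// map to BLT. After normalisation only SETEQ/SETNE/SETLT/SETGE/SETULT/SETUGE
// need to be handled here.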
303 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) { 304 switch (CC) { 305 default: 306 llvm_unreachable("Unsupported CondCode"); 307 case ISD::SETEQ: 308 return RISCV::BEQ; 309 case ISD::SETNE: 310 return RISCV::BNE; 311 case ISD::SETLT: 312 return RISCV::BLT; 313 case ISD::SETGE: 314 return RISCV::BGE; 315 case ISD::SETULT: 316 return RISCV::BLTU; 317 case ISD::SETUGE: 318 return RISCV::BGEU; 319 } 320 } 321 322 SDValue RISCVTargetLowering::LowerOperation(SDValue Op, 323 SelectionDAG &DAG) const { 324 switch (Op.getOpcode()) { 325 default: 326 report_fatal_error("unimplemented operand"); 327 case ISD::GlobalAddress: 328 return lowerGlobalAddress(Op, DAG); 329 case ISD::BlockAddress: 330 return lowerBlockAddress(Op, DAG); 331 case ISD::ConstantPool: 332 return lowerConstantPool(Op, DAG); 333 case ISD::SELECT: 334 return lowerSELECT(Op, DAG); 335 case ISD::VASTART: 336 return lowerVASTART(Op, DAG); 337 case ISD::FRAMEADDR: 338 return lowerFRAMEADDR(Op, DAG); 339 case ISD::RETURNADDR: 340 return lowerRETURNADDR(Op, DAG); 341 } 342 } 343 344 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, 345 SelectionDAG &DAG) const { 346 SDLoc DL(Op); 347 EVT Ty = Op.getValueType(); 348 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 349 const GlobalValue *GV = N->getGlobal(); 350 int64_t Offset = N->getOffset(); 351 MVT XLenVT = Subtarget.getXLenVT(); 352 353 if (isPositionIndependent()) 354 report_fatal_error("Unable to lowerGlobalAddress"); 355 // In order to maximise the opportunity for common subexpression elimination, 356 // emit a separate ADD node for the global address offset instead of folding 357 // it in the global address node. Later peephole optimisations may choose to 358 // fold it back in when profitable. 359 SDValue GAHi = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_HI); 360 SDValue GALo = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_LO); 361 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0); 362 SDValue MNLo = 363 SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0); 364 if (Offset != 0) 365 return DAG.getNode(ISD::ADD, DL, Ty, MNLo, 366 DAG.getConstant(Offset, DL, XLenVT)); 367 return MNLo; 368 } 369 370 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, 371 SelectionDAG &DAG) const { 372 SDLoc DL(Op); 373 EVT Ty = Op.getValueType(); 374 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); 375 const BlockAddress *BA = N->getBlockAddress(); 376 int64_t Offset = N->getOffset(); 377 378 if (isPositionIndependent()) 379 report_fatal_error("Unable to lowerBlockAddress"); 380 381 SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI); 382 SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO); 383 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0); 384 SDValue MNLo = 385 SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0); 386 return MNLo; 387 } 388 389 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, 390 SelectionDAG &DAG) const { 391 SDLoc DL(Op); 392 EVT Ty = Op.getValueType(); 393 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); 394 const Constant *CPA = N->getConstVal(); 395 int64_t Offset = N->getOffset(); 396 unsigned Alignment = N->getAlignment(); 397 398 if (!isPositionIndependent()) { 399 SDValue CPAHi = 400 DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_HI); 401 SDValue CPALo = 402 DAG.getTargetConstantPool(CPA, Ty, Alignment, Offset, RISCVII::MO_LO); 403 SDValue MNHi = 
SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, CPAHi), 0); 404 SDValue MNLo = 405 SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, CPALo), 0); 406 return MNLo; 407 } else { 408 report_fatal_error("Unable to lowerConstantPool"); 409 } 410 } 411 412 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { 413 SDValue CondV = Op.getOperand(0); 414 SDValue TrueV = Op.getOperand(1); 415 SDValue FalseV = Op.getOperand(2); 416 SDLoc DL(Op); 417 MVT XLenVT = Subtarget.getXLenVT(); 418 419 // If the result type is XLenVT and CondV is the output of a SETCC node 420 // which also operated on XLenVT inputs, then merge the SETCC node into the 421 // lowered RISCVISD::SELECT_CC to take advantage of the integer 422 // compare+branch instructions. i.e.: 423 // (select (setcc lhs, rhs, cc), truev, falsev) 424 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev) 425 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC && 426 CondV.getOperand(0).getSimpleValueType() == XLenVT) { 427 SDValue LHS = CondV.getOperand(0); 428 SDValue RHS = CondV.getOperand(1); 429 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2)); 430 ISD::CondCode CCVal = CC->get(); 431 432 normaliseSetCC(LHS, RHS, CCVal); 433 434 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT); 435 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 436 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; 437 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops); 438 } 439 440 // Otherwise: 441 // (select condv, truev, falsev) 442 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) 443 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 444 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT); 445 446 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 447 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; 448 449 return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops); 450 } 451 452 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { 453 MachineFunction &MF = DAG.getMachineFunction(); 454 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); 455 456 SDLoc DL(Op); 457 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 458 getPointerTy(MF.getDataLayout())); 459 460 // vastart just stores the address of the VarArgsFrameIndex slot into the 461 // memory location argument. 
462 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
463 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
464 MachinePointerInfo(SV));
465 }
466
467 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
468 SelectionDAG &DAG) const {
469 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
470 MachineFunction &MF = DAG.getMachineFunction();
471 MachineFrameInfo &MFI = MF.getFrameInfo();
472 MFI.setFrameAddressIsTaken(true);
473 unsigned FrameReg = RI.getFrameRegister(MF);
474 int XLenInBytes = Subtarget.getXLen() / 8;
475
476 EVT VT = Op.getValueType();
477 SDLoc DL(Op);
478 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
479 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
480 while (Depth--) {
481 int Offset = -(XLenInBytes * 2);
482 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
483 DAG.getIntPtrConstant(Offset, DL));
484 FrameAddr =
485 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
486 }
487 return FrameAddr;
488 }
489
490 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
491 SelectionDAG &DAG) const {
492 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
493 MachineFunction &MF = DAG.getMachineFunction();
494 MachineFrameInfo &MFI = MF.getFrameInfo();
495 MFI.setReturnAddressIsTaken(true);
496 MVT XLenVT = Subtarget.getXLenVT();
497 int XLenInBytes = Subtarget.getXLen() / 8;
498
499 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
500 return SDValue();
501
502 EVT VT = Op.getValueType();
503 SDLoc DL(Op);
504 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
505 if (Depth) {
506 int Off = -XLenInBytes;
507 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
508 SDValue Offset = DAG.getConstant(Off, DL, VT);
509 return DAG.getLoad(VT, DL, DAG.getEntryNode(),
510 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
511 MachinePointerInfo());
512 }
513
514 // Return the value of the return address register, marking it an implicit
515 // live-in.
516 unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
517 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
518 }
519
520 // Returns the opcode of the target-specific SDNode that implements the 32-bit
521 // form of the given Opcode.
522 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
523 switch (Opcode) {
524 default:
525 llvm_unreachable("Unexpected opcode");
526 case ISD::SHL:
527 return RISCVISD::SLLW;
528 case ISD::SRA:
529 return RISCVISD::SRAW;
530 case ISD::SRL:
531 return RISCVISD::SRLW;
532 case ISD::SDIV:
533 return RISCVISD::DIVW;
534 case ISD::UDIV:
535 return RISCVISD::DIVUW;
536 case ISD::UREM:
537 return RISCVISD::REMUW;
538 }
539 }
540
541 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
542 // Because i32 isn't a legal type for RV64, these operations would otherwise
543 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
544 // later on because the fact that the operation was originally of type i32 is
545 // lost.
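// A minimal sketch of the transformation below, assuming RV64: an illegal
//   (i32 (srl x, y))
// becomes
//   (i32 (trunc (riscvisd::srlw (i64 (any_extend x)), (i64 (any_extend y)))))
// so that instruction selection can still pick SRLW rather than a 64-bit
// shift followed by sign-extension fixups.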
546 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) { 547 SDLoc DL(N); 548 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 549 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 550 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 551 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 552 // ReplaceNodeResults requires we maintain the same type for the return value. 553 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 554 } 555 556 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 557 SmallVectorImpl<SDValue> &Results, 558 SelectionDAG &DAG) const { 559 SDLoc DL(N); 560 switch (N->getOpcode()) { 561 default: 562 llvm_unreachable("Don't know how to custom type legalize this operation!"); 563 case ISD::SHL: 564 case ISD::SRA: 565 case ISD::SRL: 566 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 567 "Unexpected custom legalisation"); 568 if (N->getOperand(1).getOpcode() == ISD::Constant) 569 return; 570 Results.push_back(customLegalizeToWOp(N, DAG)); 571 break; 572 case ISD::SDIV: 573 case ISD::UDIV: 574 case ISD::UREM: 575 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 576 Subtarget.hasStdExtM() && "Unexpected custom legalisation"); 577 if (N->getOperand(0).getOpcode() == ISD::Constant || 578 N->getOperand(1).getOpcode() == ISD::Constant) 579 return; 580 Results.push_back(customLegalizeToWOp(N, DAG)); 581 break; 582 } 583 } 584 585 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 586 DAGCombinerInfo &DCI) const { 587 SelectionDAG &DAG = DCI.DAG; 588 589 switch (N->getOpcode()) { 590 default: 591 break; 592 case RISCVISD::SplitF64: { 593 SDValue Op0 = N->getOperand(0); 594 // If the input to SplitF64 is just BuildPairF64 then the operation is 595 // redundant. Instead, use BuildPairF64's operands directly. 596 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 597 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 598 599 SDLoc DL(N); 600 // This is a target-specific version of a DAGCombine performed in 601 // DAGCombiner::visitBITCAST. It performs the equivalent of: 602 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 603 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 604 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 605 !Op0.getNode()->hasOneUse()) 606 break; 607 SDValue NewSplitF64 = 608 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), 609 Op0.getOperand(0)); 610 SDValue Lo = NewSplitF64.getValue(0); 611 SDValue Hi = NewSplitF64.getValue(1); 612 APInt SignBit = APInt::getSignMask(32); 613 if (Op0.getOpcode() == ISD::FNEG) { 614 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, 615 DAG.getConstant(SignBit, DL, MVT::i32)); 616 return DCI.CombineTo(N, Lo, NewHi); 617 } 618 assert(Op0.getOpcode() == ISD::FABS); 619 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, 620 DAG.getConstant(~SignBit, DL, MVT::i32)); 621 return DCI.CombineTo(N, Lo, NewHi); 622 } 623 case RISCVISD::SLLW: 624 case RISCVISD::SRAW: 625 case RISCVISD::SRLW: { 626 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 
627 SDValue LHS = N->getOperand(0); 628 SDValue RHS = N->getOperand(1); 629 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32); 630 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5); 631 if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) || 632 (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI))) 633 return SDValue(); 634 break; 635 } 636 } 637 638 return SDValue(); 639 } 640 641 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 642 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 643 unsigned Depth) const { 644 switch (Op.getOpcode()) { 645 default: 646 break; 647 case RISCVISD::SLLW: 648 case RISCVISD::SRAW: 649 case RISCVISD::SRLW: 650 case RISCVISD::DIVW: 651 case RISCVISD::DIVUW: 652 case RISCVISD::REMUW: 653 // TODO: As the result is sign-extended, this is conservatively correct. A 654 // more precise answer could be calculated for SRAW depending on known 655 // bits in the shift amount. 656 return 33; 657 } 658 659 return 1; 660 } 661 662 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 663 MachineBasicBlock *BB) { 664 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 665 666 MachineFunction &MF = *BB->getParent(); 667 DebugLoc DL = MI.getDebugLoc(); 668 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 669 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 670 unsigned LoReg = MI.getOperand(0).getReg(); 671 unsigned HiReg = MI.getOperand(1).getReg(); 672 unsigned SrcReg = MI.getOperand(2).getReg(); 673 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 674 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(); 675 676 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 677 RI); 678 MachineMemOperand *MMO = 679 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI), 680 MachineMemOperand::MOLoad, 8, 8); 681 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 682 .addFrameIndex(FI) 683 .addImm(0) 684 .addMemOperand(MMO); 685 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 686 .addFrameIndex(FI) 687 .addImm(4) 688 .addMemOperand(MMO); 689 MI.eraseFromParent(); // The pseudo instruction is gone now. 
690 return BB; 691 } 692 693 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 694 MachineBasicBlock *BB) { 695 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 696 "Unexpected instruction"); 697 698 MachineFunction &MF = *BB->getParent(); 699 DebugLoc DL = MI.getDebugLoc(); 700 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 701 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 702 unsigned DstReg = MI.getOperand(0).getReg(); 703 unsigned LoReg = MI.getOperand(1).getReg(); 704 unsigned HiReg = MI.getOperand(2).getReg(); 705 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 706 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(); 707 708 MachineMemOperand *MMO = 709 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI), 710 MachineMemOperand::MOStore, 8, 8); 711 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 712 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 713 .addFrameIndex(FI) 714 .addImm(0) 715 .addMemOperand(MMO); 716 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 717 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 718 .addFrameIndex(FI) 719 .addImm(4) 720 .addMemOperand(MMO); 721 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 722 MI.eraseFromParent(); // The pseudo instruction is gone now. 723 return BB; 724 } 725 726 MachineBasicBlock * 727 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 728 MachineBasicBlock *BB) const { 729 switch (MI.getOpcode()) { 730 default: 731 llvm_unreachable("Unexpected instr type to insert"); 732 case RISCV::Select_GPR_Using_CC_GPR: 733 case RISCV::Select_FPR32_Using_CC_GPR: 734 case RISCV::Select_FPR64_Using_CC_GPR: 735 break; 736 case RISCV::BuildPairF64Pseudo: 737 return emitBuildPairF64Pseudo(MI, BB); 738 case RISCV::SplitF64Pseudo: 739 return emitSplitF64Pseudo(MI, BB); 740 } 741 742 // To "insert" a SELECT instruction, we actually have to insert the triangle 743 // control-flow pattern. The incoming instruction knows the destination vreg 744 // to set, the condition code register to branch on, the true/false values to 745 // select between, and the condcode to use to select the appropriate branch. 746 // 747 // We produce the following control flow: 748 // HeadMBB 749 // | \ 750 // | IfFalseMBB 751 // | / 752 // TailMBB 753 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 754 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 755 DebugLoc DL = MI.getDebugLoc(); 756 MachineFunction::iterator I = ++BB->getIterator(); 757 758 MachineBasicBlock *HeadMBB = BB; 759 MachineFunction *F = BB->getParent(); 760 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 761 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 762 763 F->insert(I, IfFalseMBB); 764 F->insert(I, TailMBB); 765 // Move all remaining instructions to TailMBB. 766 TailMBB->splice(TailMBB->begin(), HeadMBB, 767 std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end()); 768 // Update machine-CFG edges by transferring all successors of the current 769 // block to the new block which will contain the Phi node for the select. 770 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 771 // Set the successors for HeadMBB. 772 HeadMBB->addSuccessor(IfFalseMBB); 773 HeadMBB->addSuccessor(TailMBB); 774 775 // Insert appropriate branch. 
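// A rough sketch of the machine code this produces, assuming an integer
// condition (names are illustrative):
//   HeadMBB:    B<cc> lhs, rhs, TailMBB    ; take the TrueV path if cc holds
//   IfFalseMBB:                            ; empty, falls through
//   TailMBB:    %res = PHI [TrueV, HeadMBB], [FalseV, IfFalseMBB]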
776 unsigned LHS = MI.getOperand(1).getReg(); 777 unsigned RHS = MI.getOperand(2).getReg(); 778 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 779 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 780 781 BuildMI(HeadMBB, DL, TII.get(Opcode)) 782 .addReg(LHS) 783 .addReg(RHS) 784 .addMBB(TailMBB); 785 786 // IfFalseMBB just falls through to TailMBB. 787 IfFalseMBB->addSuccessor(TailMBB); 788 789 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 790 BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI), 791 MI.getOperand(0).getReg()) 792 .addReg(MI.getOperand(4).getReg()) 793 .addMBB(HeadMBB) 794 .addReg(MI.getOperand(5).getReg()) 795 .addMBB(IfFalseMBB); 796 797 MI.eraseFromParent(); // The pseudo instruction is gone now. 798 return TailMBB; 799 } 800 801 // Calling Convention Implementation. 802 // The expectations for frontend ABI lowering vary from target to target. 803 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI 804 // details, but this is a longer term goal. For now, we simply try to keep the 805 // role of the frontend as simple and well-defined as possible. The rules can 806 // be summarised as: 807 // * Never split up large scalar arguments. We handle them here. 808 // * If a hardfloat calling convention is being used, and the struct may be 809 // passed in a pair of registers (fp+fp, int+fp), and both registers are 810 // available, then pass as two separate arguments. If either the GPRs or FPRs 811 // are exhausted, then pass according to the rule below. 812 // * If a struct could never be passed in registers or directly in a stack 813 // slot (as it is larger than 2*XLEN and the floating point rules don't 814 // apply), then pass it using a pointer with the byval attribute. 815 // * If a struct is less than 2*XLEN, then coerce to either a two-element 816 // word-sized array or a 2*XLEN scalar (depending on alignment). 817 // * The frontend can determine whether a struct is returned by reference or 818 // not based on its size and fields. If it will be returned by reference, the 819 // frontend must modify the prototype so a pointer with the sret annotation is 820 // passed as the first argument. This is not necessary for large scalar 821 // returns. 822 // * Struct return values and varargs should be coerced to structs containing 823 // register-size fields in the same situations they would be for fixed 824 // arguments. 825 826 static const MCPhysReg ArgGPRs[] = { 827 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 828 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 829 }; 830 831 // Pass a 2*XLEN argument that has been split into two XLEN values through 832 // registers or the stack as necessary. 833 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 834 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 835 MVT ValVT2, MVT LocVT2, 836 ISD::ArgFlagsTy ArgFlags2) { 837 unsigned XLenInBytes = XLen / 8; 838 if (unsigned Reg = State.AllocateReg(ArgGPRs)) { 839 // At least one half can be passed via register. 840 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 841 VA1.getLocVT(), CCValAssign::Full)); 842 } else { 843 // Both halves must be passed on the stack, with proper alignment. 
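// For example, on RV32 an i64 argument whose original alignment is 8 bytes
// and whose halves both miss out on registers has its first (low) half placed
// in an 8-byte aligned stack slot and its second half in the 4-byte slot
// immediately after it.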
844 unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign()); 845 State.addLoc( 846 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 847 State.AllocateStack(XLenInBytes, StackAlign), 848 VA1.getLocVT(), CCValAssign::Full)); 849 State.addLoc(CCValAssign::getMem( 850 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, 851 CCValAssign::Full)); 852 return false; 853 } 854 855 if (unsigned Reg = State.AllocateReg(ArgGPRs)) { 856 // The second half can also be passed via register. 857 State.addLoc( 858 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 859 } else { 860 // The second half is passed via the stack, without additional alignment. 861 State.addLoc(CCValAssign::getMem( 862 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2, 863 CCValAssign::Full)); 864 } 865 866 return false; 867 } 868 869 // Implements the RISC-V calling convention. Returns true upon failure. 870 static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT, 871 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, 872 CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) { 873 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); 874 assert(XLen == 32 || XLen == 64); 875 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; 876 if (ValVT == MVT::f32) { 877 LocVT = MVT::i32; 878 LocInfo = CCValAssign::BCvt; 879 } 880 881 // Any return value split in to more than two values can't be returned 882 // directly. 883 if (IsRet && ValNo > 1) 884 return true; 885 886 // If this is a variadic argument, the RISC-V calling convention requires 887 // that it is assigned an 'even' or 'aligned' register if it has 8-byte 888 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should 889 // be used regardless of whether the original argument was split during 890 // legalisation or not. The argument will not be passed by registers if the 891 // original type is larger than 2*XLEN, so the register alignment rule does 892 // not apply. 893 unsigned TwoXLenInBytes = (2 * XLen) / 8; 894 if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes && 895 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { 896 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); 897 // Skip 'odd' register if necessary. 898 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) 899 State.AllocateReg(ArgGPRs); 900 } 901 902 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); 903 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = 904 State.getPendingArgFlags(); 905 906 assert(PendingLocs.size() == PendingArgFlags.size() && 907 "PendingLocs and PendingArgFlags out of sync"); 908 909 // Handle passing f64 on RV32D with a soft float ABI. 910 if (XLen == 32 && ValVT == MVT::f64) { 911 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 912 "Can't lower f64 if it is split"); 913 // Depending on available argument GPRS, f64 may be passed in a pair of 914 // GPRs, split between a GPR and the stack, or passed completely on the 915 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 916 // cases. 
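// Concretely, with a0-a7 as the argument GPRs: if two GPRs are free, the f64
// travels in a register pair such as a0/a1; if only a7 remains, the low half
// goes in a7 and the high half ends up in the first stack slot; if no GPRs
// remain, the whole value is placed in an 8-byte aligned stack slot.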
917 unsigned Reg = State.AllocateReg(ArgGPRs); 918 LocVT = MVT::i32; 919 if (!Reg) { 920 unsigned StackOffset = State.AllocateStack(8, 8); 921 State.addLoc( 922 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 923 return false; 924 } 925 if (!State.AllocateReg(ArgGPRs)) 926 State.AllocateStack(4, 4); 927 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 928 return false; 929 } 930 931 // Split arguments might be passed indirectly, so keep track of the pending 932 // values. 933 if (ArgFlags.isSplit() || !PendingLocs.empty()) { 934 LocVT = XLenVT; 935 LocInfo = CCValAssign::Indirect; 936 PendingLocs.push_back( 937 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 938 PendingArgFlags.push_back(ArgFlags); 939 if (!ArgFlags.isSplitEnd()) { 940 return false; 941 } 942 } 943 944 // If the split argument only had two elements, it should be passed directly 945 // in registers or on the stack. 946 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 947 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 948 // Apply the normal calling convention rules to the first half of the 949 // split argument. 950 CCValAssign VA = PendingLocs[0]; 951 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 952 PendingLocs.clear(); 953 PendingArgFlags.clear(); 954 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 955 ArgFlags); 956 } 957 958 // Allocate to a register if possible, or else a stack slot. 959 unsigned Reg = State.AllocateReg(ArgGPRs); 960 unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); 961 962 // If we reach this point and PendingLocs is non-empty, we must be at the 963 // end of a split argument that must be passed indirectly. 964 if (!PendingLocs.empty()) { 965 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 966 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 967 968 for (auto &It : PendingLocs) { 969 if (Reg) 970 It.convertToReg(Reg); 971 else 972 It.convertToMem(StackOffset); 973 State.addLoc(It); 974 } 975 PendingLocs.clear(); 976 PendingArgFlags.clear(); 977 return false; 978 } 979 980 assert(LocVT == XLenVT && "Expected an XLenVT at this stage"); 981 982 if (Reg) { 983 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 984 return false; 985 } 986 987 if (ValVT == MVT::f32) { 988 LocVT = MVT::f32; 989 LocInfo = CCValAssign::Full; 990 } 991 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 992 return false; 993 } 994 995 void RISCVTargetLowering::analyzeInputArgs( 996 MachineFunction &MF, CCState &CCInfo, 997 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 998 unsigned NumArgs = Ins.size(); 999 FunctionType *FType = MF.getFunction().getFunctionType(); 1000 1001 for (unsigned i = 0; i != NumArgs; ++i) { 1002 MVT ArgVT = Ins[i].VT; 1003 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 1004 1005 Type *ArgTy = nullptr; 1006 if (IsRet) 1007 ArgTy = FType->getReturnType(); 1008 else if (Ins[i].isOrigArg()) 1009 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 1010 1011 if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full, 1012 ArgFlags, CCInfo, /*IsRet=*/true, IsRet, ArgTy)) { 1013 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 1014 << EVT(ArgVT).getEVTString() << '\n'); 1015 llvm_unreachable(nullptr); 1016 } 1017 } 1018 } 1019 1020 void RISCVTargetLowering::analyzeOutputArgs( 1021 MachineFunction &MF, CCState &CCInfo, 1022 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 
1023 CallLoweringInfo *CLI) const { 1024 unsigned NumArgs = Outs.size(); 1025 1026 for (unsigned i = 0; i != NumArgs; i++) { 1027 MVT ArgVT = Outs[i].VT; 1028 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 1029 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 1030 1031 if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full, 1032 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) { 1033 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 1034 << EVT(ArgVT).getEVTString() << "\n"); 1035 llvm_unreachable(nullptr); 1036 } 1037 } 1038 } 1039 1040 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 1041 // values. 1042 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 1043 const CCValAssign &VA, const SDLoc &DL) { 1044 switch (VA.getLocInfo()) { 1045 default: 1046 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1047 case CCValAssign::Full: 1048 break; 1049 case CCValAssign::BCvt: 1050 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 1051 break; 1052 } 1053 return Val; 1054 } 1055 1056 // The caller is responsible for loading the full value if the argument is 1057 // passed with CCValAssign::Indirect. 1058 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 1059 const CCValAssign &VA, const SDLoc &DL) { 1060 MachineFunction &MF = DAG.getMachineFunction(); 1061 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1062 EVT LocVT = VA.getLocVT(); 1063 SDValue Val; 1064 1065 unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 1066 RegInfo.addLiveIn(VA.getLocReg(), VReg); 1067 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 1068 1069 if (VA.getLocInfo() == CCValAssign::Indirect) 1070 return Val; 1071 1072 return convertLocVTToValVT(DAG, Val, VA, DL); 1073 } 1074 1075 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 1076 const CCValAssign &VA, const SDLoc &DL) { 1077 EVT LocVT = VA.getLocVT(); 1078 1079 switch (VA.getLocInfo()) { 1080 default: 1081 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1082 case CCValAssign::Full: 1083 break; 1084 case CCValAssign::BCvt: 1085 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 1086 break; 1087 } 1088 return Val; 1089 } 1090 1091 // The caller is responsible for loading the full value if the argument is 1092 // passed with CCValAssign::Indirect. 
1093 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 1094 const CCValAssign &VA, const SDLoc &DL) { 1095 MachineFunction &MF = DAG.getMachineFunction(); 1096 MachineFrameInfo &MFI = MF.getFrameInfo(); 1097 EVT LocVT = VA.getLocVT(); 1098 EVT ValVT = VA.getValVT(); 1099 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 1100 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, 1101 VA.getLocMemOffset(), /*Immutable=*/true); 1102 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1103 SDValue Val; 1104 1105 ISD::LoadExtType ExtType; 1106 switch (VA.getLocInfo()) { 1107 default: 1108 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1109 case CCValAssign::Full: 1110 case CCValAssign::Indirect: 1111 ExtType = ISD::NON_EXTLOAD; 1112 break; 1113 } 1114 Val = DAG.getExtLoad( 1115 ExtType, DL, LocVT, Chain, FIN, 1116 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 1117 return Val; 1118 } 1119 1120 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 1121 const CCValAssign &VA, const SDLoc &DL) { 1122 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 1123 "Unexpected VA"); 1124 MachineFunction &MF = DAG.getMachineFunction(); 1125 MachineFrameInfo &MFI = MF.getFrameInfo(); 1126 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1127 1128 if (VA.isMemLoc()) { 1129 // f64 is passed on the stack. 1130 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true); 1131 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1132 return DAG.getLoad(MVT::f64, DL, Chain, FIN, 1133 MachinePointerInfo::getFixedStack(MF, FI)); 1134 } 1135 1136 assert(VA.isRegLoc() && "Expected register VA assignment"); 1137 1138 unsigned LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 1139 RegInfo.addLiveIn(VA.getLocReg(), LoVReg); 1140 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32); 1141 SDValue Hi; 1142 if (VA.getLocReg() == RISCV::X17) { 1143 // Second half of f64 is passed on the stack. 1144 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true); 1145 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1146 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN, 1147 MachinePointerInfo::getFixedStack(MF, FI)); 1148 } else { 1149 // Second half of f64 is passed in another GPR. 1150 unsigned HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 1151 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg); 1152 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32); 1153 } 1154 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi); 1155 } 1156 1157 // Transform physical registers into virtual registers. 
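// Roughly: CC_RISCV (via analyzeInputArgs) decides where each formal argument
// lives; register arguments are copied into fresh virtual registers, stack
// arguments are loaded from fixed frame objects, indirect arguments are
// re-loaded through the pointer that was actually passed, and for varargs the
// unused argument registers are spilled to a save area so va_arg can walk
// them.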
1158 SDValue RISCVTargetLowering::LowerFormalArguments(
1159 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1160 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1161 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1162
1163 switch (CallConv) {
1164 default:
1165 report_fatal_error("Unsupported calling convention");
1166 case CallingConv::C:
1167 case CallingConv::Fast:
1168 break;
1169 }
1170
1171 MachineFunction &MF = DAG.getMachineFunction();
1172
1173 const Function &Func = MF.getFunction();
1174 if (Func.hasFnAttribute("interrupt")) {
1175 if (!Func.arg_empty())
1176 report_fatal_error(
1177 "Functions with the interrupt attribute cannot have arguments!");
1178
1179 StringRef Kind =
1180 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1181
1182 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1183 report_fatal_error(
1184 "Function interrupt attribute argument not supported!");
1185 }
1186
1187 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1188 MVT XLenVT = Subtarget.getXLenVT();
1189 unsigned XLenInBytes = Subtarget.getXLen() / 8;
1190 // Used with varargs to accumulate store chains.
1191 std::vector<SDValue> OutChains;
1192
1193 // Assign locations to all of the incoming arguments.
1194 SmallVector<CCValAssign, 16> ArgLocs;
1195 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1196 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1197
1198 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1199 CCValAssign &VA = ArgLocs[i];
1200 SDValue ArgValue;
1201 // Passing f64 on RV32D with a soft float ABI must be handled as a special
1202 // case.
1203 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1204 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1205 else if (VA.isRegLoc())
1206 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1207 else
1208 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1209
1210 if (VA.getLocInfo() == CCValAssign::Indirect) {
1211 // If the original argument was split and passed by reference (e.g. i128
1212 // on RV32), we need to load all parts of it here (using the same
1213 // address).
1214 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1215 MachinePointerInfo()));
1216 unsigned ArgIndex = Ins[i].OrigArgIndex;
1217 assert(Ins[i].PartOffset == 0);
1218 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1219 CCValAssign &PartVA = ArgLocs[i + 1];
1220 unsigned PartOffset = Ins[i + 1].PartOffset;
1221 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1222 DAG.getIntPtrConstant(PartOffset, DL));
1223 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1224 MachinePointerInfo()));
1225 ++i;
1226 }
1227 continue;
1228 }
1229 InVals.push_back(ArgValue);
1230 }
1231
1232 if (IsVarArg) {
1233 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1234 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1235 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1236 MachineFrameInfo &MFI = MF.getFrameInfo();
1237 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1238 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1239
1240 // Offset of the first variable argument from stack pointer, and size of
1241 // the vararg save area. For now, the varargs save area is either zero or
1242 // large enough to hold a0-a7.
1243 int VaArgOffset, VarArgsSaveSize;
1244
1245 // If all registers are allocated, then all varargs must be passed on the
1246 // stack and we don't need to save any argregs.
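// For example, on RV32 a varargs callee that uses only a0 for its fixed
// arguments leaves Idx == 1, so VarArgsSaveSize starts at 7 * 4 = 28 bytes
// with VaArgOffset == -28; the extra slot created below for the odd register
// count then grows the save area to 32 bytes.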
1247 if (ArgRegs.size() == Idx) {
1248 VaArgOffset = CCInfo.getNextStackOffset();
1249 VarArgsSaveSize = 0;
1250 } else {
1251 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
1252 VaArgOffset = -VarArgsSaveSize;
1253 }
1254
1255 // Record the frame index of the first variable argument,
1256 // which is needed by VASTART.
1257 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1258 RVFI->setVarArgsFrameIndex(FI);
1259
1260 // If saving an odd number of registers, create an extra stack slot to
1261 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
1262 // offsets to even-numbered registers remain 2*XLEN-aligned.
1263 if (Idx % 2) {
1264 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
1265 true);
1266 VarArgsSaveSize += XLenInBytes;
1267 }
1268
1269 // Copy the integer registers that may have been used for passing varargs
1270 // to the vararg save area.
1271 for (unsigned I = Idx; I < ArgRegs.size();
1272 ++I, VaArgOffset += XLenInBytes) {
1273 const unsigned Reg = RegInfo.createVirtualRegister(RC);
1274 RegInfo.addLiveIn(ArgRegs[I], Reg);
1275 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
1276 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
1277 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1278 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
1279 MachinePointerInfo::getFixedStack(MF, FI));
1280 cast<StoreSDNode>(Store.getNode())
1281 ->getMemOperand()
1282 ->setValue((Value *)nullptr);
1283 OutChains.push_back(Store);
1284 }
1285 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
1286 }
1287
1288 // All stores are grouped in one node to allow the matching between
1289 // the size of Ins and InVals. This only happens for vararg functions.
1290 if (!OutChains.empty()) {
1291 OutChains.push_back(Chain);
1292 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
1293 }
1294
1295 return Chain;
1296 }
1297
1298 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1299 /// for tail call optimization.
1300 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
1301 bool RISCVTargetLowering::IsEligibleForTailCallOptimization(
1302 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
1303 const SmallVector<CCValAssign, 16> &ArgLocs) const {
1304
1305 auto &Callee = CLI.Callee;
1306 auto CalleeCC = CLI.CallConv;
1307 auto IsVarArg = CLI.IsVarArg;
1308 auto &Outs = CLI.Outs;
1309 auto &Caller = MF.getFunction();
1310 auto CallerCC = Caller.getCallingConv();
1311
1312 // Do not tail call opt functions with "disable-tail-calls" attribute.
1313 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
1314 return false;
1315
1316 // Exception-handling functions need a special set of instructions to
1317 // indicate a return to the hardware. Tail-calling another function would
1318 // probably break this.
1319 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
1320 // should be expanded as new function attributes are introduced.
1321 if (Caller.hasFnAttribute("interrupt"))
1322 return false;
1323
1324 // Do not tail call opt functions with varargs.
1325 if (IsVarArg)
1326 return false;
1327
1328 // Do not tail call opt if the stack is used to pass parameters.
1329 if (CCInfo.getNextStackOffset() != 0)
1330 return false;
1331
1332 // Do not tail call opt if any parameters need to be passed indirectly.
1333 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 1334 // passed indirectly. So the address of the value will be passed in a 1335 // register, or if not available, then the address is put on the stack. In 1336 // order to pass indirectly, space on the stack often needs to be allocated 1337 // in order to store the value. In this case the CCInfo.getNextStackOffset() 1338 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 1339 // are passed CCValAssign::Indirect. 1340 for (auto &VA : ArgLocs) 1341 if (VA.getLocInfo() == CCValAssign::Indirect) 1342 return false; 1343 1344 // Do not tail call opt if either caller or callee uses struct return 1345 // semantics. 1346 auto IsCallerStructRet = Caller.hasStructRetAttr(); 1347 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 1348 if (IsCallerStructRet || IsCalleeStructRet) 1349 return false; 1350 1351 // Externally-defined functions with weak linkage should not be 1352 // tail-called. The behaviour of branch instructions in this situation (as 1353 // used for tail calls) is implementation-defined, so we cannot rely on the 1354 // linker replacing the tail call with a return. 1355 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1356 const GlobalValue *GV = G->getGlobal(); 1357 if (GV->hasExternalWeakLinkage()) 1358 return false; 1359 } 1360 1361 // The callee has to preserve all registers the caller needs to preserve. 1362 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 1363 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 1364 if (CalleeCC != CallerCC) { 1365 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 1366 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 1367 return false; 1368 } 1369 1370 // Byval parameters hand the function a pointer directly into the stack area 1371 // we want to reuse during a tail call. Working around this *is* possible 1372 // but less efficient and uglier in LowerCall. 1373 for (auto &Arg : Outs) 1374 if (Arg.Flags.isByVal()) 1375 return false; 1376 1377 return true; 1378 } 1379 1380 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 1381 // and output parameter nodes. 1382 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 1383 SmallVectorImpl<SDValue> &InVals) const { 1384 SelectionDAG &DAG = CLI.DAG; 1385 SDLoc &DL = CLI.DL; 1386 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 1387 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 1388 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 1389 SDValue Chain = CLI.Chain; 1390 SDValue Callee = CLI.Callee; 1391 bool &IsTailCall = CLI.IsTailCall; 1392 CallingConv::ID CallConv = CLI.CallConv; 1393 bool IsVarArg = CLI.IsVarArg; 1394 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 1395 MVT XLenVT = Subtarget.getXLenVT(); 1396 1397 MachineFunction &MF = DAG.getMachineFunction(); 1398 1399 // Analyze the operands of the call, assigning locations to each operand. 1400 SmallVector<CCValAssign, 16> ArgLocs; 1401 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 1402 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 1403 1404 // Check if it's really possible to do a tail call. 
1405 if (IsTailCall) 1406 IsTailCall = IsEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, 1407 ArgLocs); 1408 1409 if (IsTailCall) 1410 ++NumTailCalls; 1411 else if (CLI.CS && CLI.CS.isMustTailCall()) 1412 report_fatal_error("failed to perform tail call elimination on a call " 1413 "site marked musttail"); 1414 1415 // Get a count of how many bytes are to be pushed on the stack. 1416 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 1417 1418 // Create local copies for byval args 1419 SmallVector<SDValue, 8> ByValArgs; 1420 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 1421 ISD::ArgFlagsTy Flags = Outs[i].Flags; 1422 if (!Flags.isByVal()) 1423 continue; 1424 1425 SDValue Arg = OutVals[i]; 1426 unsigned Size = Flags.getByValSize(); 1427 unsigned Align = Flags.getByValAlign(); 1428 1429 int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false); 1430 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 1431 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 1432 1433 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align, 1434 /*IsVolatile=*/false, 1435 /*AlwaysInline=*/false, 1436 IsTailCall, MachinePointerInfo(), 1437 MachinePointerInfo()); 1438 ByValArgs.push_back(FIPtr); 1439 } 1440 1441 if (!IsTailCall) 1442 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 1443 1444 // Copy argument values to their designated locations. 1445 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 1446 SmallVector<SDValue, 8> MemOpChains; 1447 SDValue StackPtr; 1448 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 1449 CCValAssign &VA = ArgLocs[i]; 1450 SDValue ArgValue = OutVals[i]; 1451 ISD::ArgFlagsTy Flags = Outs[i].Flags; 1452 1453 // Handle passing f64 on RV32D with a soft float ABI as a special case. 1454 bool IsF64OnRV32DSoftABI = 1455 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 1456 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 1457 SDValue SplitF64 = DAG.getNode( 1458 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 1459 SDValue Lo = SplitF64.getValue(0); 1460 SDValue Hi = SplitF64.getValue(1); 1461 1462 unsigned RegLo = VA.getLocReg(); 1463 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 1464 1465 if (RegLo == RISCV::X17) { 1466 // Second half of f64 is passed on the stack. 1467 // Work out the address of the stack slot. 1468 if (!StackPtr.getNode()) 1469 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 1470 // Emit the store. 1471 MemOpChains.push_back( 1472 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 1473 } else { 1474 // Second half of f64 is passed in another GPR. 1475 unsigned RegHigh = RegLo + 1; 1476 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 1477 } 1478 continue; 1479 } 1480 1481 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 1482 // as any other MemLoc. 1483 1484 // Promote the value if needed. 1485 // For now, only handle fully promoted and indirect arguments. 1486 if (VA.getLocInfo() == CCValAssign::Indirect) { 1487 // Store the argument in a stack slot and pass its address. 1488 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 1489 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 1490 MemOpChains.push_back( 1491 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 1492 MachinePointerInfo::getFixedStack(MF, FI))); 1493 // If the original argument was split (e.g. i128), we need 1494 // to store all parts of it here (and pass just one address). 
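// For instance, an i128 argument on RV32 arrives here as four i32 parts that
// share one OrigArgIndex; all four are stored into the single stack temporary
// created above, and only SpillSlot's address is passed to the callee.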
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
    } else {
      ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and the direct call can then be matched by PseudoCALL.
  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(S->getGlobal(), DL, PtrVT, 0, 0);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, 0);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
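  // Both the tail-call and the normal-call nodes below take the chain, the
  // callee and the argument registers (plus the register mask and optional
  // glue) collected in Ops above.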
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                             RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);

    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      // Handle returning f64 on RV32D with a soft float ABI.
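      // The f64 value is split into two i32 halves with RISCVISD::SplitF64 and
      // returned in a pair of consecutive GPRs, mirroring the argument-passing
      // handling in LowerCall above.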
      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);
      unsigned RegLo = VA.getLocReg();
      unsigned RegHi = RegLo + 1;
      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
  const Function &Func = DAG.getMachineFunction().getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
      report_fatal_error(
          "Functions with the interrupt attribute must have void return type!");

    MachineFunction &MF = DAG.getMachineFunction();
    StringRef Kind =
        MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    unsigned RetOpc;
    if (Kind == "user")
      RetOpc = RISCVISD::URET_FLAG;
    else if (Kind == "supervisor")
      RetOpc = RISCVISD::SRET_FLAG;
    else
      RetOpc = RISCVISD::MRET_FLAG;

    return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::URET_FLAG:
    return "RISCVISD::URET_FLAG";
  case RISCVISD::SRET_FLAG:
    return "RISCVISD::SRET_FLAG";
  case RISCVISD::MRET_FLAG:
    return "RISCVISD::MRET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  case RISCVISD::BuildPairF64:
    return "RISCVISD::BuildPairF64";
  case RISCVISD::SplitF64:
    return "RISCVISD::SplitF64";
  case RISCVISD::TAIL:
    return "RISCVISD::TAIL";
  case RISCVISD::SLLW:
    return "RISCVISD::SLLW";
  case RISCVISD::SRAW:
    return "RISCVISD::SRAW";
  case RISCVISD::SRLW:
    return "RISCVISD::SRLW";
  case RISCVISD::DIVW:
    return "RISCVISD::DIVW";
  case RISCVISD::DIVUW:
    return "RISCVISD::DIVUW";
  case RISCVISD::REMUW:
    return "RISCVISD::REMUW";
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
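  // For example, an inline asm statement such as
  //   asm volatile("add %0, %1, %2" : "=r"(res) : "r"(a), "r"(b));
  // (illustrative only) binds each 'r' operand to the GPR register class
  // returned below.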
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt,
    AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}