1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the IRTranslator class. 10 //===----------------------------------------------------------------------===// 11 12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 13 #include "llvm/ADT/PostOrderIterator.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/BranchProbabilityInfo.h" 19 #include "llvm/Analysis/Loads.h" 20 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/Analysis.h" 23 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 24 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 25 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" 26 #include "llvm/CodeGen/LowLevelType.h" 27 #include "llvm/CodeGen/MachineBasicBlock.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineFunction.h" 30 #include "llvm/CodeGen/MachineInstrBuilder.h" 31 #include "llvm/CodeGen/MachineMemOperand.h" 32 #include "llvm/CodeGen/MachineModuleInfo.h" 33 #include "llvm/CodeGen/MachineOperand.h" 34 #include "llvm/CodeGen/MachineRegisterInfo.h" 35 #include "llvm/CodeGen/StackProtector.h" 36 #include "llvm/CodeGen/SwitchLoweringUtils.h" 37 #include "llvm/CodeGen/TargetFrameLowering.h" 38 #include "llvm/CodeGen/TargetInstrInfo.h" 39 #include "llvm/CodeGen/TargetLowering.h" 40 #include "llvm/CodeGen/TargetPassConfig.h" 41 #include "llvm/CodeGen/TargetRegisterInfo.h" 42 #include "llvm/CodeGen/TargetSubtargetInfo.h" 43 #include "llvm/IR/BasicBlock.h" 44 #include "llvm/IR/CFG.h" 45 #include "llvm/IR/Constant.h" 46 #include "llvm/IR/Constants.h" 47 #include "llvm/IR/DataLayout.h" 48 #include "llvm/IR/DebugInfo.h" 49 #include "llvm/IR/DerivedTypes.h" 50 #include "llvm/IR/DiagnosticInfo.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/GetElementPtrTypeIterator.h" 53 #include "llvm/IR/InlineAsm.h" 54 #include "llvm/IR/InstrTypes.h" 55 #include "llvm/IR/Instructions.h" 56 #include "llvm/IR/IntrinsicInst.h" 57 #include "llvm/IR/Intrinsics.h" 58 #include "llvm/IR/LLVMContext.h" 59 #include "llvm/IR/Metadata.h" 60 #include "llvm/IR/PatternMatch.h" 61 #include "llvm/IR/Type.h" 62 #include "llvm/IR/User.h" 63 #include "llvm/IR/Value.h" 64 #include "llvm/InitializePasses.h" 65 #include "llvm/MC/MCContext.h" 66 #include "llvm/Pass.h" 67 #include "llvm/Support/Casting.h" 68 #include "llvm/Support/CodeGen.h" 69 #include "llvm/Support/Debug.h" 70 #include "llvm/Support/ErrorHandling.h" 71 #include "llvm/Support/LowLevelTypeImpl.h" 72 #include "llvm/Support/MathExtras.h" 73 #include "llvm/Support/raw_ostream.h" 74 #include "llvm/Target/TargetIntrinsicInfo.h" 75 #include "llvm/Target/TargetMachine.h" 76 #include "llvm/Transforms/Utils/MemoryOpRemark.h" 77 #include <algorithm> 78 #include <cassert> 79 #include <cstddef> 80 #include <cstdint> 81 #include <iterator> 82 #include <string> 83 #include <utility> 84 #include <vector> 85 86 #define DEBUG_TYPE "irtranslator" 87 88 using namespace llvm; 89 90 static cl::opt<bool> 91 EnableCSEInIRTranslator("enable-cse-in-irtranslator", 92 
cl::desc("Should enable CSE in irtranslator"), 93 cl::Optional, cl::init(false)); 94 char IRTranslator::ID = 0; 95 96 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 97 false, false) 98 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 99 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) 100 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 101 INITIALIZE_PASS_DEPENDENCY(StackProtector) 102 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 103 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 104 false, false) 105 106 static void reportTranslationError(MachineFunction &MF, 107 const TargetPassConfig &TPC, 108 OptimizationRemarkEmitter &ORE, 109 OptimizationRemarkMissed &R) { 110 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 111 112 // Print the function name explicitly if we don't have a debug location (which 113 // makes the diagnostic less useful) or if we're going to emit a raw error. 114 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 115 R << (" (in function: " + MF.getName() + ")").str(); 116 117 if (TPC.isGlobalISelAbortEnabled()) 118 report_fatal_error(R.getMsg()); 119 else 120 ORE.emit(R); 121 } 122 123 IRTranslator::IRTranslator(CodeGenOpt::Level optlevel) 124 : MachineFunctionPass(ID), OptLevel(optlevel) {} 125 126 #ifndef NDEBUG 127 namespace { 128 /// Verify that every instruction created has the same DILocation as the 129 /// instruction being translated. 130 class DILocationVerifier : public GISelChangeObserver { 131 const Instruction *CurrInst = nullptr; 132 133 public: 134 DILocationVerifier() = default; 135 ~DILocationVerifier() = default; 136 137 const Instruction *getCurrentInst() const { return CurrInst; } 138 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 139 140 void erasingInstr(MachineInstr &MI) override {} 141 void changingInstr(MachineInstr &MI) override {} 142 void changedInstr(MachineInstr &MI) override {} 143 144 void createdInstr(MachineInstr &MI) override { 145 assert(getCurrentInst() && "Inserted instruction without a current MI"); 146 147 // Only print the check message if we're actually checking it. 148 #ifndef NDEBUG 149 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 150 << " was copied to " << MI); 151 #endif 152 // We allow insts in the entry block to have a debug loc line of 0 because 153 // they could have originated from constants, and we don't want a jumpy 154 // debug experience. 
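    // (Illustrative note: a constant materialized in the entry block for a
    // value that is only used later typically carries no DILocation at all,
    // so its reported line is 0 rather than the line of the IR instruction
    // currently being translated.)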
155 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || 156 MI.getDebugLoc().getLine() == 0) && 157 "Line info was not transferred to all instructions"); 158 } 159 }; 160 } // namespace 161 #endif // ifndef NDEBUG 162 163 164 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 165 AU.addRequired<StackProtector>(); 166 AU.addRequired<TargetPassConfig>(); 167 AU.addRequired<GISelCSEAnalysisWrapperPass>(); 168 if (OptLevel != CodeGenOpt::None) 169 AU.addRequired<BranchProbabilityInfoWrapperPass>(); 170 AU.addRequired<TargetLibraryInfoWrapperPass>(); 171 AU.addPreserved<TargetLibraryInfoWrapperPass>(); 172 getSelectionDAGFallbackAnalysisUsage(AU); 173 MachineFunctionPass::getAnalysisUsage(AU); 174 } 175 176 IRTranslator::ValueToVRegInfo::VRegListT & 177 IRTranslator::allocateVRegs(const Value &Val) { 178 auto VRegsIt = VMap.findVRegs(Val); 179 if (VRegsIt != VMap.vregs_end()) 180 return *VRegsIt->second; 181 auto *Regs = VMap.getVRegs(Val); 182 auto *Offsets = VMap.getOffsets(Val); 183 SmallVector<LLT, 4> SplitTys; 184 computeValueLLTs(*DL, *Val.getType(), SplitTys, 185 Offsets->empty() ? Offsets : nullptr); 186 for (unsigned i = 0; i < SplitTys.size(); ++i) 187 Regs->push_back(0); 188 return *Regs; 189 } 190 191 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { 192 auto VRegsIt = VMap.findVRegs(Val); 193 if (VRegsIt != VMap.vregs_end()) 194 return *VRegsIt->second; 195 196 if (Val.getType()->isVoidTy()) 197 return *VMap.getVRegs(Val); 198 199 // Create entry for this type. 200 auto *VRegs = VMap.getVRegs(Val); 201 auto *Offsets = VMap.getOffsets(Val); 202 203 assert(Val.getType()->isSized() && 204 "Don't know how to create an empty vreg"); 205 206 SmallVector<LLT, 4> SplitTys; 207 computeValueLLTs(*DL, *Val.getType(), SplitTys, 208 Offsets->empty() ? Offsets : nullptr); 209 210 if (!isa<Constant>(Val)) { 211 for (auto Ty : SplitTys) 212 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 213 return *VRegs; 214 } 215 216 if (Val.getType()->isAggregateType()) { 217 // UndefValue, ConstantAggregateZero 218 auto &C = cast<Constant>(Val); 219 unsigned Idx = 0; 220 while (auto Elt = C.getAggregateElement(Idx++)) { 221 auto EltRegs = getOrCreateVRegs(*Elt); 222 llvm::copy(EltRegs, std::back_inserter(*VRegs)); 223 } 224 } else { 225 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 226 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 227 bool Success = translate(cast<Constant>(Val), VRegs->front()); 228 if (!Success) { 229 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 230 MF->getFunction().getSubprogram(), 231 &MF->getFunction().getEntryBlock()); 232 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 233 reportTranslationError(*MF, *TPC, *ORE, R); 234 return *VRegs; 235 } 236 } 237 238 return *VRegs; 239 } 240 241 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 242 auto MapEntry = FrameIndices.find(&AI); 243 if (MapEntry != FrameIndices.end()) 244 return MapEntry->second; 245 246 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); 247 uint64_t Size = 248 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 249 250 // Always allocate at least one byte. 
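  // (E.g. an alloca of an empty struct or a zero-length array computes
  // Size == 0 above; clamping to 1 still gives it a distinct frame index.)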
251 Size = std::max<uint64_t>(Size, 1u); 252 253 int &FI = FrameIndices[&AI]; 254 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI); 255 return FI; 256 } 257 258 Align IRTranslator::getMemOpAlign(const Instruction &I) { 259 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) 260 return SI->getAlign(); 261 if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) 262 return LI->getAlign(); 263 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) 264 return AI->getAlign(); 265 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) 266 return AI->getAlign(); 267 268 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 269 R << "unable to translate memop: " << ore::NV("Opcode", &I); 270 reportTranslationError(*MF, *TPC, *ORE, R); 271 return Align(1); 272 } 273 274 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 275 MachineBasicBlock *&MBB = BBToMBB[&BB]; 276 assert(MBB && "BasicBlock was not encountered before"); 277 return *MBB; 278 } 279 280 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 281 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 282 MachinePreds[Edge].push_back(NewPred); 283 } 284 285 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 286 MachineIRBuilder &MIRBuilder) { 287 // Get or create a virtual register for each value. 288 // Unless the value is a Constant => loadimm cst? 289 // or inline constant each time? 290 // Creation of a virtual register needs to have a size. 291 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 292 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 293 Register Res = getOrCreateVReg(U); 294 uint16_t Flags = 0; 295 if (isa<Instruction>(U)) { 296 const Instruction &I = cast<Instruction>(U); 297 Flags = MachineInstr::copyFlagsFromInstruction(I); 298 } 299 300 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); 301 return true; 302 } 303 304 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U, 305 MachineIRBuilder &MIRBuilder) { 306 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 307 Register Res = getOrCreateVReg(U); 308 uint16_t Flags = 0; 309 if (isa<Instruction>(U)) { 310 const Instruction &I = cast<Instruction>(U); 311 Flags = MachineInstr::copyFlagsFromInstruction(I); 312 } 313 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags); 314 return true; 315 } 316 317 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { 318 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder); 319 } 320 321 bool IRTranslator::translateCompare(const User &U, 322 MachineIRBuilder &MIRBuilder) { 323 auto *CI = dyn_cast<CmpInst>(&U); 324 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 325 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 326 Register Res = getOrCreateVReg(U); 327 CmpInst::Predicate Pred = 328 CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( 329 cast<ConstantExpr>(U).getPredicate()); 330 if (CmpInst::isIntPredicate(Pred)) 331 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 332 else if (Pred == CmpInst::FCMP_FALSE) 333 MIRBuilder.buildCopy( 334 Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); 335 else if (Pred == CmpInst::FCMP_TRUE) 336 MIRBuilder.buildCopy( 337 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); 338 else { 339 assert(CI && "Instruction should be CmpInst"); 340 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, 341 MachineInstr::copyFlagsFromInstruction(*CI)); 342 } 343 344 return true; 345 } 346 347 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 348 const ReturnInst &RI = cast<ReturnInst>(U); 349 const Value *Ret = RI.getReturnValue(); 350 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 351 Ret = nullptr; 352 353 ArrayRef<Register> VRegs; 354 if (Ret) 355 VRegs = getOrCreateVRegs(*Ret); 356 357 Register SwiftErrorVReg = 0; 358 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { 359 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( 360 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); 361 } 362 363 // The target may mess up with the insertion point, but 364 // this is not important as a return is the last instruction 365 // of the block anyway. 366 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg); 367 } 368 369 void IRTranslator::emitBranchForMergedCondition( 370 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, 371 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, 372 BranchProbability TProb, BranchProbability FProb, bool InvertCond) { 373 // If the leaf of the tree is a comparison, merge the condition into 374 // the caseblock. 375 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) { 376 CmpInst::Predicate Condition; 377 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 378 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate(); 379 } else { 380 const FCmpInst *FC = cast<FCmpInst>(Cond); 381 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate(); 382 } 383 384 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0), 385 BOp->getOperand(1), nullptr, TBB, FBB, CurBB, 386 CurBuilder->getDebugLoc(), TProb, FProb); 387 SL->SwitchCases.push_back(CB); 388 return; 389 } 390 391 // Create a CaseBlock record representing this branch. 392 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ; 393 SwitchCG::CaseBlock CB( 394 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()), 395 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb); 396 SL->SwitchCases.push_back(CB); 397 } 398 399 static bool isValInBlock(const Value *V, const BasicBlock *BB) { 400 if (const Instruction *I = dyn_cast<Instruction>(V)) 401 return I->getParent() == BB; 402 return true; 403 } 404 405 void IRTranslator::findMergedConditions( 406 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, 407 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, 408 Instruction::BinaryOps Opc, BranchProbability TProb, 409 BranchProbability FProb, bool InvertCond) { 410 using namespace PatternMatch; 411 assert((Opc == Instruction::And || Opc == Instruction::Or) && 412 "Expected Opc to be AND/OR"); 413 // Skip over not part of the tree and remember to invert op and operands at 414 // next level. 
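  // (E.g. for a condition like (xor (or %a, %b), true) the `not` is peeled
  // off here and the or/and tree underneath is visited with InvertCond
  // flipped.)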
415 Value *NotCond; 416 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) && 417 isValInBlock(NotCond, CurBB->getBasicBlock())) { 418 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb, 419 !InvertCond); 420 return; 421 } 422 423 const Instruction *BOp = dyn_cast<Instruction>(Cond); 424 const Value *BOpOp0, *BOpOp1; 425 // Compute the effective opcode for Cond, taking into account whether it needs 426 // to be inverted, e.g. 427 // and (not (or A, B)), C 428 // gets lowered as 429 // and (and (not A, not B), C) 430 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0; 431 if (BOp) { 432 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1))) 433 ? Instruction::And 434 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1))) 435 ? Instruction::Or 436 : (Instruction::BinaryOps)0); 437 if (InvertCond) { 438 if (BOpc == Instruction::And) 439 BOpc = Instruction::Or; 440 else if (BOpc == Instruction::Or) 441 BOpc = Instruction::And; 442 } 443 } 444 445 // If this node is not part of the or/and tree, emit it as a branch. 446 // Note that all nodes in the tree should have same opcode. 447 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse(); 448 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() || 449 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) || 450 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) { 451 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb, 452 InvertCond); 453 return; 454 } 455 456 // Create TmpBB after CurBB. 457 MachineFunction::iterator BBI(CurBB); 458 MachineBasicBlock *TmpBB = 459 MF->CreateMachineBasicBlock(CurBB->getBasicBlock()); 460 CurBB->getParent()->insert(++BBI, TmpBB); 461 462 if (Opc == Instruction::Or) { 463 // Codegen X | Y as: 464 // BB1: 465 // jmp_if_X TBB 466 // jmp TmpBB 467 // TmpBB: 468 // jmp_if_Y TBB 469 // jmp FBB 470 // 471 472 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 473 // The requirement is that 474 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 475 // = TrueProb for original BB. 476 // Assuming the original probabilities are A and B, one choice is to set 477 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to 478 // A/(1+B) and 2B/(1+B). This choice assumes that 479 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 480 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 481 // TmpBB, but the math is more complicated. 482 483 auto NewTrueProb = TProb / 2; 484 auto NewFalseProb = TProb / 2 + FProb; 485 // Emit the LHS condition. 486 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb, 487 NewFalseProb, InvertCond); 488 489 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B). 490 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb}; 491 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 492 // Emit the RHS condition into TmpBB. 493 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 494 Probs[1], InvertCond); 495 } else { 496 assert(Opc == Instruction::And && "Unknown merge op!"); 497 // Codegen X & Y as: 498 // BB1: 499 // jmp_if_X TmpBB 500 // jmp FBB 501 // TmpBB: 502 // jmp_if_Y TBB 503 // jmp FBB 504 // 505 // This requires creation of TmpBB after CurBB. 506 507 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 508 // The requirement is that 509 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 510 // = FalseProb for original BB. 
511 // Assuming the original probabilities are A and B, one choice is to set 512 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to 513 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 == 514 // TrueProb for BB1 * FalseProb for TmpBB. 515 516 auto NewTrueProb = TProb + FProb / 2; 517 auto NewFalseProb = FProb / 2; 518 // Emit the LHS condition. 519 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb, 520 NewFalseProb, InvertCond); 521 522 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A). 523 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2}; 524 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 525 // Emit the RHS condition into TmpBB. 526 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 527 Probs[1], InvertCond); 528 } 529 } 530 531 bool IRTranslator::shouldEmitAsBranches( 532 const std::vector<SwitchCG::CaseBlock> &Cases) { 533 // For multiple cases, it's better to emit as branches. 534 if (Cases.size() != 2) 535 return true; 536 537 // If this is two comparisons of the same values or'd or and'd together, they 538 // will get folded into a single comparison, so don't emit two blocks. 539 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 540 Cases[0].CmpRHS == Cases[1].CmpRHS) || 541 (Cases[0].CmpRHS == Cases[1].CmpLHS && 542 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 543 return false; 544 } 545 546 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 547 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 548 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 549 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred && 550 isa<Constant>(Cases[0].CmpRHS) && 551 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 552 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ && 553 Cases[0].TrueBB == Cases[1].ThisBB) 554 return false; 555 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE && 556 Cases[0].FalseBB == Cases[1].ThisBB) 557 return false; 558 } 559 560 return true; 561 } 562 563 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 564 const BranchInst &BrInst = cast<BranchInst>(U); 565 auto &CurMBB = MIRBuilder.getMBB(); 566 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0)); 567 568 if (BrInst.isUnconditional()) { 569 // If the unconditional target is the layout successor, fallthrough. 570 if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB)) 571 MIRBuilder.buildBr(*Succ0MBB); 572 573 // Link successors. 574 for (const BasicBlock *Succ : successors(&BrInst)) 575 CurMBB.addSuccessor(&getMBB(*Succ)); 576 return true; 577 } 578 579 // If this condition is one of the special cases we handle, do special stuff 580 // now. 581 const Value *CondVal = BrInst.getCondition(); 582 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1)); 583 584 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 585 586 // If this is a series of conditions that are or'd or and'd together, emit 587 // this as a sequence of branches instead of setcc's with and/or operations. 588 // As long as jumps are not expensive (exceptions for multi-use logic ops, 589 // unpredictable branches, and vector extracts because those jumps are likely 590 // expensive for any target), this should improve performance. 
591 // For example, instead of something like: 592 // cmp A, B 593 // C = seteq 594 // cmp D, E 595 // F = setle 596 // or C, F 597 // jnz foo 598 // Emit: 599 // cmp A, B 600 // je foo 601 // cmp D, E 602 // jle foo 603 using namespace PatternMatch; 604 const Instruction *CondI = dyn_cast<Instruction>(CondVal); 605 if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() && 606 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) { 607 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0; 608 Value *Vec; 609 const Value *BOp0, *BOp1; 610 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1)))) 611 Opcode = Instruction::And; 612 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1)))) 613 Opcode = Instruction::Or; 614 615 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) && 616 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) { 617 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode, 618 getEdgeProbability(&CurMBB, Succ0MBB), 619 getEdgeProbability(&CurMBB, Succ1MBB), 620 /*InvertCond=*/false); 621 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!"); 622 623 // Allow some cases to be rejected. 624 if (shouldEmitAsBranches(SL->SwitchCases)) { 625 // Emit the branch for this block. 626 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder); 627 SL->SwitchCases.erase(SL->SwitchCases.begin()); 628 return true; 629 } 630 631 // Okay, we decided not to do this, remove any inserted MBB's and clear 632 // SwitchCases. 633 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I) 634 MF->erase(SL->SwitchCases[I].ThisBB); 635 636 SL->SwitchCases.clear(); 637 } 638 } 639 640 // Create a CaseBlock record representing this branch. 641 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal, 642 ConstantInt::getTrue(MF->getFunction().getContext()), 643 nullptr, Succ0MBB, Succ1MBB, &CurMBB, 644 CurBuilder->getDebugLoc()); 645 646 // Use emitSwitchCase to actually insert the fast branch sequence for this 647 // cond branch. 648 emitSwitchCase(CB, &CurMBB, *CurBuilder); 649 return true; 650 } 651 652 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, 653 MachineBasicBlock *Dst, 654 BranchProbability Prob) { 655 if (!FuncInfo.BPI) { 656 Src->addSuccessorWithoutProb(Dst); 657 return; 658 } 659 if (Prob.isUnknown()) 660 Prob = getEdgeProbability(Src, Dst); 661 Src->addSuccessor(Dst, Prob); 662 } 663 664 BranchProbability 665 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, 666 const MachineBasicBlock *Dst) const { 667 const BasicBlock *SrcBB = Src->getBasicBlock(); 668 const BasicBlock *DstBB = Dst->getBasicBlock(); 669 if (!FuncInfo.BPI) { 670 // If BPI is not available, set the default probability as 1 / N, where N is 671 // the number of successors. 672 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 673 return BranchProbability(1, SuccSize); 674 } 675 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); 676 } 677 678 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { 679 using namespace SwitchCG; 680 // Extract cases from the switch. 681 const SwitchInst &SI = cast<SwitchInst>(U); 682 BranchProbabilityInfo *BPI = FuncInfo.BPI; 683 CaseClusterVector Clusters; 684 Clusters.reserve(SI.getNumCases()); 685 for (auto &I : SI.cases()) { 686 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); 687 assert(Succ && "Could not find successor mbb in mapping"); 688 const ConstantInt *CaseVal = I.getCaseValue(); 689 BranchProbability Prob = 690 BPI ? 
BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) 691 : BranchProbability(1, SI.getNumCases() + 1); 692 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); 693 } 694 695 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); 696 697 // Cluster adjacent cases with the same destination. We do this at all 698 // optimization levels because it's cheap to do and will make codegen faster 699 // if there are many clusters. 700 sortAndRangeify(Clusters); 701 702 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); 703 704 // If there is only the default destination, jump there directly. 705 if (Clusters.empty()) { 706 SwitchMBB->addSuccessor(DefaultMBB); 707 if (DefaultMBB != SwitchMBB->getNextNode()) 708 MIB.buildBr(*DefaultMBB); 709 return true; 710 } 711 712 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); 713 SL->findBitTestClusters(Clusters, &SI); 714 715 LLVM_DEBUG({ 716 dbgs() << "Case clusters: "; 717 for (const CaseCluster &C : Clusters) { 718 if (C.Kind == CC_JumpTable) 719 dbgs() << "JT:"; 720 if (C.Kind == CC_BitTests) 721 dbgs() << "BT:"; 722 723 C.Low->getValue().print(dbgs(), true); 724 if (C.Low != C.High) { 725 dbgs() << '-'; 726 C.High->getValue().print(dbgs(), true); 727 } 728 dbgs() << ' '; 729 } 730 dbgs() << '\n'; 731 }); 732 733 assert(!Clusters.empty()); 734 SwitchWorkList WorkList; 735 CaseClusterIt First = Clusters.begin(); 736 CaseClusterIt Last = Clusters.end() - 1; 737 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); 738 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); 739 740 // FIXME: At the moment we don't do any splitting optimizations here like 741 // SelectionDAG does, so this worklist only has one entry. 742 while (!WorkList.empty()) { 743 SwitchWorkListItem W = WorkList.back(); 744 WorkList.pop_back(); 745 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) 746 return false; 747 } 748 return true; 749 } 750 751 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, 752 MachineBasicBlock *MBB) { 753 // Emit the code for the jump table 754 assert(JT.Reg != -1U && "Should lower JT Header first!"); 755 MachineIRBuilder MIB(*MBB->getParent()); 756 MIB.setMBB(*MBB); 757 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 758 759 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); 760 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 761 762 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); 763 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); 764 } 765 766 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, 767 SwitchCG::JumpTableHeader &JTH, 768 MachineBasicBlock *HeaderBB) { 769 MachineIRBuilder MIB(*HeaderBB->getParent()); 770 MIB.setMBB(*HeaderBB); 771 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 772 773 const Value &SValue = *JTH.SValue; 774 // Subtract the lowest switch case value from the value being switched on. 775 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); 776 Register SwitchOpReg = getOrCreateVReg(SValue); 777 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); 778 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); 779 780 // This value may be smaller or larger than the target's pointer type, and 781 // therefore require extension or truncating. 
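  // (E.g. when switching on an i16 with 64-bit pointers, the adjusted value
  // is zero-extended to s64 below before being used as the jump table index.)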
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
        CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
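  // (Otherwise JTH->Emitted stays false and the header is expected to be
  // emitted later, once the remaining switch blocks are laid out.)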
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  // Ensure that the type will fit the mask value.
  LLT MaskTy = SwitchOpTy;
  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
    if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
      // Switch table case ranges are encoded into a series of masks.
      // Just use pointer type, it's guaranteed to fit.
      MaskTy = LLT::scalar(64);
      break;
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
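    // (Worked example: for Mask == 0b0100 the test below reduces to
    // Reg == 2, i.e. Reg == countTrailingZeros(Mask).)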
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable) {
    // Skip the range check if the fallthrough block is unreachable.
    BTB->OmitRangeCheck = true;
  }

  // If we're in the right place, emit the bit test header right now.
1139 if (CurMBB == SwitchMBB) { 1140 emitBitTestHeader(*BTB, SwitchMBB); 1141 BTB->Emitted = true; 1142 } 1143 return true; 1144 } 1145 1146 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, 1147 Value *Cond, 1148 MachineBasicBlock *SwitchMBB, 1149 MachineBasicBlock *DefaultMBB, 1150 MachineIRBuilder &MIB) { 1151 using namespace SwitchCG; 1152 MachineFunction *CurMF = FuncInfo.MF; 1153 MachineBasicBlock *NextMBB = nullptr; 1154 MachineFunction::iterator BBI(W.MBB); 1155 if (++BBI != FuncInfo.MF->end()) 1156 NextMBB = &*BBI; 1157 1158 if (EnableOpts) { 1159 // Here, we order cases by probability so the most likely case will be 1160 // checked first. However, two clusters can have the same probability in 1161 // which case their relative ordering is non-deterministic. So we use Low 1162 // as a tie-breaker as clusters are guaranteed to never overlap. 1163 llvm::sort(W.FirstCluster, W.LastCluster + 1, 1164 [](const CaseCluster &a, const CaseCluster &b) { 1165 return a.Prob != b.Prob 1166 ? a.Prob > b.Prob 1167 : a.Low->getValue().slt(b.Low->getValue()); 1168 }); 1169 1170 // Rearrange the case blocks so that the last one falls through if possible 1171 // without changing the order of probabilities. 1172 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { 1173 --I; 1174 if (I->Prob > W.LastCluster->Prob) 1175 break; 1176 if (I->Kind == CC_Range && I->MBB == NextMBB) { 1177 std::swap(*I, *W.LastCluster); 1178 break; 1179 } 1180 } 1181 } 1182 1183 // Compute total probability. 1184 BranchProbability DefaultProb = W.DefaultProb; 1185 BranchProbability UnhandledProbs = DefaultProb; 1186 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 1187 UnhandledProbs += I->Prob; 1188 1189 MachineBasicBlock *CurMBB = W.MBB; 1190 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 1191 bool FallthroughUnreachable = false; 1192 MachineBasicBlock *Fallthrough; 1193 if (I == W.LastCluster) { 1194 // For the last cluster, fall through to the default destination. 
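      // (The default block may itself begin with `unreachable`, e.g. for a
      // fully covered switch; FallthroughUnreachable lets the work-item
      // lowering below drop the range check in that case.)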
1195 Fallthrough = DefaultMBB; 1196 FallthroughUnreachable = isa<UnreachableInst>( 1197 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 1198 } else { 1199 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 1200 CurMF->insert(BBI, Fallthrough); 1201 } 1202 UnhandledProbs -= I->Prob; 1203 1204 switch (I->Kind) { 1205 case CC_BitTests: { 1206 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 1207 DefaultProb, UnhandledProbs, I, Fallthrough, 1208 FallthroughUnreachable)) { 1209 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch"); 1210 return false; 1211 } 1212 break; 1213 } 1214 1215 case CC_JumpTable: { 1216 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 1217 UnhandledProbs, I, Fallthrough, 1218 FallthroughUnreachable)) { 1219 LLVM_DEBUG(dbgs() << "Failed to lower jump table"); 1220 return false; 1221 } 1222 break; 1223 } 1224 case CC_Range: { 1225 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, 1226 FallthroughUnreachable, UnhandledProbs, 1227 CurMBB, MIB, SwitchMBB)) { 1228 LLVM_DEBUG(dbgs() << "Failed to lower switch range"); 1229 return false; 1230 } 1231 break; 1232 } 1233 } 1234 CurMBB = Fallthrough; 1235 } 1236 1237 return true; 1238 } 1239 1240 bool IRTranslator::translateIndirectBr(const User &U, 1241 MachineIRBuilder &MIRBuilder) { 1242 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 1243 1244 const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); 1245 MIRBuilder.buildBrIndirect(Tgt); 1246 1247 // Link successors. 1248 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; 1249 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 1250 for (const BasicBlock *Succ : successors(&BrInst)) { 1251 // It's legal for indirectbr instructions to have duplicate blocks in the 1252 // destination list. We don't allow this in MIR. Skip anything that's 1253 // already a successor. 1254 if (!AddedSuccessors.insert(Succ).second) 1255 continue; 1256 CurBB.addSuccessor(&getMBB(*Succ)); 1257 } 1258 1259 return true; 1260 } 1261 1262 static bool isSwiftError(const Value *V) { 1263 if (auto Arg = dyn_cast<Argument>(V)) 1264 return Arg->hasSwiftErrorAttr(); 1265 if (auto AI = dyn_cast<AllocaInst>(V)) 1266 return AI->isSwiftError(); 1267 return false; 1268 } 1269 1270 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 1271 const LoadInst &LI = cast<LoadInst>(U); 1272 if (DL->getTypeStoreSize(LI.getType()) == 0) 1273 return true; 1274 1275 ArrayRef<Register> Regs = getOrCreateVRegs(LI); 1276 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 1277 Register Base = getOrCreateVReg(*LI.getPointerOperand()); 1278 1279 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); 1280 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1281 1282 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { 1283 assert(Regs.size() == 1 && "swifterror should be single pointer"); 1284 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), 1285 LI.getPointerOperand()); 1286 MIRBuilder.buildCopy(Regs[0], VReg); 1287 return true; 1288 } 1289 1290 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1291 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); 1292 1293 const MDNode *Ranges = 1294 Regs.size() == 1 ? 
LI.getMetadata(LLVMContext::MD_range) : nullptr; 1295 for (unsigned i = 0; i < Regs.size(); ++i) { 1296 Register Addr; 1297 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 1298 1299 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 1300 Align BaseAlign = getMemOpAlign(LI); 1301 AAMDNodes AAMetadata; 1302 LI.getAAMetadata(AAMetadata); 1303 auto MMO = MF->getMachineMemOperand( 1304 Ptr, Flags, MRI->getType(Regs[i]), 1305 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, 1306 LI.getSyncScopeID(), LI.getOrdering()); 1307 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 1308 } 1309 1310 return true; 1311 } 1312 1313 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 1314 const StoreInst &SI = cast<StoreInst>(U); 1315 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 1316 return true; 1317 1318 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); 1319 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 1320 Register Base = getOrCreateVReg(*SI.getPointerOperand()); 1321 1322 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); 1323 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1324 1325 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { 1326 assert(Vals.size() == 1 && "swifterror should be single pointer"); 1327 1328 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), 1329 SI.getPointerOperand()); 1330 MIRBuilder.buildCopy(VReg, Vals[0]); 1331 return true; 1332 } 1333 1334 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1335 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); 1336 1337 for (unsigned i = 0; i < Vals.size(); ++i) { 1338 Register Addr; 1339 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 1340 1341 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 1342 Align BaseAlign = getMemOpAlign(SI); 1343 AAMDNodes AAMetadata; 1344 SI.getAAMetadata(AAMetadata); 1345 auto MMO = MF->getMachineMemOperand( 1346 Ptr, Flags, MRI->getType(Vals[i]), 1347 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, 1348 SI.getSyncScopeID(), SI.getOrdering()); 1349 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 1350 } 1351 return true; 1352 } 1353 1354 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 1355 const Value *Src = U.getOperand(0); 1356 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 1357 1358 // getIndexedOffsetInType is designed for GEPs, so the first index is the 1359 // usual array element rather than looking into the actual aggregate. 
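  // (E.g. for `extractvalue {i32, i64} %agg, 1` the index list built below is
  // {0, 1}, mirroring a GEP into the aggregate, and the resulting byte offset
  // is converted to bits on return.)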
1360 SmallVector<Value *, 1> Indices; 1361 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 1362 1363 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 1364 for (auto Idx : EVI->indices()) 1365 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 1366 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 1367 for (auto Idx : IVI->indices()) 1368 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 1369 } else { 1370 for (unsigned i = 1; i < U.getNumOperands(); ++i) 1371 Indices.push_back(U.getOperand(i)); 1372 } 1373 1374 return 8 * static_cast<uint64_t>( 1375 DL.getIndexedOffsetInType(Src->getType(), Indices)); 1376 } 1377 1378 bool IRTranslator::translateExtractValue(const User &U, 1379 MachineIRBuilder &MIRBuilder) { 1380 const Value *Src = U.getOperand(0); 1381 uint64_t Offset = getOffsetFromIndices(U, *DL); 1382 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 1383 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 1384 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); 1385 auto &DstRegs = allocateVRegs(U); 1386 1387 for (unsigned i = 0; i < DstRegs.size(); ++i) 1388 DstRegs[i] = SrcRegs[Idx++]; 1389 1390 return true; 1391 } 1392 1393 bool IRTranslator::translateInsertValue(const User &U, 1394 MachineIRBuilder &MIRBuilder) { 1395 const Value *Src = U.getOperand(0); 1396 uint64_t Offset = getOffsetFromIndices(U, *DL); 1397 auto &DstRegs = allocateVRegs(U); 1398 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 1399 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 1400 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 1401 auto InsertedIt = InsertedRegs.begin(); 1402 1403 for (unsigned i = 0; i < DstRegs.size(); ++i) { 1404 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 1405 DstRegs[i] = *InsertedIt++; 1406 else 1407 DstRegs[i] = SrcRegs[i]; 1408 } 1409 1410 return true; 1411 } 1412 1413 bool IRTranslator::translateSelect(const User &U, 1414 MachineIRBuilder &MIRBuilder) { 1415 Register Tst = getOrCreateVReg(*U.getOperand(0)); 1416 ArrayRef<Register> ResRegs = getOrCreateVRegs(U); 1417 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 1418 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 1419 1420 uint16_t Flags = 0; 1421 if (const SelectInst *SI = dyn_cast<SelectInst>(&U)) 1422 Flags = MachineInstr::copyFlagsFromInstruction(*SI); 1423 1424 for (unsigned i = 0; i < ResRegs.size(); ++i) { 1425 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); 1426 } 1427 1428 return true; 1429 } 1430 1431 bool IRTranslator::translateCopy(const User &U, const Value &V, 1432 MachineIRBuilder &MIRBuilder) { 1433 Register Src = getOrCreateVReg(V); 1434 auto &Regs = *VMap.getVRegs(U); 1435 if (Regs.empty()) { 1436 Regs.push_back(Src); 1437 VMap.getOffsets(U)->push_back(0); 1438 } else { 1439 // If we already assigned a vreg for this instruction, we can't change that. 1440 // Emit a copy to satisfy the users we already emitted. 1441 MIRBuilder.buildCopy(Regs[0], Src); 1442 } 1443 return true; 1444 } 1445 1446 bool IRTranslator::translateBitCast(const User &U, 1447 MachineIRBuilder &MIRBuilder) { 1448 // If we're bitcasting to the source type, we can reuse the source vreg. 
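  // (E.g. a bitcast between pointer types in the same address space maps to
  // the same LLT and becomes a plain copy, whereas <2 x i32> -> i64 still
  // needs a real G_BITCAST.)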
1449 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 1450 getLLTForType(*U.getType(), *DL)) 1451 return translateCopy(U, *U.getOperand(0), MIRBuilder); 1452 1453 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 1454 } 1455 1456 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 1457 MachineIRBuilder &MIRBuilder) { 1458 Register Op = getOrCreateVReg(*U.getOperand(0)); 1459 Register Res = getOrCreateVReg(U); 1460 MIRBuilder.buildInstr(Opcode, {Res}, {Op}); 1461 return true; 1462 } 1463 1464 bool IRTranslator::translateGetElementPtr(const User &U, 1465 MachineIRBuilder &MIRBuilder) { 1466 Value &Op0 = *U.getOperand(0); 1467 Register BaseReg = getOrCreateVReg(Op0); 1468 Type *PtrIRTy = Op0.getType(); 1469 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 1470 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1471 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1472 1473 // Normalize Vector GEP - all scalar operands should be converted to the 1474 // splat vector. 1475 unsigned VectorWidth = 0; 1476 1477 // True if we should use a splat vector; using VectorWidth alone is not 1478 // sufficient. 1479 bool WantSplatVector = false; 1480 if (auto *VT = dyn_cast<VectorType>(U.getType())) { 1481 VectorWidth = cast<FixedVectorType>(VT)->getNumElements(); 1482 // We don't produce 1 x N vectors; those are treated as scalars. 1483 WantSplatVector = VectorWidth > 1; 1484 } 1485 1486 // We might need to splat the base pointer into a vector if the offsets 1487 // are vectors. 1488 if (WantSplatVector && !PtrTy.isVector()) { 1489 BaseReg = 1490 MIRBuilder 1491 .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg) 1492 .getReg(0); 1493 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth); 1494 PtrTy = getLLTForType(*PtrIRTy, *DL); 1495 OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1496 OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1497 } 1498 1499 int64_t Offset = 0; 1500 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 1501 GTI != E; ++GTI) { 1502 const Value *Idx = GTI.getOperand(); 1503 if (StructType *StTy = GTI.getStructTypeOrNull()) { 1504 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 1505 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 1506 continue; 1507 } else { 1508 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 1509 1510 // If this is a scalar constant or a splat vector of constants, 1511 // handle it quickly. 1512 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 1513 Offset += ElementSize * CI->getSExtValue(); 1514 continue; 1515 } 1516 1517 if (Offset != 0) { 1518 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); 1519 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) 1520 .getReg(0); 1521 Offset = 0; 1522 } 1523 1524 Register IdxReg = getOrCreateVReg(*Idx); 1525 LLT IdxTy = MRI->getType(IdxReg); 1526 if (IdxTy != OffsetTy) { 1527 if (!IdxTy.isVector() && WantSplatVector) { 1528 IdxReg = MIRBuilder.buildSplatVector( 1529 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); 1530 } 1531 1532 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); 1533 } 1534 1535 // N = N + Idx * ElementSize; 1536 // Avoid doing it for ElementSize of 1. 
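      // (E.g. indexing an array of i32 with a dynamic index i lowers to
      // ptr_add(Base, mul(sext_or_trunc(i), 4)); for an i8 array the
      // multiply is skipped entirely.)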
1537 Register GepOffsetReg; 1538 if (ElementSize != 1) { 1539 auto ElementSizeMIB = MIRBuilder.buildConstant( 1540 getLLTForType(*OffsetIRTy, *DL), ElementSize); 1541 GepOffsetReg = 1542 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); 1543 } else 1544 GepOffsetReg = IdxReg; 1545 1546 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); 1547 } 1548 } 1549 1550 if (Offset != 0) { 1551 auto OffsetMIB = 1552 MIRBuilder.buildConstant(OffsetTy, Offset); 1553 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); 1554 return true; 1555 } 1556 1557 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 1558 return true; 1559 } 1560 1561 bool IRTranslator::translateMemFunc(const CallInst &CI, 1562 MachineIRBuilder &MIRBuilder, 1563 unsigned Opcode) { 1564 1565 // If the source is undef, then just emit a nop. 1566 if (isa<UndefValue>(CI.getArgOperand(1))) 1567 return true; 1568 1569 SmallVector<Register, 3> SrcRegs; 1570 1571 unsigned MinPtrSize = UINT_MAX; 1572 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) { 1573 Register SrcReg = getOrCreateVReg(**AI); 1574 LLT SrcTy = MRI->getType(SrcReg); 1575 if (SrcTy.isPointer()) 1576 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize); 1577 SrcRegs.push_back(SrcReg); 1578 } 1579 1580 LLT SizeTy = LLT::scalar(MinPtrSize); 1581 1582 // The size operand should be the minimum of the pointer sizes. 1583 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1]; 1584 if (MRI->getType(SizeOpReg) != SizeTy) 1585 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0); 1586 1587 auto ICall = MIRBuilder.buildInstr(Opcode); 1588 for (Register SrcReg : SrcRegs) 1589 ICall.addUse(SrcReg); 1590 1591 Align DstAlign; 1592 Align SrcAlign; 1593 unsigned IsVol = 1594 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) 1595 ->getZExtValue(); 1596 1597 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { 1598 DstAlign = MCI->getDestAlign().valueOrOne(); 1599 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1600 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) { 1601 DstAlign = MCI->getDestAlign().valueOrOne(); 1602 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1603 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { 1604 DstAlign = MMI->getDestAlign().valueOrOne(); 1605 SrcAlign = MMI->getSourceAlign().valueOrOne(); 1606 } else { 1607 auto *MSI = cast<MemSetInst>(&CI); 1608 DstAlign = MSI->getDestAlign().valueOrOne(); 1609 } 1610 1611 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) { 1612 // We need to propagate the tail call flag from the IR inst as an argument. 1613 // Otherwise, we have to pessimize and assume later that we cannot tail call 1614 // any memory intrinsics. 1615 ICall.addImm(CI.isTailCall() ? 1 : 0); 1616 } 1617 1618 // Create mem operands to store the alignment and volatile info. 1619 auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 1620 ICall.addMemOperand(MF->getMachineMemOperand( 1621 MachinePointerInfo(CI.getArgOperand(0)), 1622 MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); 1623 if (Opcode != TargetOpcode::G_MEMSET) 1624 ICall.addMemOperand(MF->getMachineMemOperand( 1625 MachinePointerInfo(CI.getArgOperand(1)), 1626 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); 1627 1628 return true; 1629 } 1630 1631 void IRTranslator::getStackGuard(Register DstReg, 1632 MachineIRBuilder &MIRBuilder) { 1633 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 1634 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 1635 auto MIB = 1636 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); 1637 1638 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1639 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 1640 if (!Global) 1641 return; 1642 1643 unsigned AddrSpace = Global->getType()->getPointerAddressSpace(); 1644 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace)); 1645 1646 MachinePointerInfo MPInfo(Global); 1647 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 1648 MachineMemOperand::MODereferenceable; 1649 MachineMemOperand *MemRef = MF->getMachineMemOperand( 1650 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace)); 1651 MIB.setMemRefs({MemRef}); 1652 } 1653 1654 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 1655 MachineIRBuilder &MIRBuilder) { 1656 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); 1657 MIRBuilder.buildInstr( 1658 Op, {ResRegs[0], ResRegs[1]}, 1659 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); 1660 1661 return true; 1662 } 1663 1664 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, 1665 MachineIRBuilder &MIRBuilder) { 1666 Register Dst = getOrCreateVReg(CI); 1667 Register Src0 = getOrCreateVReg(*CI.getOperand(0)); 1668 Register Src1 = getOrCreateVReg(*CI.getOperand(1)); 1669 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue(); 1670 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale }); 1671 return true; 1672 } 1673 1674 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { 1675 switch (ID) { 1676 default: 1677 break; 1678 case Intrinsic::bswap: 1679 return TargetOpcode::G_BSWAP; 1680 case Intrinsic::bitreverse: 1681 return TargetOpcode::G_BITREVERSE; 1682 case Intrinsic::fshl: 1683 return TargetOpcode::G_FSHL; 1684 case Intrinsic::fshr: 1685 return TargetOpcode::G_FSHR; 1686 case Intrinsic::ceil: 1687 return TargetOpcode::G_FCEIL; 1688 case Intrinsic::cos: 1689 return TargetOpcode::G_FCOS; 1690 case Intrinsic::ctpop: 1691 return TargetOpcode::G_CTPOP; 1692 case Intrinsic::exp: 1693 return TargetOpcode::G_FEXP; 1694 case Intrinsic::exp2: 1695 return TargetOpcode::G_FEXP2; 1696 case Intrinsic::fabs: 1697 return TargetOpcode::G_FABS; 1698 case Intrinsic::copysign: 1699 return TargetOpcode::G_FCOPYSIGN; 1700 case Intrinsic::minnum: 1701 return TargetOpcode::G_FMINNUM; 1702 case Intrinsic::maxnum: 1703 return TargetOpcode::G_FMAXNUM; 1704 case Intrinsic::minimum: 1705 return TargetOpcode::G_FMINIMUM; 1706 case Intrinsic::maximum: 1707 return TargetOpcode::G_FMAXIMUM; 1708 case Intrinsic::canonicalize: 1709 return TargetOpcode::G_FCANONICALIZE; 1710 case Intrinsic::floor: 1711 return TargetOpcode::G_FFLOOR; 1712 case Intrinsic::fma: 1713 return TargetOpcode::G_FMA; 1714 case Intrinsic::log: 1715 return 
TargetOpcode::G_FLOG;
1716 case Intrinsic::log2:
1717 return TargetOpcode::G_FLOG2;
1718 case Intrinsic::log10:
1719 return TargetOpcode::G_FLOG10;
1720 case Intrinsic::nearbyint:
1721 return TargetOpcode::G_FNEARBYINT;
1722 case Intrinsic::pow:
1723 return TargetOpcode::G_FPOW;
1724 case Intrinsic::powi:
1725 return TargetOpcode::G_FPOWI;
1726 case Intrinsic::rint:
1727 return TargetOpcode::G_FRINT;
1728 case Intrinsic::round:
1729 return TargetOpcode::G_INTRINSIC_ROUND;
1730 case Intrinsic::roundeven:
1731 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1732 case Intrinsic::sin:
1733 return TargetOpcode::G_FSIN;
1734 case Intrinsic::sqrt:
1735 return TargetOpcode::G_FSQRT;
1736 case Intrinsic::trunc:
1737 return TargetOpcode::G_INTRINSIC_TRUNC;
1738 case Intrinsic::readcyclecounter:
1739 return TargetOpcode::G_READCYCLECOUNTER;
1740 case Intrinsic::ptrmask:
1741 return TargetOpcode::G_PTRMASK;
1742 case Intrinsic::lrint:
1743 return TargetOpcode::G_INTRINSIC_LRINT;
1744 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1745 case Intrinsic::vector_reduce_fmin:
1746 return TargetOpcode::G_VECREDUCE_FMIN;
1747 case Intrinsic::vector_reduce_fmax:
1748 return TargetOpcode::G_VECREDUCE_FMAX;
1749 case Intrinsic::vector_reduce_add:
1750 return TargetOpcode::G_VECREDUCE_ADD;
1751 case Intrinsic::vector_reduce_mul:
1752 return TargetOpcode::G_VECREDUCE_MUL;
1753 case Intrinsic::vector_reduce_and:
1754 return TargetOpcode::G_VECREDUCE_AND;
1755 case Intrinsic::vector_reduce_or:
1756 return TargetOpcode::G_VECREDUCE_OR;
1757 case Intrinsic::vector_reduce_xor:
1758 return TargetOpcode::G_VECREDUCE_XOR;
1759 case Intrinsic::vector_reduce_smax:
1760 return TargetOpcode::G_VECREDUCE_SMAX;
1761 case Intrinsic::vector_reduce_smin:
1762 return TargetOpcode::G_VECREDUCE_SMIN;
1763 case Intrinsic::vector_reduce_umax:
1764 return TargetOpcode::G_VECREDUCE_UMAX;
1765 case Intrinsic::vector_reduce_umin:
1766 return TargetOpcode::G_VECREDUCE_UMIN;
1767 case Intrinsic::lround:
1768 return TargetOpcode::G_LROUND;
1769 case Intrinsic::llround:
1770 return TargetOpcode::G_LLROUND;
1771 }
1772 return Intrinsic::not_intrinsic;
1773 }
1774
1775 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1776 Intrinsic::ID ID,
1777 MachineIRBuilder &MIRBuilder) {
1778
1779 unsigned Op = getSimpleIntrinsicOpcode(ID);
1780
1781 // Is this a simple intrinsic?
1782 if (Op == Intrinsic::not_intrinsic)
1783 return false;
1784
1785 // Yes. Let's translate it.
1786 SmallVector<llvm::SrcOp, 4> VRegs;
1787 for (auto &Arg : CI.arg_operands())
1788 VRegs.push_back(getOrCreateVReg(*Arg));
1789
1790 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1791 MachineInstr::copyFlagsFromInstruction(CI));
1792 return true;
1793 }
1794
1795 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
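// For illustration, "llvm.experimental.constrained.fadd" maps to
// G_STRICT_FADD below; intrinsics without a mapping yield 0 so the caller can
// bail out.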
1796 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1797 switch (ID) {
1798 case Intrinsic::experimental_constrained_fadd:
1799 return TargetOpcode::G_STRICT_FADD;
1800 case Intrinsic::experimental_constrained_fsub:
1801 return TargetOpcode::G_STRICT_FSUB;
1802 case Intrinsic::experimental_constrained_fmul:
1803 return TargetOpcode::G_STRICT_FMUL;
1804 case Intrinsic::experimental_constrained_fdiv:
1805 return TargetOpcode::G_STRICT_FDIV;
1806 case Intrinsic::experimental_constrained_frem:
1807 return TargetOpcode::G_STRICT_FREM;
1808 case Intrinsic::experimental_constrained_fma:
1809 return TargetOpcode::G_STRICT_FMA;
1810 case Intrinsic::experimental_constrained_sqrt:
1811 return TargetOpcode::G_STRICT_FSQRT;
1812 default:
1813 return 0;
1814 }
1815 }
1816
1817 bool IRTranslator::translateConstrainedFPIntrinsic(
1818 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1819 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1820
1821 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1822 if (!Opcode)
1823 return false;
1824
1825 unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1826 if (EB == fp::ExceptionBehavior::ebIgnore)
1827 Flags |= MachineInstr::NoFPExcept;
1828
1829 SmallVector<llvm::SrcOp, 4> VRegs;
1830 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1831 if (!FPI.isUnaryOp())
1832 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1833 if (FPI.isTernaryOp())
1834 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1835
1836 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1837 return true;
1838 }
1839
1840 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1841 MachineIRBuilder &MIRBuilder) {
1842 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1843 if (ORE->enabled()) {
1844 const Function &F = *MI->getParent()->getParent();
1845 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1846 if (MemoryOpRemark::canHandle(MI, TLI)) {
1847 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
1848 R.visit(MI);
1849 }
1850 }
1851 }
1852
1853 // If this is a simple intrinsic (that is, we just need to add a def of
1854 // a vreg, and uses for each arg operand), then translate it.
1855 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1856 return true;
1857
1858 switch (ID) {
1859 default:
1860 break;
1861 case Intrinsic::lifetime_start:
1862 case Intrinsic::lifetime_end: {
1863 // No stack colouring in O0, discard region information.
1864 if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1865 return true;
1866
1867 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1868 : TargetOpcode::LIFETIME_END;
1869
1870 // Get the underlying objects for the location passed on the lifetime
1871 // marker.
1872 SmallVector<const Value *, 4> Allocas;
1873 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1874
1875 // Iterate over each underlying object, creating lifetime markers for each
1876 // static alloca. Quit if we find a non-static alloca.
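// (E.g. "llvm.lifetime.start(i64 8, i8* %slot)" on a static alloca becomes a
// LIFETIME_START referencing the alloca's frame index, roughly
// "LIFETIME_START %stack.N" in printed MIR.)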
1877 for (const Value *V : Allocas) { 1878 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 1879 if (!AI) 1880 continue; 1881 1882 if (!AI->isStaticAlloca()) 1883 return true; 1884 1885 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 1886 } 1887 return true; 1888 } 1889 case Intrinsic::dbg_declare: { 1890 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 1891 assert(DI.getVariable() && "Missing variable"); 1892 1893 const Value *Address = DI.getAddress(); 1894 if (!Address || isa<UndefValue>(Address)) { 1895 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 1896 return true; 1897 } 1898 1899 assert(DI.getVariable()->isValidLocationForIntrinsic( 1900 MIRBuilder.getDebugLoc()) && 1901 "Expected inlined-at fields to agree"); 1902 auto AI = dyn_cast<AllocaInst>(Address); 1903 if (AI && AI->isStaticAlloca()) { 1904 // Static allocas are tracked at the MF level, no need for DBG_VALUE 1905 // instructions (in fact, they get ignored if they *do* exist). 1906 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 1907 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 1908 } else { 1909 // A dbg.declare describes the address of a source variable, so lower it 1910 // into an indirect DBG_VALUE. 1911 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 1912 DI.getVariable(), DI.getExpression()); 1913 } 1914 return true; 1915 } 1916 case Intrinsic::dbg_label: { 1917 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 1918 assert(DI.getLabel() && "Missing label"); 1919 1920 assert(DI.getLabel()->isValidLocationForIntrinsic( 1921 MIRBuilder.getDebugLoc()) && 1922 "Expected inlined-at fields to agree"); 1923 1924 MIRBuilder.buildDbgLabel(DI.getLabel()); 1925 return true; 1926 } 1927 case Intrinsic::vaend: 1928 // No target I know of cares about va_end. Certainly no in-tree target 1929 // does. Simplest intrinsic ever! 1930 return true; 1931 case Intrinsic::vastart: { 1932 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1933 Value *Ptr = CI.getArgOperand(0); 1934 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 1935 1936 // FIXME: Get alignment 1937 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) 1938 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), 1939 MachineMemOperand::MOStore, 1940 ListSize, Align(1))); 1941 return true; 1942 } 1943 case Intrinsic::dbg_value: { 1944 // This form of DBG_VALUE is target-independent. 1945 const DbgValueInst &DI = cast<DbgValueInst>(CI); 1946 const Value *V = DI.getValue(); 1947 assert(DI.getVariable()->isValidLocationForIntrinsic( 1948 MIRBuilder.getDebugLoc()) && 1949 "Expected inlined-at fields to agree"); 1950 if (!V || DI.hasArgList()) { 1951 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to 1952 // terminate any prior location. 1953 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 1954 } else if (const auto *CI = dyn_cast<Constant>(V)) { 1955 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 1956 } else { 1957 for (Register Reg : getOrCreateVRegs(*V)) { 1958 // FIXME: This does not handle register-indirect values at offset 0. The 1959 // direct/indirect thing shouldn't really be handled by something as 1960 // implicit as reg+noreg vs reg+imm in the first place, but it seems 1961 // pretty baked in right now. 
1962 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 1963 } 1964 } 1965 return true; 1966 } 1967 case Intrinsic::uadd_with_overflow: 1968 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 1969 case Intrinsic::sadd_with_overflow: 1970 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 1971 case Intrinsic::usub_with_overflow: 1972 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 1973 case Intrinsic::ssub_with_overflow: 1974 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 1975 case Intrinsic::umul_with_overflow: 1976 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 1977 case Intrinsic::smul_with_overflow: 1978 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 1979 case Intrinsic::uadd_sat: 1980 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); 1981 case Intrinsic::sadd_sat: 1982 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); 1983 case Intrinsic::usub_sat: 1984 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); 1985 case Intrinsic::ssub_sat: 1986 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); 1987 case Intrinsic::ushl_sat: 1988 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder); 1989 case Intrinsic::sshl_sat: 1990 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder); 1991 case Intrinsic::umin: 1992 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder); 1993 case Intrinsic::umax: 1994 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder); 1995 case Intrinsic::smin: 1996 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder); 1997 case Intrinsic::smax: 1998 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder); 1999 case Intrinsic::abs: 2000 // TODO: Preserve "int min is poison" arg in GMIR? 2001 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder); 2002 case Intrinsic::smul_fix: 2003 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder); 2004 case Intrinsic::umul_fix: 2005 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder); 2006 case Intrinsic::smul_fix_sat: 2007 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder); 2008 case Intrinsic::umul_fix_sat: 2009 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder); 2010 case Intrinsic::sdiv_fix: 2011 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder); 2012 case Intrinsic::udiv_fix: 2013 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder); 2014 case Intrinsic::sdiv_fix_sat: 2015 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); 2016 case Intrinsic::udiv_fix_sat: 2017 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); 2018 case Intrinsic::fmuladd: { 2019 const TargetMachine &TM = MF->getTarget(); 2020 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 2021 Register Dst = getOrCreateVReg(CI); 2022 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 2023 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 2024 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 2025 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 2026 TLI.isFMAFasterThanFMulAndFAdd(*MF, 2027 TLI.getValueType(*DL, CI.getType()))) { 2028 // TODO: Revisit this to see if we should move this part of the 2029 // lowering to the combiner. 
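// (Illustrative summary: "llvm.fmuladd.f32(a, b, c)" becomes a single G_FMA
// on this path when fusion is allowed and profitable; the else branch below
// emits separate G_FMUL and G_FADD instructions instead.)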
2030 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, 2031 MachineInstr::copyFlagsFromInstruction(CI)); 2032 } else { 2033 LLT Ty = getLLTForType(*CI.getType(), *DL); 2034 auto FMul = MIRBuilder.buildFMul( 2035 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); 2036 MIRBuilder.buildFAdd(Dst, FMul, Op2, 2037 MachineInstr::copyFlagsFromInstruction(CI)); 2038 } 2039 return true; 2040 } 2041 case Intrinsic::convert_from_fp16: 2042 // FIXME: This intrinsic should probably be removed from the IR. 2043 MIRBuilder.buildFPExt(getOrCreateVReg(CI), 2044 getOrCreateVReg(*CI.getArgOperand(0)), 2045 MachineInstr::copyFlagsFromInstruction(CI)); 2046 return true; 2047 case Intrinsic::convert_to_fp16: 2048 // FIXME: This intrinsic should probably be removed from the IR. 2049 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI), 2050 getOrCreateVReg(*CI.getArgOperand(0)), 2051 MachineInstr::copyFlagsFromInstruction(CI)); 2052 return true; 2053 case Intrinsic::memcpy_inline: 2054 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE); 2055 case Intrinsic::memcpy: 2056 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY); 2057 case Intrinsic::memmove: 2058 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE); 2059 case Intrinsic::memset: 2060 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET); 2061 case Intrinsic::eh_typeid_for: { 2062 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 2063 Register Reg = getOrCreateVReg(CI); 2064 unsigned TypeID = MF->getTypeIDFor(GV); 2065 MIRBuilder.buildConstant(Reg, TypeID); 2066 return true; 2067 } 2068 case Intrinsic::objectsize: 2069 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2070 2071 case Intrinsic::is_constant: 2072 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2073 2074 case Intrinsic::stackguard: 2075 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 2076 return true; 2077 case Intrinsic::stackprotector: { 2078 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2079 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); 2080 getStackGuard(GuardVal, MIRBuilder); 2081 2082 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 2083 int FI = getOrCreateFrameIndex(*Slot); 2084 MF->getFrameInfo().setStackProtectorIndex(FI); 2085 2086 MIRBuilder.buildStore( 2087 GuardVal, getOrCreateVReg(*Slot), 2088 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 2089 MachineMemOperand::MOStore | 2090 MachineMemOperand::MOVolatile, 2091 PtrTy, Align(8))); 2092 return true; 2093 } 2094 case Intrinsic::stacksave: { 2095 // Save the stack pointer to the location provided by the intrinsic. 2096 Register Reg = getOrCreateVReg(CI); 2097 Register StackPtr = MF->getSubtarget() 2098 .getTargetLowering() 2099 ->getStackPointerRegisterToSaveRestore(); 2100 2101 // If the target doesn't specify a stack pointer, then fall back. 2102 if (!StackPtr) 2103 return false; 2104 2105 MIRBuilder.buildCopy(Reg, StackPtr); 2106 return true; 2107 } 2108 case Intrinsic::stackrestore: { 2109 // Restore the stack pointer from the location provided by the intrinsic. 2110 Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); 2111 Register StackPtr = MF->getSubtarget() 2112 .getTargetLowering() 2113 ->getStackPointerRegisterToSaveRestore(); 2114 2115 // If the target doesn't specify a stack pointer, then fall back. 
2116 if (!StackPtr) 2117 return false; 2118 2119 MIRBuilder.buildCopy(StackPtr, Reg); 2120 return true; 2121 } 2122 case Intrinsic::cttz: 2123 case Intrinsic::ctlz: { 2124 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 2125 bool isTrailing = ID == Intrinsic::cttz; 2126 unsigned Opcode = isTrailing 2127 ? Cst->isZero() ? TargetOpcode::G_CTTZ 2128 : TargetOpcode::G_CTTZ_ZERO_UNDEF 2129 : Cst->isZero() ? TargetOpcode::G_CTLZ 2130 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 2131 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, 2132 {getOrCreateVReg(*CI.getArgOperand(0))}); 2133 return true; 2134 } 2135 case Intrinsic::invariant_start: { 2136 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2137 Register Undef = MRI->createGenericVirtualRegister(PtrTy); 2138 MIRBuilder.buildUndef(Undef); 2139 return true; 2140 } 2141 case Intrinsic::invariant_end: 2142 return true; 2143 case Intrinsic::expect: 2144 case Intrinsic::annotation: 2145 case Intrinsic::ptr_annotation: 2146 case Intrinsic::launder_invariant_group: 2147 case Intrinsic::strip_invariant_group: { 2148 // Drop the intrinsic, but forward the value. 2149 MIRBuilder.buildCopy(getOrCreateVReg(CI), 2150 getOrCreateVReg(*CI.getArgOperand(0))); 2151 return true; 2152 } 2153 case Intrinsic::assume: 2154 case Intrinsic::experimental_noalias_scope_decl: 2155 case Intrinsic::var_annotation: 2156 case Intrinsic::sideeffect: 2157 // Discard annotate attributes, assumptions, and artificial side-effects. 2158 return true; 2159 case Intrinsic::read_volatile_register: 2160 case Intrinsic::read_register: { 2161 Value *Arg = CI.getArgOperand(0); 2162 MIRBuilder 2163 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) 2164 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); 2165 return true; 2166 } 2167 case Intrinsic::write_register: { 2168 Value *Arg = CI.getArgOperand(0); 2169 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) 2170 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) 2171 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 2172 return true; 2173 } 2174 case Intrinsic::localescape: { 2175 MachineBasicBlock &EntryMBB = MF->front(); 2176 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName()); 2177 2178 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 2179 // is the same on all targets. 2180 for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) { 2181 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts(); 2182 if (isa<ConstantPointerNull>(Arg)) 2183 continue; // Skip null pointers. They represent a hole in index space. 2184 2185 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg)); 2186 MCSymbol *FrameAllocSym = 2187 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName, 2188 Idx); 2189 2190 // This should be inserted at the start of the entry block. 2191 auto LocalEscape = 2192 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE) 2193 .addSym(FrameAllocSym) 2194 .addFrameIndex(FI); 2195 2196 EntryMBB.insert(EntryMBB.begin(), LocalEscape); 2197 } 2198 2199 return true; 2200 } 2201 case Intrinsic::vector_reduce_fadd: 2202 case Intrinsic::vector_reduce_fmul: { 2203 // Need to check for the reassoc flag to decide whether we want a 2204 // sequential reduction opcode or not. 
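// (Roughly: without 'reassoc', "llvm.vector.reduce.fadd(%acc, %v)" must be
// lowered as G_VECREDUCE_SEQ_FADD %acc, %v; with 'reassoc' it is split into
// G_VECREDUCE_FADD %v followed by a scalar G_FADD with %acc.)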
2205 Register Dst = getOrCreateVReg(CI); 2206 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0)); 2207 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1)); 2208 unsigned Opc = 0; 2209 if (!CI.hasAllowReassoc()) { 2210 // The sequential ordering case. 2211 Opc = ID == Intrinsic::vector_reduce_fadd 2212 ? TargetOpcode::G_VECREDUCE_SEQ_FADD 2213 : TargetOpcode::G_VECREDUCE_SEQ_FMUL; 2214 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc}, 2215 MachineInstr::copyFlagsFromInstruction(CI)); 2216 return true; 2217 } 2218 // We split the operation into a separate G_FADD/G_FMUL + the reduce, 2219 // since the associativity doesn't matter. 2220 unsigned ScalarOpc; 2221 if (ID == Intrinsic::vector_reduce_fadd) { 2222 Opc = TargetOpcode::G_VECREDUCE_FADD; 2223 ScalarOpc = TargetOpcode::G_FADD; 2224 } else { 2225 Opc = TargetOpcode::G_VECREDUCE_FMUL; 2226 ScalarOpc = TargetOpcode::G_FMUL; 2227 } 2228 LLT DstTy = MRI->getType(Dst); 2229 auto Rdx = MIRBuilder.buildInstr( 2230 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI)); 2231 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx}, 2232 MachineInstr::copyFlagsFromInstruction(CI)); 2233 2234 return true; 2235 } 2236 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 2237 case Intrinsic::INTRINSIC: 2238 #include "llvm/IR/ConstrainedOps.def" 2239 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI), 2240 MIRBuilder); 2241 2242 } 2243 return false; 2244 } 2245 2246 bool IRTranslator::translateInlineAsm(const CallBase &CB, 2247 MachineIRBuilder &MIRBuilder) { 2248 2249 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); 2250 2251 if (!ALI) { 2252 LLVM_DEBUG( 2253 dbgs() << "Inline asm lowering is not supported for this target yet\n"); 2254 return false; 2255 } 2256 2257 return ALI->lowerInlineAsm( 2258 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); 2259 } 2260 2261 bool IRTranslator::translateCallBase(const CallBase &CB, 2262 MachineIRBuilder &MIRBuilder) { 2263 ArrayRef<Register> Res = getOrCreateVRegs(CB); 2264 2265 SmallVector<ArrayRef<Register>, 8> Args; 2266 Register SwiftInVReg = 0; 2267 Register SwiftErrorVReg = 0; 2268 for (auto &Arg : CB.args()) { 2269 if (CLI->supportSwiftError() && isSwiftError(Arg)) { 2270 assert(SwiftInVReg == 0 && "Expected only one swift error argument"); 2271 LLT Ty = getLLTForType(*Arg->getType(), *DL); 2272 SwiftInVReg = MRI->createGenericVirtualRegister(Ty); 2273 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( 2274 &CB, &MIRBuilder.getMBB(), Arg)); 2275 Args.emplace_back(makeArrayRef(SwiftInVReg)); 2276 SwiftErrorVReg = 2277 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); 2278 continue; 2279 } 2280 Args.push_back(getOrCreateVRegs(*Arg)); 2281 } 2282 2283 if (auto *CI = dyn_cast<CallInst>(&CB)) { 2284 if (ORE->enabled()) { 2285 const Function &F = *CI->getParent()->getParent(); 2286 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 2287 if (MemoryOpRemark::canHandle(CI, TLI)) { 2288 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI); 2289 R.visit(CI); 2290 } 2291 } 2292 } 2293 2294 // We don't set HasCalls on MFI here yet because call lowering may decide to 2295 // optimize into tail calls. Instead, we defer that to selection where a final 2296 // scan is done to check if any instructions are calls. 
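// (Note: the callee vreg is produced lazily by the callback passed to
// lowerCall below, so it is only materialized when call lowering actually
// needs it, e.g. for indirect calls.)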
2297 bool Success = 2298 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, 2299 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); 2300 2301 // Check if we just inserted a tail call. 2302 if (Success) { 2303 assert(!HasTailCall && "Can't tail call return twice from block?"); 2304 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 2305 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); 2306 } 2307 2308 return Success; 2309 } 2310 2311 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 2312 const CallInst &CI = cast<CallInst>(U); 2313 auto TII = MF->getTarget().getIntrinsicInfo(); 2314 const Function *F = CI.getCalledFunction(); 2315 2316 // FIXME: support Windows dllimport function calls. 2317 if (F && (F->hasDLLImportStorageClass() || 2318 (MF->getTarget().getTargetTriple().isOSWindows() && 2319 F->hasExternalWeakLinkage()))) 2320 return false; 2321 2322 // FIXME: support control flow guard targets. 2323 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2324 return false; 2325 2326 if (CI.isInlineAsm()) 2327 return translateInlineAsm(CI, MIRBuilder); 2328 2329 if (F && F->hasFnAttribute("dontcall")) { 2330 unsigned LocCookie = 0; 2331 if (MDNode *MD = CI.getMetadata("srcloc")) 2332 LocCookie = 2333 mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue(); 2334 DiagnosticInfoDontCall D(F->getName(), LocCookie); 2335 F->getContext().diagnose(D); 2336 } 2337 2338 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2339 if (F && F->isIntrinsic()) { 2340 ID = F->getIntrinsicID(); 2341 if (TII && ID == Intrinsic::not_intrinsic) 2342 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 2343 } 2344 2345 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) 2346 return translateCallBase(CI, MIRBuilder); 2347 2348 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 2349 2350 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 2351 return true; 2352 2353 ArrayRef<Register> ResultRegs; 2354 if (!CI.getType()->isVoidTy()) 2355 ResultRegs = getOrCreateVRegs(CI); 2356 2357 // Ignore the callsite attributes. Backend code is most likely not expecting 2358 // an intrinsic to sometimes have side effects and sometimes not. 2359 MachineInstrBuilder MIB = 2360 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); 2361 if (isa<FPMathOperator>(CI)) 2362 MIB->copyIRFlags(CI); 2363 2364 for (auto &Arg : enumerate(CI.arg_operands())) { 2365 // If this is required to be an immediate, don't materialize it in a 2366 // register. 2367 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { 2368 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { 2369 // imm arguments are more convenient than cimm (and realistically 2370 // probably sufficient), so use them. 2371 assert(CI->getBitWidth() <= 64 && 2372 "large intrinsic immediates not handled"); 2373 MIB.addImm(CI->getSExtValue()); 2374 } else { 2375 MIB.addFPImm(cast<ConstantFP>(Arg.value())); 2376 } 2377 } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) { 2378 auto *MDN = dyn_cast<MDNode>(MD->getMetadata()); 2379 if (!MDN) // This was probably an MDString. 2380 return false; 2381 MIB.addMetadata(MDN); 2382 } else { 2383 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); 2384 if (VRegs.size() > 1) 2385 return false; 2386 MIB.addUse(VRegs[0]); 2387 } 2388 } 2389 2390 // Add a MachineMemOperand if it is a target mem intrinsic. 
2391 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 2392 TargetLowering::IntrinsicInfo Info; 2393 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 2394 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 2395 Align Alignment = Info.align.getValueOr( 2396 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); 2397 LLT MemTy = Info.memVT.isSimple() 2398 ? getLLTForMVT(Info.memVT.getSimpleVT()) 2399 : LLT::scalar(Info.memVT.getStoreSizeInBits()); 2400 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 2401 Info.flags, MemTy, Alignment)); 2402 } 2403 2404 return true; 2405 } 2406 2407 bool IRTranslator::findUnwindDestinations( 2408 const BasicBlock *EHPadBB, 2409 BranchProbability Prob, 2410 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> 2411 &UnwindDests) { 2412 EHPersonality Personality = classifyEHPersonality( 2413 EHPadBB->getParent()->getFunction().getPersonalityFn()); 2414 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX; 2415 bool IsCoreCLR = Personality == EHPersonality::CoreCLR; 2416 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; 2417 bool IsSEH = isAsynchronousEHPersonality(Personality); 2418 2419 if (IsWasmCXX) { 2420 // Ignore this for now. 2421 return false; 2422 } 2423 2424 while (EHPadBB) { 2425 const Instruction *Pad = EHPadBB->getFirstNonPHI(); 2426 BasicBlock *NewEHPadBB = nullptr; 2427 if (isa<LandingPadInst>(Pad)) { 2428 // Stop on landingpads. They are not funclets. 2429 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob); 2430 break; 2431 } 2432 if (isa<CleanupPadInst>(Pad)) { 2433 // Stop on cleanup pads. Cleanups are always funclet entries for all known 2434 // personalities. 2435 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob); 2436 UnwindDests.back().first->setIsEHScopeEntry(); 2437 UnwindDests.back().first->setIsEHFuncletEntry(); 2438 break; 2439 } 2440 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) { 2441 // Add the catchpad handlers to the possible destinations. 2442 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { 2443 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob); 2444 // For MSVC++ and the CLR, catchblocks are funclets and need prologues. 2445 if (IsMSVCCXX || IsCoreCLR) 2446 UnwindDests.back().first->setIsEHFuncletEntry(); 2447 if (!IsSEH) 2448 UnwindDests.back().first->setIsEHScopeEntry(); 2449 } 2450 NewEHPadBB = CatchSwitch->getUnwindDest(); 2451 } else { 2452 continue; 2453 } 2454 2455 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2456 if (BPI && NewEHPadBB) 2457 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB); 2458 EHPadBB = NewEHPadBB; 2459 } 2460 return true; 2461 } 2462 2463 bool IRTranslator::translateInvoke(const User &U, 2464 MachineIRBuilder &MIRBuilder) { 2465 const InvokeInst &I = cast<InvokeInst>(U); 2466 MCContext &Context = MF->getContext(); 2467 2468 const BasicBlock *ReturnBB = I.getSuccessor(0); 2469 const BasicBlock *EHPadBB = I.getSuccessor(1); 2470 2471 const Function *Fn = I.getCalledFunction(); 2472 2473 // FIXME: support invoking patchpoint and statepoint intrinsics. 2474 if (Fn && Fn->isIntrinsic()) 2475 return false; 2476 2477 // FIXME: support whatever these are. 2478 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 2479 return false; 2480 2481 // FIXME: support control flow guard targets. 2482 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2483 return false; 2484 2485 // FIXME: support Windows exception handling. 
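// (Funclet-based personalities, e.g. MSVC C++ EH, begin the pad with a
// catchswitch or cleanuppad rather than a landingpad, so such invokes are
// rejected here for now.)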
2486 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI())) 2487 return false; 2488 2489 bool LowerInlineAsm = false; 2490 if (I.isInlineAsm()) { 2491 const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand()); 2492 if (!IA->canThrow()) { 2493 // Fast path without emitting EH_LABELs. 2494 2495 if (!translateInlineAsm(I, MIRBuilder)) 2496 return false; 2497 2498 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(), 2499 *ReturnMBB = &getMBB(*ReturnBB); 2500 2501 // Update successor info. 2502 addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne()); 2503 2504 MIRBuilder.buildBr(*ReturnMBB); 2505 return true; 2506 } else { 2507 LowerInlineAsm = true; 2508 } 2509 } 2510 2511 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 2512 // the region covered by the try. 2513 MCSymbol *BeginSymbol = Context.createTempSymbol(); 2514 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 2515 2516 if (LowerInlineAsm) { 2517 if (!translateInlineAsm(I, MIRBuilder)) 2518 return false; 2519 } else if (!translateCallBase(I, MIRBuilder)) 2520 return false; 2521 2522 MCSymbol *EndSymbol = Context.createTempSymbol(); 2523 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 2524 2525 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 2526 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2527 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(); 2528 BranchProbability EHPadBBProb = 2529 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) 2530 : BranchProbability::getZero(); 2531 2532 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests)) 2533 return false; 2534 2535 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 2536 &ReturnMBB = getMBB(*ReturnBB); 2537 // Update successor info. 2538 addSuccessorWithProb(InvokeMBB, &ReturnMBB); 2539 for (auto &UnwindDest : UnwindDests) { 2540 UnwindDest.first->setIsEHPad(); 2541 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second); 2542 } 2543 InvokeMBB->normalizeSuccProbs(); 2544 2545 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 2546 MIRBuilder.buildBr(ReturnMBB); 2547 return true; 2548 } 2549 2550 bool IRTranslator::translateCallBr(const User &U, 2551 MachineIRBuilder &MIRBuilder) { 2552 // FIXME: Implement this. 2553 return false; 2554 } 2555 2556 bool IRTranslator::translateLandingPad(const User &U, 2557 MachineIRBuilder &MIRBuilder) { 2558 const LandingPadInst &LP = cast<LandingPadInst>(U); 2559 2560 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 2561 2562 MBB.setIsEHPad(); 2563 2564 // If there aren't registers to copy the values into (e.g., during SjLj 2565 // exceptions), then don't bother. 2566 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2567 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 2568 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 2569 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 2570 return true; 2571 2572 // If landingpad's return type is token type, we don't create DAG nodes 2573 // for its exception pointer and selector value. The extraction of exception 2574 // pointer or selector value from token type landingpads is not currently 2575 // supported. 2576 if (LP.getType()->isTokenTy()) 2577 return true; 2578 2579 // Add a label to mark the beginning of the landing pad. Deletion of the 2580 // landing pad can thus be detected via the MachineModuleInfo. 
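// (The begin/end EH_LABELs emitted around the call in translateInvoke record
// which call sites this landing pad covers; the label added here marks the
// pad itself.)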
2581 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2582 .addSym(MF->addLandingPad(&MBB));
2583
2584 // If the unwinder does not preserve all registers, ensure that the
2585 // function marks the clobbered registers as used.
2586 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2587 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2588 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2589
2590 LLT Ty = getLLTForType(*LP.getType(), *DL);
2591 Register Undef = MRI->createGenericVirtualRegister(Ty);
2592 MIRBuilder.buildUndef(Undef);
2593
2594 SmallVector<LLT, 2> Tys;
2595 for (Type *Ty : cast<StructType>(LP.getType())->elements())
2596 Tys.push_back(getLLTForType(*Ty, *DL));
2597 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2598
2599 // Mark exception register as live in.
2600 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2601 if (!ExceptionReg)
2602 return false;
2603
2604 MBB.addLiveIn(ExceptionReg);
2605 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2606 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2607
2608 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2609 if (!SelectorReg)
2610 return false;
2611
2612 MBB.addLiveIn(SelectorReg);
2613 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2614 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2615 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2616
2617 return true;
2618 }
2619
2620 bool IRTranslator::translateAlloca(const User &U,
2621 MachineIRBuilder &MIRBuilder) {
2622 auto &AI = cast<AllocaInst>(U);
2623
2624 if (AI.isSwiftError())
2625 return true;
2626
2627 if (AI.isStaticAlloca()) {
2628 Register Res = getOrCreateVReg(AI);
2629 int FI = getOrCreateFrameIndex(AI);
2630 MIRBuilder.buildFrameIndex(Res, FI);
2631 return true;
2632 }
2633
2634 // FIXME: support stack probing for Windows.
2635 if (MF->getTarget().getTargetTriple().isOSWindows())
2636 return false;
2637
2638 // Now we're in the harder dynamic case.
2639 Register NumElts = getOrCreateVReg(*AI.getArraySize());
2640 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2641 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2642 if (MRI->getType(NumElts) != IntPtrTy) {
2643 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2644 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2645 NumElts = ExtElts;
2646 }
2647
2648 Type *Ty = AI.getAllocatedType();
2649
2650 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2651 Register TySize =
2652 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2653 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2654
2655 // Round the size of the allocation up to the stack alignment size
2656 // by adding SA-1 to the size. This doesn't overflow because we're computing
2657 // an address inside an alloca.
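// (Illustrative: with a 16-byte stack alignment this computes
// (Size + 15) & ~15 before feeding the result to G_DYN_STACKALLOC below.)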
2658 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); 2659 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1); 2660 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, 2661 MachineInstr::NoUWrap); 2662 auto AlignCst = 2663 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1)); 2664 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); 2665 2666 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty)); 2667 if (Alignment <= StackAlign) 2668 Alignment = Align(1); 2669 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment); 2670 2671 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI); 2672 assert(MF->getFrameInfo().hasVarSizedObjects()); 2673 return true; 2674 } 2675 2676 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { 2677 // FIXME: We may need more info about the type. Because of how LLT works, 2678 // we're completely discarding the i64/double distinction here (amongst 2679 // others). Fortunately the ABIs I know of where that matters don't use va_arg 2680 // anyway but that's not guaranteed. 2681 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, 2682 {getOrCreateVReg(*U.getOperand(0)), 2683 DL->getABITypeAlign(U.getType()).value()}); 2684 return true; 2685 } 2686 2687 bool IRTranslator::translateInsertElement(const User &U, 2688 MachineIRBuilder &MIRBuilder) { 2689 // If it is a <1 x Ty> vector, use the scalar as it is 2690 // not a legal vector type in LLT. 2691 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1) 2692 return translateCopy(U, *U.getOperand(1), MIRBuilder); 2693 2694 Register Res = getOrCreateVReg(U); 2695 Register Val = getOrCreateVReg(*U.getOperand(0)); 2696 Register Elt = getOrCreateVReg(*U.getOperand(1)); 2697 Register Idx = getOrCreateVReg(*U.getOperand(2)); 2698 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 2699 return true; 2700 } 2701 2702 bool IRTranslator::translateExtractElement(const User &U, 2703 MachineIRBuilder &MIRBuilder) { 2704 // If it is a <1 x Ty> vector, use the scalar as it is 2705 // not a legal vector type in LLT. 
2706 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1) 2707 return translateCopy(U, *U.getOperand(0), MIRBuilder); 2708 2709 Register Res = getOrCreateVReg(U); 2710 Register Val = getOrCreateVReg(*U.getOperand(0)); 2711 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2712 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 2713 Register Idx; 2714 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 2715 if (CI->getBitWidth() != PreferredVecIdxWidth) { 2716 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 2717 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 2718 Idx = getOrCreateVReg(*NewIdxCI); 2719 } 2720 } 2721 if (!Idx) 2722 Idx = getOrCreateVReg(*U.getOperand(1)); 2723 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 2724 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 2725 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); 2726 } 2727 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 2728 return true; 2729 } 2730 2731 bool IRTranslator::translateShuffleVector(const User &U, 2732 MachineIRBuilder &MIRBuilder) { 2733 ArrayRef<int> Mask; 2734 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) 2735 Mask = SVI->getShuffleMask(); 2736 else 2737 Mask = cast<ConstantExpr>(U).getShuffleMask(); 2738 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); 2739 MIRBuilder 2740 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, 2741 {getOrCreateVReg(*U.getOperand(0)), 2742 getOrCreateVReg(*U.getOperand(1))}) 2743 .addShuffleMask(MaskAlloc); 2744 return true; 2745 } 2746 2747 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 2748 const PHINode &PI = cast<PHINode>(U); 2749 2750 SmallVector<MachineInstr *, 4> Insts; 2751 for (auto Reg : getOrCreateVRegs(PI)) { 2752 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); 2753 Insts.push_back(MIB.getInstr()); 2754 } 2755 2756 PendingPHIs.emplace_back(&PI, std::move(Insts)); 2757 return true; 2758 } 2759 2760 bool IRTranslator::translateAtomicCmpXchg(const User &U, 2761 MachineIRBuilder &MIRBuilder) { 2762 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 2763 2764 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2765 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2766 2767 auto Res = getOrCreateVRegs(I); 2768 Register OldValRes = Res[0]; 2769 Register SuccessRes = Res[1]; 2770 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2771 Register Cmp = getOrCreateVReg(*I.getCompareOperand()); 2772 Register NewVal = getOrCreateVReg(*I.getNewValOperand()); 2773 2774 AAMDNodes AAMetadata; 2775 I.getAAMetadata(AAMetadata); 2776 2777 MIRBuilder.buildAtomicCmpXchgWithSuccess( 2778 OldValRes, SuccessRes, Addr, Cmp, NewVal, 2779 *MF->getMachineMemOperand( 2780 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp), 2781 getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(), 2782 I.getSuccessOrdering(), I.getFailureOrdering())); 2783 return true; 2784 } 2785 2786 bool IRTranslator::translateAtomicRMW(const User &U, 2787 MachineIRBuilder &MIRBuilder) { 2788 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 2789 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2790 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2791 2792 Register Res = getOrCreateVReg(I); 2793 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2794 Register Val = getOrCreateVReg(*I.getValOperand()); 2795 2796 unsigned Opcode = 0; 2797 switch (I.getOperation()) { 2798 
default: 2799 return false; 2800 case AtomicRMWInst::Xchg: 2801 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 2802 break; 2803 case AtomicRMWInst::Add: 2804 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 2805 break; 2806 case AtomicRMWInst::Sub: 2807 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 2808 break; 2809 case AtomicRMWInst::And: 2810 Opcode = TargetOpcode::G_ATOMICRMW_AND; 2811 break; 2812 case AtomicRMWInst::Nand: 2813 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 2814 break; 2815 case AtomicRMWInst::Or: 2816 Opcode = TargetOpcode::G_ATOMICRMW_OR; 2817 break; 2818 case AtomicRMWInst::Xor: 2819 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 2820 break; 2821 case AtomicRMWInst::Max: 2822 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 2823 break; 2824 case AtomicRMWInst::Min: 2825 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 2826 break; 2827 case AtomicRMWInst::UMax: 2828 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 2829 break; 2830 case AtomicRMWInst::UMin: 2831 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 2832 break; 2833 case AtomicRMWInst::FAdd: 2834 Opcode = TargetOpcode::G_ATOMICRMW_FADD; 2835 break; 2836 case AtomicRMWInst::FSub: 2837 Opcode = TargetOpcode::G_ATOMICRMW_FSUB; 2838 break; 2839 } 2840 2841 AAMDNodes AAMetadata; 2842 I.getAAMetadata(AAMetadata); 2843 2844 MIRBuilder.buildAtomicRMW( 2845 Opcode, Res, Addr, Val, 2846 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 2847 Flags, MRI->getType(Val), getMemOpAlign(I), 2848 AAMetadata, nullptr, I.getSyncScopeID(), 2849 I.getOrdering())); 2850 return true; 2851 } 2852 2853 bool IRTranslator::translateFence(const User &U, 2854 MachineIRBuilder &MIRBuilder) { 2855 const FenceInst &Fence = cast<FenceInst>(U); 2856 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), 2857 Fence.getSyncScopeID()); 2858 return true; 2859 } 2860 2861 bool IRTranslator::translateFreeze(const User &U, 2862 MachineIRBuilder &MIRBuilder) { 2863 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); 2864 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); 2865 2866 assert(DstRegs.size() == SrcRegs.size() && 2867 "Freeze with different source and destination type?"); 2868 2869 for (unsigned I = 0; I < DstRegs.size(); ++I) { 2870 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); 2871 } 2872 2873 return true; 2874 } 2875 2876 void IRTranslator::finishPendingPhis() { 2877 #ifndef NDEBUG 2878 DILocationVerifier Verifier; 2879 GISelObserverWrapper WrapperObserver(&Verifier); 2880 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2881 #endif // ifndef NDEBUG 2882 for (auto &Phi : PendingPHIs) { 2883 const PHINode *PI = Phi.first; 2884 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 2885 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); 2886 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 2887 #ifndef NDEBUG 2888 Verifier.setCurrentInst(PI); 2889 #endif // ifndef NDEBUG 2890 2891 SmallSet<const MachineBasicBlock *, 16> SeenPreds; 2892 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 2893 auto IRPred = PI->getIncomingBlock(i); 2894 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 2895 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 2896 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) 2897 continue; 2898 SeenPreds.insert(Pred); 2899 for (unsigned j = 0; j < ValRegs.size(); ++j) { 2900 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 2901 MIB.addUse(ValRegs[j]); 2902 MIB.addMBB(Pred); 2903 } 2904 } 2905 } 2906 } 2907 } 2908 2909 bool IRTranslator::valueIsSplit(const Value &V, 2910 
SmallVectorImpl<uint64_t> *Offsets) { 2911 SmallVector<LLT, 4> SplitTys; 2912 if (Offsets && !Offsets->empty()) 2913 Offsets->clear(); 2914 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 2915 return SplitTys.size() > 1; 2916 } 2917 2918 bool IRTranslator::translate(const Instruction &Inst) { 2919 CurBuilder->setDebugLoc(Inst.getDebugLoc()); 2920 2921 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2922 if (TLI.fallBackToDAGISel(Inst)) 2923 return false; 2924 2925 switch (Inst.getOpcode()) { 2926 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2927 case Instruction::OPCODE: \ 2928 return translate##OPCODE(Inst, *CurBuilder.get()); 2929 #include "llvm/IR/Instruction.def" 2930 default: 2931 return false; 2932 } 2933 } 2934 2935 bool IRTranslator::translate(const Constant &C, Register Reg) { 2936 // We only emit constants into the entry block from here. To prevent jumpy 2937 // debug behaviour set the line to 0. 2938 if (auto CurrInstDL = CurBuilder->getDL()) 2939 EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0, 2940 CurrInstDL.getScope(), 2941 CurrInstDL.getInlinedAt())); 2942 2943 if (auto CI = dyn_cast<ConstantInt>(&C)) 2944 EntryBuilder->buildConstant(Reg, *CI); 2945 else if (auto CF = dyn_cast<ConstantFP>(&C)) 2946 EntryBuilder->buildFConstant(Reg, *CF); 2947 else if (isa<UndefValue>(C)) 2948 EntryBuilder->buildUndef(Reg); 2949 else if (isa<ConstantPointerNull>(C)) 2950 EntryBuilder->buildConstant(Reg, 0); 2951 else if (auto GV = dyn_cast<GlobalValue>(&C)) 2952 EntryBuilder->buildGlobalValue(Reg, GV); 2953 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 2954 if (!isa<FixedVectorType>(CAZ->getType())) 2955 return false; 2956 // Return the scalar if it is a <1 x Ty> vector. 2957 unsigned NumElts = CAZ->getElementCount().getFixedValue(); 2958 if (NumElts == 1) 2959 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); 2960 SmallVector<Register, 4> Ops; 2961 for (unsigned I = 0; I < NumElts; ++I) { 2962 Constant &Elt = *CAZ->getElementValue(I); 2963 Ops.push_back(getOrCreateVReg(Elt)); 2964 } 2965 EntryBuilder->buildBuildVector(Reg, Ops); 2966 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 2967 // Return the scalar if it is a <1 x Ty> vector. 
2968 if (CV->getNumElements() == 1) 2969 return translateCopy(C, *CV->getElementAsConstant(0), 2970 *EntryBuilder.get()); 2971 SmallVector<Register, 4> Ops; 2972 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 2973 Constant &Elt = *CV->getElementAsConstant(i); 2974 Ops.push_back(getOrCreateVReg(Elt)); 2975 } 2976 EntryBuilder->buildBuildVector(Reg, Ops); 2977 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 2978 switch(CE->getOpcode()) { 2979 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2980 case Instruction::OPCODE: \ 2981 return translate##OPCODE(*CE, *EntryBuilder.get()); 2982 #include "llvm/IR/Instruction.def" 2983 default: 2984 return false; 2985 } 2986 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 2987 if (CV->getNumOperands() == 1) 2988 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); 2989 SmallVector<Register, 4> Ops; 2990 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 2991 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 2992 } 2993 EntryBuilder->buildBuildVector(Reg, Ops); 2994 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 2995 EntryBuilder->buildBlockAddress(Reg, BA); 2996 } else 2997 return false; 2998 2999 return true; 3000 } 3001 3002 void IRTranslator::finalizeBasicBlock() { 3003 for (auto &BTB : SL->BitTestCases) { 3004 // Emit header first, if it wasn't already emitted. 3005 if (!BTB.Emitted) 3006 emitBitTestHeader(BTB, BTB.Parent); 3007 3008 BranchProbability UnhandledProb = BTB.Prob; 3009 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { 3010 UnhandledProb -= BTB.Cases[j].ExtraProb; 3011 // Set the current basic block to the mbb we wish to insert the code into 3012 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB; 3013 // If all cases cover a contiguous range, it is not necessary to jump to 3014 // the default block after the last bit test fails. This is because the 3015 // range check during bit test header creation has guaranteed that every 3016 // case here doesn't go outside the range. In this case, there is no need 3017 // to perform the last bit test, as it will always be true. Instead, make 3018 // the second-to-last bit-test fall through to the target of the last bit 3019 // test, and delete the last bit test. 3020 3021 MachineBasicBlock *NextMBB; 3022 if (BTB.ContiguousRange && j + 2 == ej) { 3023 // Second-to-last bit-test with contiguous range: fall through to the 3024 // target of the final bit test. 3025 NextMBB = BTB.Cases[j + 1].TargetBB; 3026 } else if (j + 1 == ej) { 3027 // For the last bit test, fall through to Default. 3028 NextMBB = BTB.Default; 3029 } else { 3030 // Otherwise, fall through to the next bit test. 3031 NextMBB = BTB.Cases[j + 1].ThisBB; 3032 } 3033 3034 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB); 3035 3036 if (BTB.ContiguousRange && j + 2 == ej) { 3037 // We need to record the replacement phi edge here that normally 3038 // happens in emitBitTestCase before we delete the case, otherwise the 3039 // phi edge will be lost. 3040 addMachineCFGPred({BTB.Parent->getBasicBlock(), 3041 BTB.Cases[ej - 1].TargetBB->getBasicBlock()}, 3042 MBB); 3043 // Since we're not going to use the final bit test, remove it. 3044 BTB.Cases.pop_back(); 3045 break; 3046 } 3047 } 3048 // This is "default" BB. We have two jumps to it. From "header" BB and from 3049 // last "case" BB, unless the latter was skipped. 
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
}

/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
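  // With CSE enabled, both builders are CSEMIRBuilders sharing one
  // GISelCSEInfo, so requesting e.g. the same G_CONSTANT twice in the entry
  // block is expected to reuse the previously created instruction instead of
  // emitting a duplicate (a sketch of the behaviour; the exact set of CSE'd
  // opcodes is determined by the CSEConfig).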
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts)
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  else
    FuncInfo.BPI = nullptr;

  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian().
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
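  // Each IR argument may be split into several virtual registers, one per
  // non-aggregate piece reported by computeValueLLTs (e.g. one vreg per
  // element of a struct argument); lowerFormalArguments is then responsible
  // for filling those vregs from the physical registers or stack slots
  // dictated by the calling convention.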
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      finalizeBasicBlock();
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
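  // Physical registers that lowerFormalArguments marked as live into the
  // argument block must stay live into the merged block, so copy them over and
  // re-sort the list.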
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}
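
// Illustrative end-to-end sketch (not part of the implementation; the exact
// output is target dependent, AArch64 is assumed here). After IRTranslator,
//
//   define i32 @add(i32 %a, i32 %b) {
//     %r = add i32 %a, %b
//     ret i32 %r
//   }
//
// is expected to look roughly like
//
//   bb.1 (%ir-block.0):
//     liveins: $w0, $w1
//     %0:_(s32) = COPY $w0
//     %1:_(s32) = COPY $w1
//     %2:_(s32) = G_ADD %0, %1
//     $w0 = COPY %2(s32)
//     RET_ReallyLR implicit $w0
//
// The generic (G_*) instructions are subsequently legalized, register-bank
// selected, and instruction selected by the later GlobalISel passes.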