//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  if (OptLevel != CodeGenOpt::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
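
// Illustrative note: computeValueLLTs splits an aggregate IR value into one
// LLT per scalar leaf, together with the bit offset of each leaf. For
// example, a value of type {i64, i32} is tracked as two vregs (s64 and s32)
// with offsets 0 and 64; translateLoad/translateStore below divide these
// offsets by 8 to form byte-based addresses.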
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
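
// Illustrative example: for "%a = alloca i32, i32 4" the code below creates
// one 16-byte stack object (element size 4 times array size 4), and a
// zero-sized alloca still gets at least a 1-byte object.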
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType()));
  }
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    return Align(DL.getTypeStoreSize(AI->getValOperand()->getType()));
  }
  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate()
         : static_cast<CmpInst::Predicate>(
               cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    assert(CI && "Instruction should be CmpInst");
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
                         MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may change the insertion point, but that is not a problem
  // here: a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the case block.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
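
// Illustrative example of what findMergedConditions produces: for
//   br i1 (or (icmp eq %a, %b), (icmp slt %c, %d)), label %T, label %F
// it records two CaseBlocks instead of materializing the OR, so the final
// code tests %a == %b and branches to %T on success, then falls through to a
// second block that tests %c < %d and branches to %T or %F.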
void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over a NOT that is not itself part of the tree, and remember to
  // invert the op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  unsigned BOpc = 0;
  if (BOp) {
    BOpc = BOp->getOpcode();
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      BOpc != static_cast<unsigned>(Opc) || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !isValInBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //   = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals TrueProb for
    // TmpBB, but the math is more complicated.
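    // Worked example (illustrative): if A = 0.6 and B = 0.4, BB1 gets
    // probabilities 0.3 / 0.7 and TmpBB gets A/(1+B) = 0.6/1.4 ~= 0.43 and
    // 2B/(1+B) = 0.8/1.4 ~= 0.57, so the combined probability of reaching
    // TBB is 0.3 + 0.7 * 0.43 ~= 0.6 = A, as required.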
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //   = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that
    //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (!CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    Instruction::BinaryOps Opcode = BOp->getOpcode();
    Value *Vec, *BOp0 = BOp->getOperand(0), *BOp1 = BOp->getOperand(1);
    if (!TLI.isJumpExpensive() && BOp->hasOneUse() &&
        !BrInst.hasMetadata(LLVMContext::MD_unpredictable) &&
        (Opcode == Instruction::And || Opcode == Instruction::Or) &&
        !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
          match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(BOp, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability to 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}
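
// Illustrative example of the clustering below: a switch with cases 0, 1, 2
// branching to %bb1 and case 5 branching to %bb2 is turned by sortAndRangeify
// into two clusters, [0-2] -> %bb1 and [5-5] -> %bb2, which later code may
// lower as range checks, a jump table, or bit tests.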
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}
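
// A rough sketch of the header emitted below, in generic MIR (illustrative,
// for a switch whose lowest case is First and highest is Last):
//   %sub = G_SUB %switch_value, First
//   %idx = G_ZEXT/G_TRUNC %sub            ; widen/narrow to pointer width
//   %cmp = G_ICMP ugt %idx, (Last - First)
//   G_BRCOND %cmp, %default
//   G_BR %jump_table_block                ; unless it is the layout successor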
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }
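
  // The range case below uses the standard unsigned trick to test
  // Low <= Cond <= High with a single compare: for an illustrative cluster
  // [10, 14], it emits (Cond - 10) u<= 4, since the subtraction wraps any
  // value below 10 around to a large unsigned number.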
  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (CI && CI->getZExtValue() == 1 &&
        MRI->getType(CondLHS).getSizeInBits() == 1 &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  // Ensure that the type will fit the mask value.
  LLT MaskTy = SwitchOpTy;
  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
    if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
      // Switch table case ranges are encoded as a series of masks.
      // Just use pointer type, it's guaranteed to fit.
      MaskTy = LLT::scalar(64);
      break;
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}
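
// Illustrative example of the bit tests emitted below: for cases 1, 3, 5 all
// branching to the same block, with B.First = 1, the mask is 0b10101 over the
// rebased value x = Cond - 1, and the general test is ((1 << x) & 0b10101) != 0.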
void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}
bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable) {
    // Skip the range check if the fallthrough block is unreachable.
    BTB->OmitRangeCheck = true;
  }

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}
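
// Illustrative note on the ordering done below: given clusters
// {[7] p=0.4, [1] p=0.3, [5] p=0.3}, the sort checks [7] first and breaks the
// 0.3 tie by Low, giving the deterministic order [7], [1], [5].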
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}
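
// Illustrative example for the loop below: loading a {i64, i32} value with
// bit offsets {0, 64} emits two G_LOADs, one at the base pointer and one at
// base + 8 bytes, each with its own machine memory operand.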
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    AAMDNodes AAMetadata;
    LI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    AAMDNodes AAMetadata;
    SI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
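
// Illustrative example for the helper below: for
//   %x = extractvalue {i64, {i32, i32}} %agg, 1, 1
// the synthesized GEP-style index list is {0, 1, 1}, and with a typical
// layout getIndexedOffsetInType returns 12 bytes, i.e. a bit offset of 96.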
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint16_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}
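
// Illustrative example for the check below: a bitcast between two pointer
// types in the same address space (e.g. i8* to i32*) maps to the same LLT,
// so no G_BITCAST is needed and the source vreg can simply be reused.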
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL))
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;
  if (auto *VT = dyn_cast<VectorType>(U.getType()))
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (VectorWidth && !PtrTy.isVector()) {
    BaseReg =
        MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
            .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIntPtrType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }
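
  // Illustrative example of the loop below: for
  //   %p = getelementptr i32, i32* %base, i64 %i
  // it emits %off = G_MUL %i, 4 followed by %p = G_PTR_ADD %base, %off,
  // while constant indices are folded into a single accumulated offset.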
1523 Register GepOffsetReg; 1524 if (ElementSize != 1) { 1525 auto ElementSizeMIB = MIRBuilder.buildConstant( 1526 getLLTForType(*OffsetIRTy, *DL), ElementSize); 1527 GepOffsetReg = 1528 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); 1529 } else 1530 GepOffsetReg = IdxReg; 1531 1532 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); 1533 } 1534 } 1535 1536 if (Offset != 0) { 1537 auto OffsetMIB = 1538 MIRBuilder.buildConstant(OffsetTy, Offset); 1539 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); 1540 return true; 1541 } 1542 1543 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 1544 return true; 1545 } 1546 1547 bool IRTranslator::translateMemFunc(const CallInst &CI, 1548 MachineIRBuilder &MIRBuilder, 1549 unsigned Opcode) { 1550 1551 // If the source is undef, then just emit a nop. 1552 if (isa<UndefValue>(CI.getArgOperand(1))) 1553 return true; 1554 1555 SmallVector<Register, 3> SrcRegs; 1556 1557 unsigned MinPtrSize = UINT_MAX; 1558 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) { 1559 Register SrcReg = getOrCreateVReg(**AI); 1560 LLT SrcTy = MRI->getType(SrcReg); 1561 if (SrcTy.isPointer()) 1562 MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize); 1563 SrcRegs.push_back(SrcReg); 1564 } 1565 1566 LLT SizeTy = LLT::scalar(MinPtrSize); 1567 1568 // The size operand should be the minimum of the pointer sizes. 1569 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1]; 1570 if (MRI->getType(SizeOpReg) != SizeTy) 1571 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0); 1572 1573 auto ICall = MIRBuilder.buildInstr(Opcode); 1574 for (Register SrcReg : SrcRegs) 1575 ICall.addUse(SrcReg); 1576 1577 Align DstAlign; 1578 Align SrcAlign; 1579 unsigned IsVol = 1580 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) 1581 ->getZExtValue(); 1582 1583 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { 1584 DstAlign = MCI->getDestAlign().valueOrOne(); 1585 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1586 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { 1587 DstAlign = MMI->getDestAlign().valueOrOne(); 1588 SrcAlign = MMI->getSourceAlign().valueOrOne(); 1589 } else { 1590 auto *MSI = cast<MemSetInst>(&CI); 1591 DstAlign = MSI->getDestAlign().valueOrOne(); 1592 } 1593 1594 // We need to propagate the tail call flag from the IR inst as an argument. 1595 // Otherwise, we have to pessimize and assume later that we cannot tail call 1596 // any memory intrinsics. 1597 ICall.addImm(CI.isTailCall() ? 1 : 0); 1598 1599 // Create mem operands to store the alignment and volatile info. 1600 auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 1601 ICall.addMemOperand(MF->getMachineMemOperand( 1602 MachinePointerInfo(CI.getArgOperand(0)), 1603 MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); 1604 if (Opcode != TargetOpcode::G_MEMSET) 1605 ICall.addMemOperand(MF->getMachineMemOperand( 1606 MachinePointerInfo(CI.getArgOperand(1)), 1607 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); 1608 1609 return true; 1610 } 1611 1612 void IRTranslator::getStackGuard(Register DstReg, 1613 MachineIRBuilder &MIRBuilder) { 1614 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 1615 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 1616 auto MIB = 1617 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); 1618 1619 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1620 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 1621 if (!Global) 1622 return; 1623 1624 MachinePointerInfo MPInfo(Global); 1625 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 1626 MachineMemOperand::MODereferenceable; 1627 MachineMemOperand *MemRef = 1628 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 1629 DL->getPointerABIAlignment(0)); 1630 MIB.setMemRefs({MemRef}); 1631 } 1632 1633 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 1634 MachineIRBuilder &MIRBuilder) { 1635 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); 1636 MIRBuilder.buildInstr( 1637 Op, {ResRegs[0], ResRegs[1]}, 1638 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); 1639 1640 return true; 1641 } 1642 1643 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI, 1644 MachineIRBuilder &MIRBuilder) { 1645 Register Dst = getOrCreateVReg(CI); 1646 Register Src0 = getOrCreateVReg(*CI.getOperand(0)); 1647 Register Src1 = getOrCreateVReg(*CI.getOperand(1)); 1648 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue(); 1649 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale }); 1650 return true; 1651 } 1652 1653 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { 1654 switch (ID) { 1655 default: 1656 break; 1657 case Intrinsic::bswap: 1658 return TargetOpcode::G_BSWAP; 1659 case Intrinsic::bitreverse: 1660 return TargetOpcode::G_BITREVERSE; 1661 case Intrinsic::fshl: 1662 return TargetOpcode::G_FSHL; 1663 case Intrinsic::fshr: 1664 return TargetOpcode::G_FSHR; 1665 case Intrinsic::ceil: 1666 return TargetOpcode::G_FCEIL; 1667 case Intrinsic::cos: 1668 return TargetOpcode::G_FCOS; 1669 case Intrinsic::ctpop: 1670 return TargetOpcode::G_CTPOP; 1671 case Intrinsic::exp: 1672 return TargetOpcode::G_FEXP; 1673 case Intrinsic::exp2: 1674 return TargetOpcode::G_FEXP2; 1675 case Intrinsic::fabs: 1676 return TargetOpcode::G_FABS; 1677 case Intrinsic::copysign: 1678 return TargetOpcode::G_FCOPYSIGN; 1679 case Intrinsic::minnum: 1680 return TargetOpcode::G_FMINNUM; 1681 case Intrinsic::maxnum: 1682 return TargetOpcode::G_FMAXNUM; 1683 case Intrinsic::minimum: 1684 return TargetOpcode::G_FMINIMUM; 1685 case Intrinsic::maximum: 1686 return TargetOpcode::G_FMAXIMUM; 1687 case Intrinsic::canonicalize: 1688 return TargetOpcode::G_FCANONICALIZE; 1689 case Intrinsic::floor: 1690 return TargetOpcode::G_FFLOOR; 1691 case Intrinsic::fma: 1692 return TargetOpcode::G_FMA; 1693 case Intrinsic::log: 1694 return TargetOpcode::G_FLOG; 1695 case Intrinsic::log2: 1696 return TargetOpcode::G_FLOG2; 1697 case Intrinsic::log10: 1698 return 
TargetOpcode::G_FLOG10;
1699 case Intrinsic::nearbyint:
1700 return TargetOpcode::G_FNEARBYINT;
1701 case Intrinsic::pow:
1702 return TargetOpcode::G_FPOW;
1703 case Intrinsic::powi:
1704 return TargetOpcode::G_FPOWI;
1705 case Intrinsic::rint:
1706 return TargetOpcode::G_FRINT;
1707 case Intrinsic::round:
1708 return TargetOpcode::G_INTRINSIC_ROUND;
1709 case Intrinsic::roundeven:
1710 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1711 case Intrinsic::sin:
1712 return TargetOpcode::G_FSIN;
1713 case Intrinsic::sqrt:
1714 return TargetOpcode::G_FSQRT;
1715 case Intrinsic::trunc:
1716 return TargetOpcode::G_INTRINSIC_TRUNC;
1717 case Intrinsic::readcyclecounter:
1718 return TargetOpcode::G_READCYCLECOUNTER;
1719 case Intrinsic::ptrmask:
1720 return TargetOpcode::G_PTRMASK;
1721 case Intrinsic::lrint:
1722 return TargetOpcode::G_INTRINSIC_LRINT;
1723 }
1724 return Intrinsic::not_intrinsic;
1725 }
1726
1727 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1728 Intrinsic::ID ID,
1729 MachineIRBuilder &MIRBuilder) {
1730
1731 unsigned Op = getSimpleIntrinsicOpcode(ID);
1732
1733 // Is this a simple intrinsic?
1734 if (Op == Intrinsic::not_intrinsic)
1735 return false;
1736
1737 // Yes. Let's translate it.
1738 SmallVector<llvm::SrcOp, 4> VRegs;
1739 for (auto &Arg : CI.arg_operands())
1740 VRegs.push_back(getOrCreateVReg(*Arg));
1741
1742 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1743 MachineInstr::copyFlagsFromInstruction(CI));
1744 return true;
1745 }
1746
1747 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
1748 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1749 switch (ID) {
1750 case Intrinsic::experimental_constrained_fadd:
1751 return TargetOpcode::G_STRICT_FADD;
1752 case Intrinsic::experimental_constrained_fsub:
1753 return TargetOpcode::G_STRICT_FSUB;
1754 case Intrinsic::experimental_constrained_fmul:
1755 return TargetOpcode::G_STRICT_FMUL;
1756 case Intrinsic::experimental_constrained_fdiv:
1757 return TargetOpcode::G_STRICT_FDIV;
1758 case Intrinsic::experimental_constrained_frem:
1759 return TargetOpcode::G_STRICT_FREM;
1760 case Intrinsic::experimental_constrained_fma:
1761 return TargetOpcode::G_STRICT_FMA;
1762 case Intrinsic::experimental_constrained_sqrt:
1763 return TargetOpcode::G_STRICT_FSQRT;
1764 default:
1765 return 0;
1766 }
1767 }
1768
1769 bool IRTranslator::translateConstrainedFPIntrinsic(
1770 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1771 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1772
1773 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1774 if (!Opcode)
1775 return false;
1776
1777 unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1778 if (EB == fp::ExceptionBehavior::ebIgnore)
1779 Flags |= MachineInstr::NoFPExcept;
1780
1781 SmallVector<llvm::SrcOp, 4> VRegs;
1782 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1783 if (!FPI.isUnaryOp())
1784 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1785 if (FPI.isTernaryOp())
1786 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1787
1788 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1789 return true;
1790 }
1791
1792 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1793 MachineIRBuilder &MIRBuilder) {
1794
1795 // If this is a simple intrinsic (that is, we just need to add a def of
1796 // a vreg, and uses for each arg operand), then translate it.
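// E.g. llvm.bswap maps 1:1 onto G_BSWAP, whose only operands are the result
// vreg and the source vreg.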
1797 if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) 1798 return true; 1799 1800 switch (ID) { 1801 default: 1802 break; 1803 case Intrinsic::lifetime_start: 1804 case Intrinsic::lifetime_end: { 1805 // No stack colouring in O0, discard region information. 1806 if (MF->getTarget().getOptLevel() == CodeGenOpt::None) 1807 return true; 1808 1809 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START 1810 : TargetOpcode::LIFETIME_END; 1811 1812 // Get the underlying objects for the location passed on the lifetime 1813 // marker. 1814 SmallVector<const Value *, 4> Allocas; 1815 getUnderlyingObjects(CI.getArgOperand(1), Allocas); 1816 1817 // Iterate over each underlying object, creating lifetime markers for each 1818 // static alloca. Quit if we find a non-static alloca. 1819 for (const Value *V : Allocas) { 1820 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 1821 if (!AI) 1822 continue; 1823 1824 if (!AI->isStaticAlloca()) 1825 return true; 1826 1827 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 1828 } 1829 return true; 1830 } 1831 case Intrinsic::dbg_declare: { 1832 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 1833 assert(DI.getVariable() && "Missing variable"); 1834 1835 const Value *Address = DI.getAddress(); 1836 if (!Address || isa<UndefValue>(Address)) { 1837 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 1838 return true; 1839 } 1840 1841 assert(DI.getVariable()->isValidLocationForIntrinsic( 1842 MIRBuilder.getDebugLoc()) && 1843 "Expected inlined-at fields to agree"); 1844 auto AI = dyn_cast<AllocaInst>(Address); 1845 if (AI && AI->isStaticAlloca()) { 1846 // Static allocas are tracked at the MF level, no need for DBG_VALUE 1847 // instructions (in fact, they get ignored if they *do* exist). 1848 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 1849 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 1850 } else { 1851 // A dbg.declare describes the address of a source variable, so lower it 1852 // into an indirect DBG_VALUE. 1853 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 1854 DI.getVariable(), DI.getExpression()); 1855 } 1856 return true; 1857 } 1858 case Intrinsic::dbg_label: { 1859 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 1860 assert(DI.getLabel() && "Missing label"); 1861 1862 assert(DI.getLabel()->isValidLocationForIntrinsic( 1863 MIRBuilder.getDebugLoc()) && 1864 "Expected inlined-at fields to agree"); 1865 1866 MIRBuilder.buildDbgLabel(DI.getLabel()); 1867 return true; 1868 } 1869 case Intrinsic::vaend: 1870 // No target I know of cares about va_end. Certainly no in-tree target 1871 // does. Simplest intrinsic ever! 1872 return true; 1873 case Intrinsic::vastart: { 1874 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1875 Value *Ptr = CI.getArgOperand(0); 1876 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 1877 1878 // FIXME: Get alignment 1879 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) 1880 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), 1881 MachineMemOperand::MOStore, 1882 ListSize, Align(1))); 1883 return true; 1884 } 1885 case Intrinsic::dbg_value: { 1886 // This form of DBG_VALUE is target-independent. 
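// Three cases follow: a missing value is described as undef (an indirect
// DBG_VALUE of no register), constant values become constant DBG_VALUEs, and
// everything else gets one direct DBG_VALUE per component vreg.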
1887 const DbgValueInst &DI = cast<DbgValueInst>(CI); 1888 const Value *V = DI.getValue(); 1889 assert(DI.getVariable()->isValidLocationForIntrinsic( 1890 MIRBuilder.getDebugLoc()) && 1891 "Expected inlined-at fields to agree"); 1892 if (!V) { 1893 // Currently the optimizer can produce this; insert an undef to 1894 // help debugging. Probably the optimizer should not do this. 1895 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 1896 } else if (const auto *CI = dyn_cast<Constant>(V)) { 1897 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 1898 } else { 1899 for (Register Reg : getOrCreateVRegs(*V)) { 1900 // FIXME: This does not handle register-indirect values at offset 0. The 1901 // direct/indirect thing shouldn't really be handled by something as 1902 // implicit as reg+noreg vs reg+imm in the first place, but it seems 1903 // pretty baked in right now. 1904 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 1905 } 1906 } 1907 return true; 1908 } 1909 case Intrinsic::uadd_with_overflow: 1910 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 1911 case Intrinsic::sadd_with_overflow: 1912 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 1913 case Intrinsic::usub_with_overflow: 1914 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 1915 case Intrinsic::ssub_with_overflow: 1916 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 1917 case Intrinsic::umul_with_overflow: 1918 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 1919 case Intrinsic::smul_with_overflow: 1920 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 1921 case Intrinsic::uadd_sat: 1922 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); 1923 case Intrinsic::sadd_sat: 1924 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); 1925 case Intrinsic::usub_sat: 1926 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); 1927 case Intrinsic::ssub_sat: 1928 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); 1929 case Intrinsic::ushl_sat: 1930 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder); 1931 case Intrinsic::sshl_sat: 1932 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder); 1933 case Intrinsic::umin: 1934 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder); 1935 case Intrinsic::umax: 1936 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder); 1937 case Intrinsic::smin: 1938 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder); 1939 case Intrinsic::smax: 1940 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder); 1941 case Intrinsic::abs: 1942 // TODO: Preserve "int min is poison" arg in GMIR? 
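// For now the poison-flag operand is dropped and llvm.abs is lowered as a
// plain unary G_ABS of its first operand.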
1943 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder); 1944 case Intrinsic::smul_fix: 1945 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder); 1946 case Intrinsic::umul_fix: 1947 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder); 1948 case Intrinsic::smul_fix_sat: 1949 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder); 1950 case Intrinsic::umul_fix_sat: 1951 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder); 1952 case Intrinsic::sdiv_fix: 1953 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder); 1954 case Intrinsic::udiv_fix: 1955 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder); 1956 case Intrinsic::sdiv_fix_sat: 1957 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder); 1958 case Intrinsic::udiv_fix_sat: 1959 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder); 1960 case Intrinsic::fmuladd: { 1961 const TargetMachine &TM = MF->getTarget(); 1962 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1963 Register Dst = getOrCreateVReg(CI); 1964 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 1965 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 1966 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 1967 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 1968 TLI.isFMAFasterThanFMulAndFAdd(*MF, 1969 TLI.getValueType(*DL, CI.getType()))) { 1970 // TODO: Revisit this to see if we should move this part of the 1971 // lowering to the combiner. 1972 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, 1973 MachineInstr::copyFlagsFromInstruction(CI)); 1974 } else { 1975 LLT Ty = getLLTForType(*CI.getType(), *DL); 1976 auto FMul = MIRBuilder.buildFMul( 1977 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); 1978 MIRBuilder.buildFAdd(Dst, FMul, Op2, 1979 MachineInstr::copyFlagsFromInstruction(CI)); 1980 } 1981 return true; 1982 } 1983 case Intrinsic::convert_from_fp16: 1984 // FIXME: This intrinsic should probably be removed from the IR. 1985 MIRBuilder.buildFPExt(getOrCreateVReg(CI), 1986 getOrCreateVReg(*CI.getArgOperand(0)), 1987 MachineInstr::copyFlagsFromInstruction(CI)); 1988 return true; 1989 case Intrinsic::convert_to_fp16: 1990 // FIXME: This intrinsic should probably be removed from the IR. 
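// Mirror of convert_from_fp16 above: lowered directly as a G_FPTRUNC of the
// operand, carrying over the IR fast-math flags.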
1991 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI), 1992 getOrCreateVReg(*CI.getArgOperand(0)), 1993 MachineInstr::copyFlagsFromInstruction(CI)); 1994 return true; 1995 case Intrinsic::memcpy: 1996 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY); 1997 case Intrinsic::memmove: 1998 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE); 1999 case Intrinsic::memset: 2000 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET); 2001 case Intrinsic::eh_typeid_for: { 2002 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 2003 Register Reg = getOrCreateVReg(CI); 2004 unsigned TypeID = MF->getTypeIDFor(GV); 2005 MIRBuilder.buildConstant(Reg, TypeID); 2006 return true; 2007 } 2008 case Intrinsic::objectsize: 2009 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 2010 2011 case Intrinsic::is_constant: 2012 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 2013 2014 case Intrinsic::stackguard: 2015 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 2016 return true; 2017 case Intrinsic::stackprotector: { 2018 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2019 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); 2020 getStackGuard(GuardVal, MIRBuilder); 2021 2022 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 2023 int FI = getOrCreateFrameIndex(*Slot); 2024 MF->getFrameInfo().setStackProtectorIndex(FI); 2025 2026 MIRBuilder.buildStore( 2027 GuardVal, getOrCreateVReg(*Slot), 2028 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 2029 MachineMemOperand::MOStore | 2030 MachineMemOperand::MOVolatile, 2031 PtrTy.getSizeInBits() / 8, Align(8))); 2032 return true; 2033 } 2034 case Intrinsic::stacksave: { 2035 // Save the stack pointer to the location provided by the intrinsic. 2036 Register Reg = getOrCreateVReg(CI); 2037 Register StackPtr = MF->getSubtarget() 2038 .getTargetLowering() 2039 ->getStackPointerRegisterToSaveRestore(); 2040 2041 // If the target doesn't specify a stack pointer, then fall back. 2042 if (!StackPtr) 2043 return false; 2044 2045 MIRBuilder.buildCopy(Reg, StackPtr); 2046 return true; 2047 } 2048 case Intrinsic::stackrestore: { 2049 // Restore the stack pointer from the location provided by the intrinsic. 2050 Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); 2051 Register StackPtr = MF->getSubtarget() 2052 .getTargetLowering() 2053 ->getStackPointerRegisterToSaveRestore(); 2054 2055 // If the target doesn't specify a stack pointer, then fall back. 2056 if (!StackPtr) 2057 return false; 2058 2059 MIRBuilder.buildCopy(StackPtr, Reg); 2060 return true; 2061 } 2062 case Intrinsic::cttz: 2063 case Intrinsic::ctlz: { 2064 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 2065 bool isTrailing = ID == Intrinsic::cttz; 2066 unsigned Opcode = isTrailing 2067 ? Cst->isZero() ? TargetOpcode::G_CTTZ 2068 : TargetOpcode::G_CTTZ_ZERO_UNDEF 2069 : Cst->isZero() ? 
TargetOpcode::G_CTLZ 2070 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 2071 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, 2072 {getOrCreateVReg(*CI.getArgOperand(0))}); 2073 return true; 2074 } 2075 case Intrinsic::invariant_start: { 2076 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 2077 Register Undef = MRI->createGenericVirtualRegister(PtrTy); 2078 MIRBuilder.buildUndef(Undef); 2079 return true; 2080 } 2081 case Intrinsic::invariant_end: 2082 return true; 2083 case Intrinsic::expect: 2084 case Intrinsic::annotation: 2085 case Intrinsic::ptr_annotation: 2086 case Intrinsic::launder_invariant_group: 2087 case Intrinsic::strip_invariant_group: { 2088 // Drop the intrinsic, but forward the value. 2089 MIRBuilder.buildCopy(getOrCreateVReg(CI), 2090 getOrCreateVReg(*CI.getArgOperand(0))); 2091 return true; 2092 } 2093 case Intrinsic::assume: 2094 case Intrinsic::var_annotation: 2095 case Intrinsic::sideeffect: 2096 // Discard annotate attributes, assumptions, and artificial side-effects. 2097 return true; 2098 case Intrinsic::read_volatile_register: 2099 case Intrinsic::read_register: { 2100 Value *Arg = CI.getArgOperand(0); 2101 MIRBuilder 2102 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) 2103 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); 2104 return true; 2105 } 2106 case Intrinsic::write_register: { 2107 Value *Arg = CI.getArgOperand(0); 2108 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) 2109 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) 2110 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 2111 return true; 2112 } 2113 case Intrinsic::localescape: { 2114 MachineBasicBlock &EntryMBB = MF->front(); 2115 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName()); 2116 2117 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 2118 // is the same on all targets. 2119 for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) { 2120 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts(); 2121 if (isa<ConstantPointerNull>(Arg)) 2122 continue; // Skip null pointers. They represent a hole in index space. 2123 2124 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg)); 2125 MCSymbol *FrameAllocSym = 2126 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName, 2127 Idx); 2128 2129 // This should be inserted at the start of the entry block. 
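// Build it detached (buildInstrNoInsert) so it can be spliced at
// EntryMBB.begin() below, regardless of the builder's current insertion
// point.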
2130 auto LocalEscape = 2131 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE) 2132 .addSym(FrameAllocSym) 2133 .addFrameIndex(FI); 2134 2135 EntryMBB.insert(EntryMBB.begin(), LocalEscape); 2136 } 2137 2138 return true; 2139 } 2140 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 2141 case Intrinsic::INTRINSIC: 2142 #include "llvm/IR/ConstrainedOps.def" 2143 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI), 2144 MIRBuilder); 2145 2146 } 2147 return false; 2148 } 2149 2150 bool IRTranslator::translateInlineAsm(const CallBase &CB, 2151 MachineIRBuilder &MIRBuilder) { 2152 2153 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); 2154 2155 if (!ALI) { 2156 LLVM_DEBUG( 2157 dbgs() << "Inline asm lowering is not supported for this target yet\n"); 2158 return false; 2159 } 2160 2161 return ALI->lowerInlineAsm( 2162 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); 2163 } 2164 2165 bool IRTranslator::translateCallBase(const CallBase &CB, 2166 MachineIRBuilder &MIRBuilder) { 2167 ArrayRef<Register> Res = getOrCreateVRegs(CB); 2168 2169 SmallVector<ArrayRef<Register>, 8> Args; 2170 Register SwiftInVReg = 0; 2171 Register SwiftErrorVReg = 0; 2172 for (auto &Arg : CB.args()) { 2173 if (CLI->supportSwiftError() && isSwiftError(Arg)) { 2174 assert(SwiftInVReg == 0 && "Expected only one swift error argument"); 2175 LLT Ty = getLLTForType(*Arg->getType(), *DL); 2176 SwiftInVReg = MRI->createGenericVirtualRegister(Ty); 2177 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( 2178 &CB, &MIRBuilder.getMBB(), Arg)); 2179 Args.emplace_back(makeArrayRef(SwiftInVReg)); 2180 SwiftErrorVReg = 2181 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); 2182 continue; 2183 } 2184 Args.push_back(getOrCreateVRegs(*Arg)); 2185 } 2186 2187 // We don't set HasCalls on MFI here yet because call lowering may decide to 2188 // optimize into tail calls. Instead, we defer that to selection where a final 2189 // scan is done to check if any instructions are calls. 2190 bool Success = 2191 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, 2192 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); 2193 2194 // Check if we just inserted a tail call. 2195 if (Success) { 2196 assert(!HasTailCall && "Can't tail call return twice from block?"); 2197 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 2198 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); 2199 } 2200 2201 return Success; 2202 } 2203 2204 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 2205 const CallInst &CI = cast<CallInst>(U); 2206 auto TII = MF->getTarget().getIntrinsicInfo(); 2207 const Function *F = CI.getCalledFunction(); 2208 2209 // FIXME: support Windows dllimport function calls. 2210 if (F && (F->hasDLLImportStorageClass() || 2211 (MF->getTarget().getTargetTriple().isOSWindows() && 2212 F->hasExternalWeakLinkage()))) 2213 return false; 2214 2215 // FIXME: support control flow guard targets. 
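// A "cfguardtarget" operand bundle carries the Control Flow Guard check
// target; we don't lower it yet, so reject the call and let the usual
// GlobalISel fallback path take over.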
2216 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2217 return false; 2218 2219 if (CI.isInlineAsm()) 2220 return translateInlineAsm(CI, MIRBuilder); 2221 2222 Intrinsic::ID ID = Intrinsic::not_intrinsic; 2223 if (F && F->isIntrinsic()) { 2224 ID = F->getIntrinsicID(); 2225 if (TII && ID == Intrinsic::not_intrinsic) 2226 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 2227 } 2228 2229 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) 2230 return translateCallBase(CI, MIRBuilder); 2231 2232 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 2233 2234 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 2235 return true; 2236 2237 ArrayRef<Register> ResultRegs; 2238 if (!CI.getType()->isVoidTy()) 2239 ResultRegs = getOrCreateVRegs(CI); 2240 2241 // Ignore the callsite attributes. Backend code is most likely not expecting 2242 // an intrinsic to sometimes have side effects and sometimes not. 2243 MachineInstrBuilder MIB = 2244 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); 2245 if (isa<FPMathOperator>(CI)) 2246 MIB->copyIRFlags(CI); 2247 2248 for (auto &Arg : enumerate(CI.arg_operands())) { 2249 // If this is required to be an immediate, don't materialize it in a 2250 // register. 2251 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { 2252 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { 2253 // imm arguments are more convenient than cimm (and realistically 2254 // probably sufficient), so use them. 2255 assert(CI->getBitWidth() <= 64 && 2256 "large intrinsic immediates not handled"); 2257 MIB.addImm(CI->getSExtValue()); 2258 } else { 2259 MIB.addFPImm(cast<ConstantFP>(Arg.value())); 2260 } 2261 } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) { 2262 auto *MDN = dyn_cast<MDNode>(MD->getMetadata()); 2263 if (!MDN) // This was probably an MDString. 2264 return false; 2265 MIB.addMetadata(MDN); 2266 } else { 2267 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); 2268 if (VRegs.size() > 1) 2269 return false; 2270 MIB.addUse(VRegs[0]); 2271 } 2272 } 2273 2274 // Add a MachineMemOperand if it is a target mem intrinsic. 2275 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 2276 TargetLowering::IntrinsicInfo Info; 2277 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 2278 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 2279 Align Alignment = Info.align.getValueOr( 2280 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); 2281 2282 uint64_t Size = Info.memVT.getStoreSize(); 2283 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 2284 Info.flags, Size, Alignment)); 2285 } 2286 2287 return true; 2288 } 2289 2290 bool IRTranslator::translateInvoke(const User &U, 2291 MachineIRBuilder &MIRBuilder) { 2292 const InvokeInst &I = cast<InvokeInst>(U); 2293 MCContext &Context = MF->getContext(); 2294 2295 const BasicBlock *ReturnBB = I.getSuccessor(0); 2296 const BasicBlock *EHPadBB = I.getSuccessor(1); 2297 2298 const Function *Fn = I.getCalledFunction(); 2299 if (I.isInlineAsm()) 2300 return false; 2301 2302 // FIXME: support invoking patchpoint and statepoint intrinsics. 2303 if (Fn && Fn->isIntrinsic()) 2304 return false; 2305 2306 // FIXME: support whatever these are. 2307 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 2308 return false; 2309 2310 // FIXME: support control flow guard targets. 
2311 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 2312 return false; 2313 2314 // FIXME: support Windows exception handling. 2315 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI())) 2316 return false; 2317 2318 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 2319 // the region covered by the try. 2320 MCSymbol *BeginSymbol = Context.createTempSymbol(); 2321 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 2322 2323 if (!translateCallBase(I, MIRBuilder)) 2324 return false; 2325 2326 MCSymbol *EndSymbol = Context.createTempSymbol(); 2327 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 2328 2329 // FIXME: track probabilities. 2330 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 2331 &ReturnMBB = getMBB(*ReturnBB); 2332 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 2333 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 2334 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 2335 MIRBuilder.buildBr(ReturnMBB); 2336 2337 return true; 2338 } 2339 2340 bool IRTranslator::translateCallBr(const User &U, 2341 MachineIRBuilder &MIRBuilder) { 2342 // FIXME: Implement this. 2343 return false; 2344 } 2345 2346 bool IRTranslator::translateLandingPad(const User &U, 2347 MachineIRBuilder &MIRBuilder) { 2348 const LandingPadInst &LP = cast<LandingPadInst>(U); 2349 2350 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 2351 2352 MBB.setIsEHPad(); 2353 2354 // If there aren't registers to copy the values into (e.g., during SjLj 2355 // exceptions), then don't bother. 2356 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2357 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 2358 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 2359 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 2360 return true; 2361 2362 // If landingpad's return type is token type, we don't create DAG nodes 2363 // for its exception pointer and selector value. The extraction of exception 2364 // pointer or selector value from token type landingpads is not currently 2365 // supported. 2366 if (LP.getType()->isTokenTy()) 2367 return true; 2368 2369 // Add a label to mark the beginning of the landing pad. Deletion of the 2370 // landing pad can thus be detected via the MachineModuleInfo. 2371 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 2372 .addSym(MF->addLandingPad(&MBB)); 2373 2374 // If the unwinder does not preserve all registers, ensure that the 2375 // function marks the clobbered registers as used. 2376 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo(); 2377 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF)) 2378 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask); 2379 2380 LLT Ty = getLLTForType(*LP.getType(), *DL); 2381 Register Undef = MRI->createGenericVirtualRegister(Ty); 2382 MIRBuilder.buildUndef(Undef); 2383 2384 SmallVector<LLT, 2> Tys; 2385 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 2386 Tys.push_back(getLLTForType(*Ty, *DL)); 2387 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 2388 2389 // Mark exception register as live in. 
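// Copy the exception pointer (and, below, the selector) out of the physical
// registers chosen by the personality-specific lowering into the
// landingpad's result vregs.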
2390 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2391 if (!ExceptionReg)
2392 return false;
2393
2394 MBB.addLiveIn(ExceptionReg);
2395 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2396 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2397
2398 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2399 if (!SelectorReg)
2400 return false;
2401
2402 MBB.addLiveIn(SelectorReg);
2403 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2404 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2405 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2406
2407 return true;
2408 }
2409
2410 bool IRTranslator::translateAlloca(const User &U,
2411 MachineIRBuilder &MIRBuilder) {
2412 auto &AI = cast<AllocaInst>(U);
2413
2414 if (AI.isSwiftError())
2415 return true;
2416
2417 if (AI.isStaticAlloca()) {
2418 Register Res = getOrCreateVReg(AI);
2419 int FI = getOrCreateFrameIndex(AI);
2420 MIRBuilder.buildFrameIndex(Res, FI);
2421 return true;
2422 }
2423
2424 // FIXME: support stack probing for Windows.
2425 if (MF->getTarget().getTargetTriple().isOSWindows())
2426 return false;
2427
2428 // Now we're in the harder dynamic case.
2429 Register NumElts = getOrCreateVReg(*AI.getArraySize());
2430 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2431 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2432 if (MRI->getType(NumElts) != IntPtrTy) {
2433 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2434 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2435 NumElts = ExtElts;
2436 }
2437
2438 Type *Ty = AI.getAllocatedType();
2439
2440 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2441 Register TySize =
2442 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2443 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2444
2445 // Round the size of the allocation up to the stack alignment size
2446 // by adding SA-1 to the size. This doesn't overflow because we're computing
2447 // an address inside an alloca.
2448 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2449 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2450 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2451 MachineInstr::NoUWrap);
2452 auto AlignCst =
2453 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2454 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
2455
2456 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2457 if (Alignment <= StackAlign)
2458 Alignment = Align(1);
2459 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2460
2461 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2462 assert(MF->getFrameInfo().hasVarSizedObjects());
2463 return true;
2464 }
2465
2466 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2467 // FIXME: We may need more info about the type. Because of how LLT works,
2468 // we're completely discarding the i64/double distinction here (amongst
2469 // others). Fortunately the ABIs I know of where that matters don't use va_arg
2470 // anyway but that's not guaranteed.
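// As a result, the G_VAARG emitted here carries only the va_list pointer and
// the ABI alignment of the result type.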
2471 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, 2472 {getOrCreateVReg(*U.getOperand(0)), 2473 DL->getABITypeAlign(U.getType()).value()}); 2474 return true; 2475 } 2476 2477 bool IRTranslator::translateInsertElement(const User &U, 2478 MachineIRBuilder &MIRBuilder) { 2479 // If it is a <1 x Ty> vector, use the scalar as it is 2480 // not a legal vector type in LLT. 2481 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1) 2482 return translateCopy(U, *U.getOperand(1), MIRBuilder); 2483 2484 Register Res = getOrCreateVReg(U); 2485 Register Val = getOrCreateVReg(*U.getOperand(0)); 2486 Register Elt = getOrCreateVReg(*U.getOperand(1)); 2487 Register Idx = getOrCreateVReg(*U.getOperand(2)); 2488 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 2489 return true; 2490 } 2491 2492 bool IRTranslator::translateExtractElement(const User &U, 2493 MachineIRBuilder &MIRBuilder) { 2494 // If it is a <1 x Ty> vector, use the scalar as it is 2495 // not a legal vector type in LLT. 2496 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1) 2497 return translateCopy(U, *U.getOperand(0), MIRBuilder); 2498 2499 Register Res = getOrCreateVReg(U); 2500 Register Val = getOrCreateVReg(*U.getOperand(0)); 2501 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2502 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 2503 Register Idx; 2504 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 2505 if (CI->getBitWidth() != PreferredVecIdxWidth) { 2506 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 2507 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 2508 Idx = getOrCreateVReg(*NewIdxCI); 2509 } 2510 } 2511 if (!Idx) 2512 Idx = getOrCreateVReg(*U.getOperand(1)); 2513 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 2514 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 2515 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); 2516 } 2517 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 2518 return true; 2519 } 2520 2521 bool IRTranslator::translateShuffleVector(const User &U, 2522 MachineIRBuilder &MIRBuilder) { 2523 ArrayRef<int> Mask; 2524 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) 2525 Mask = SVI->getShuffleMask(); 2526 else 2527 Mask = cast<ConstantExpr>(U).getShuffleMask(); 2528 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); 2529 MIRBuilder 2530 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, 2531 {getOrCreateVReg(*U.getOperand(0)), 2532 getOrCreateVReg(*U.getOperand(1))}) 2533 .addShuffleMask(MaskAlloc); 2534 return true; 2535 } 2536 2537 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 2538 const PHINode &PI = cast<PHINode>(U); 2539 2540 SmallVector<MachineInstr *, 4> Insts; 2541 for (auto Reg : getOrCreateVRegs(PI)) { 2542 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); 2543 Insts.push_back(MIB.getInstr()); 2544 } 2545 2546 PendingPHIs.emplace_back(&PI, std::move(Insts)); 2547 return true; 2548 } 2549 2550 bool IRTranslator::translateAtomicCmpXchg(const User &U, 2551 MachineIRBuilder &MIRBuilder) { 2552 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 2553 2554 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2555 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2556 2557 Type *ResType = I.getType(); 2558 Type *ValType = ResType->Type::getStructElementType(0); 2559 2560 auto Res = getOrCreateVRegs(I); 2561 Register OldValRes = Res[0]; 2562 Register 
SuccessRes = Res[1]; 2563 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2564 Register Cmp = getOrCreateVReg(*I.getCompareOperand()); 2565 Register NewVal = getOrCreateVReg(*I.getNewValOperand()); 2566 2567 AAMDNodes AAMetadata; 2568 I.getAAMetadata(AAMetadata); 2569 2570 MIRBuilder.buildAtomicCmpXchgWithSuccess( 2571 OldValRes, SuccessRes, Addr, Cmp, NewVal, 2572 *MF->getMachineMemOperand( 2573 MachinePointerInfo(I.getPointerOperand()), Flags, 2574 DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, 2575 I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); 2576 return true; 2577 } 2578 2579 bool IRTranslator::translateAtomicRMW(const User &U, 2580 MachineIRBuilder &MIRBuilder) { 2581 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 2582 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2583 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2584 2585 Type *ResType = I.getType(); 2586 2587 Register Res = getOrCreateVReg(I); 2588 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2589 Register Val = getOrCreateVReg(*I.getValOperand()); 2590 2591 unsigned Opcode = 0; 2592 switch (I.getOperation()) { 2593 default: 2594 return false; 2595 case AtomicRMWInst::Xchg: 2596 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 2597 break; 2598 case AtomicRMWInst::Add: 2599 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 2600 break; 2601 case AtomicRMWInst::Sub: 2602 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 2603 break; 2604 case AtomicRMWInst::And: 2605 Opcode = TargetOpcode::G_ATOMICRMW_AND; 2606 break; 2607 case AtomicRMWInst::Nand: 2608 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 2609 break; 2610 case AtomicRMWInst::Or: 2611 Opcode = TargetOpcode::G_ATOMICRMW_OR; 2612 break; 2613 case AtomicRMWInst::Xor: 2614 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 2615 break; 2616 case AtomicRMWInst::Max: 2617 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 2618 break; 2619 case AtomicRMWInst::Min: 2620 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 2621 break; 2622 case AtomicRMWInst::UMax: 2623 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 2624 break; 2625 case AtomicRMWInst::UMin: 2626 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 2627 break; 2628 case AtomicRMWInst::FAdd: 2629 Opcode = TargetOpcode::G_ATOMICRMW_FADD; 2630 break; 2631 case AtomicRMWInst::FSub: 2632 Opcode = TargetOpcode::G_ATOMICRMW_FSUB; 2633 break; 2634 } 2635 2636 AAMDNodes AAMetadata; 2637 I.getAAMetadata(AAMetadata); 2638 2639 MIRBuilder.buildAtomicRMW( 2640 Opcode, Res, Addr, Val, 2641 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 2642 Flags, DL->getTypeStoreSize(ResType), 2643 getMemOpAlign(I), AAMetadata, nullptr, 2644 I.getSyncScopeID(), I.getOrdering())); 2645 return true; 2646 } 2647 2648 bool IRTranslator::translateFence(const User &U, 2649 MachineIRBuilder &MIRBuilder) { 2650 const FenceInst &Fence = cast<FenceInst>(U); 2651 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), 2652 Fence.getSyncScopeID()); 2653 return true; 2654 } 2655 2656 bool IRTranslator::translateFreeze(const User &U, 2657 MachineIRBuilder &MIRBuilder) { 2658 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); 2659 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); 2660 2661 assert(DstRegs.size() == SrcRegs.size() && 2662 "Freeze with different source and destination type?"); 2663 2664 for (unsigned I = 0; I < DstRegs.size(); ++I) { 2665 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); 2666 } 2667 2668 return true; 2669 } 2670 2671 void IRTranslator::finishPendingPhis() { 2672 #ifndef NDEBUG 2673 
DILocationVerifier Verifier; 2674 GISelObserverWrapper WrapperObserver(&Verifier); 2675 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2676 #endif // ifndef NDEBUG 2677 for (auto &Phi : PendingPHIs) { 2678 const PHINode *PI = Phi.first; 2679 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 2680 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); 2681 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 2682 #ifndef NDEBUG 2683 Verifier.setCurrentInst(PI); 2684 #endif // ifndef NDEBUG 2685 2686 SmallSet<const MachineBasicBlock *, 16> SeenPreds; 2687 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 2688 auto IRPred = PI->getIncomingBlock(i); 2689 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 2690 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 2691 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) 2692 continue; 2693 SeenPreds.insert(Pred); 2694 for (unsigned j = 0; j < ValRegs.size(); ++j) { 2695 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 2696 MIB.addUse(ValRegs[j]); 2697 MIB.addMBB(Pred); 2698 } 2699 } 2700 } 2701 } 2702 } 2703 2704 bool IRTranslator::valueIsSplit(const Value &V, 2705 SmallVectorImpl<uint64_t> *Offsets) { 2706 SmallVector<LLT, 4> SplitTys; 2707 if (Offsets && !Offsets->empty()) 2708 Offsets->clear(); 2709 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 2710 return SplitTys.size() > 1; 2711 } 2712 2713 bool IRTranslator::translate(const Instruction &Inst) { 2714 CurBuilder->setDebugLoc(Inst.getDebugLoc()); 2715 // We only emit constants into the entry block from here. To prevent jumpy 2716 // debug behaviour set the line to 0. 2717 if (const DebugLoc &DL = Inst.getDebugLoc()) 2718 EntryBuilder->setDebugLoc( 2719 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt())); 2720 else 2721 EntryBuilder->setDebugLoc(DebugLoc()); 2722 2723 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2724 if (TLI.fallBackToDAGISel(Inst)) 2725 return false; 2726 2727 switch (Inst.getOpcode()) { 2728 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2729 case Instruction::OPCODE: \ 2730 return translate##OPCODE(Inst, *CurBuilder.get()); 2731 #include "llvm/IR/Instruction.def" 2732 default: 2733 return false; 2734 } 2735 } 2736 2737 bool IRTranslator::translate(const Constant &C, Register Reg) { 2738 if (auto CI = dyn_cast<ConstantInt>(&C)) 2739 EntryBuilder->buildConstant(Reg, *CI); 2740 else if (auto CF = dyn_cast<ConstantFP>(&C)) 2741 EntryBuilder->buildFConstant(Reg, *CF); 2742 else if (isa<UndefValue>(C)) 2743 EntryBuilder->buildUndef(Reg); 2744 else if (isa<ConstantPointerNull>(C)) 2745 EntryBuilder->buildConstant(Reg, 0); 2746 else if (auto GV = dyn_cast<GlobalValue>(&C)) 2747 EntryBuilder->buildGlobalValue(Reg, GV); 2748 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 2749 if (!CAZ->getType()->isVectorTy()) 2750 return false; 2751 // Return the scalar if it is a <1 x Ty> vector. 2752 if (CAZ->getNumElements() == 1) 2753 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); 2754 SmallVector<Register, 4> Ops; 2755 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 2756 Constant &Elt = *CAZ->getElementValue(i); 2757 Ops.push_back(getOrCreateVReg(Elt)); 2758 } 2759 EntryBuilder->buildBuildVector(Reg, Ops); 2760 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 2761 // Return the scalar if it is a <1 x Ty> vector. 
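// As with the other vector cases, <1 x Ty> is not a legal vector type in
// LLT, so the single element is forwarded as a scalar copy.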
2762 if (CV->getNumElements() == 1) 2763 return translateCopy(C, *CV->getElementAsConstant(0), 2764 *EntryBuilder.get()); 2765 SmallVector<Register, 4> Ops; 2766 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 2767 Constant &Elt = *CV->getElementAsConstant(i); 2768 Ops.push_back(getOrCreateVReg(Elt)); 2769 } 2770 EntryBuilder->buildBuildVector(Reg, Ops); 2771 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 2772 switch(CE->getOpcode()) { 2773 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2774 case Instruction::OPCODE: \ 2775 return translate##OPCODE(*CE, *EntryBuilder.get()); 2776 #include "llvm/IR/Instruction.def" 2777 default: 2778 return false; 2779 } 2780 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 2781 if (CV->getNumOperands() == 1) 2782 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); 2783 SmallVector<Register, 4> Ops; 2784 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 2785 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 2786 } 2787 EntryBuilder->buildBuildVector(Reg, Ops); 2788 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 2789 EntryBuilder->buildBlockAddress(Reg, BA); 2790 } else 2791 return false; 2792 2793 return true; 2794 } 2795 2796 void IRTranslator::finalizeBasicBlock() { 2797 for (auto &BTB : SL->BitTestCases) { 2798 // Emit header first, if it wasn't already emitted. 2799 if (!BTB.Emitted) 2800 emitBitTestHeader(BTB, BTB.Parent); 2801 2802 BranchProbability UnhandledProb = BTB.Prob; 2803 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { 2804 UnhandledProb -= BTB.Cases[j].ExtraProb; 2805 // Set the current basic block to the mbb we wish to insert the code into 2806 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB; 2807 // If all cases cover a contiguous range, it is not necessary to jump to 2808 // the default block after the last bit test fails. This is because the 2809 // range check during bit test header creation has guaranteed that every 2810 // case here doesn't go outside the range. In this case, there is no need 2811 // to perform the last bit test, as it will always be true. Instead, make 2812 // the second-to-last bit-test fall through to the target of the last bit 2813 // test, and delete the last bit test. 2814 2815 MachineBasicBlock *NextMBB; 2816 if (BTB.ContiguousRange && j + 2 == ej) { 2817 // Second-to-last bit-test with contiguous range: fall through to the 2818 // target of the final bit test. 2819 NextMBB = BTB.Cases[j + 1].TargetBB; 2820 } else if (j + 1 == ej) { 2821 // For the last bit test, fall through to Default. 2822 NextMBB = BTB.Default; 2823 } else { 2824 // Otherwise, fall through to the next bit test. 2825 NextMBB = BTB.Cases[j + 1].ThisBB; 2826 } 2827 2828 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB); 2829 2830 // FIXME delete this block below? 2831 if (BTB.ContiguousRange && j + 2 == ej) { 2832 // Since we're not going to use the final bit test, remove it. 2833 BTB.Cases.pop_back(); 2834 break; 2835 } 2836 } 2837 // This is "default" BB. We have two jumps to it. From "header" BB and from 2838 // last "case" BB, unless the latter was skipped. 2839 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(), 2840 BTB.Default->getBasicBlock()}; 2841 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent); 2842 if (!BTB.ContiguousRange) { 2843 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB); 2844 } 2845 } 2846 SL->BitTestCases.clear(); 2847 2848 for (auto &JTCase : SL->JTCases) { 2849 // Emit header first, if it wasn't already emitted. 
2850 if (!JTCase.first.Emitted) 2851 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); 2852 2853 emitJumpTable(JTCase.second, JTCase.second.MBB); 2854 } 2855 SL->JTCases.clear(); 2856 2857 for (auto &SwCase : SL->SwitchCases) 2858 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder); 2859 SL->SwitchCases.clear(); 2860 } 2861 2862 void IRTranslator::finalizeFunction() { 2863 // Release the memory used by the different maps we 2864 // needed during the translation. 2865 PendingPHIs.clear(); 2866 VMap.reset(); 2867 FrameIndices.clear(); 2868 MachinePreds.clear(); 2869 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it 2870 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid 2871 // destroying it twice (in ~IRTranslator() and ~LLVMContext()) 2872 EntryBuilder.reset(); 2873 CurBuilder.reset(); 2874 FuncInfo.clear(); 2875 } 2876 2877 /// Returns true if a BasicBlock \p BB within a variadic function contains a 2878 /// variadic musttail call. 2879 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { 2880 if (!IsVarArg) 2881 return false; 2882 2883 // Walk the block backwards, because tail calls usually only appear at the end 2884 // of a block. 2885 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { 2886 const auto *CI = dyn_cast<CallInst>(&I); 2887 return CI && CI->isMustTailCall(); 2888 }); 2889 } 2890 2891 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 2892 MF = &CurMF; 2893 const Function &F = MF->getFunction(); 2894 if (F.empty()) 2895 return false; 2896 GISelCSEAnalysisWrapper &Wrapper = 2897 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); 2898 // Set the CSEConfig and run the analysis. 2899 GISelCSEInfo *CSEInfo = nullptr; 2900 TPC = &getAnalysis<TargetPassConfig>(); 2901 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() 2902 ? EnableCSEInIRTranslator 2903 : TPC->isGISelCSEEnabled(); 2904 2905 if (EnableCSE) { 2906 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2907 CSEInfo = &Wrapper.get(TPC->getCSEConfig()); 2908 EntryBuilder->setCSEInfo(CSEInfo); 2909 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2910 CurBuilder->setCSEInfo(CSEInfo); 2911 } else { 2912 EntryBuilder = std::make_unique<MachineIRBuilder>(); 2913 CurBuilder = std::make_unique<MachineIRBuilder>(); 2914 } 2915 CLI = MF->getSubtarget().getCallLowering(); 2916 CurBuilder->setMF(*MF); 2917 EntryBuilder->setMF(*MF); 2918 MRI = &MF->getRegInfo(); 2919 DL = &F.getParent()->getDataLayout(); 2920 ORE = std::make_unique<OptimizationRemarkEmitter>(&F); 2921 const TargetMachine &TM = MF->getTarget(); 2922 TM.resetTargetOptions(F); 2923 EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F); 2924 FuncInfo.MF = MF; 2925 if (EnableOpts) 2926 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI(); 2927 else 2928 FuncInfo.BPI = nullptr; 2929 2930 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2931 2932 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); 2933 SL->init(TLI, TM, *DL); 2934 2935 2936 2937 assert(PendingPHIs.empty() && "stale PHIs"); 2938 2939 if (!DL->isLittleEndian()) { 2940 // Currently we don't properly handle big endian code. 
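// Emit a GISelFailure remark; depending on the abort/fallback configuration
// this either stops compilation or lets codegen fall back to SelectionDAG.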
2941 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2942 F.getSubprogram(), &F.getEntryBlock());
2943 R << "unable to translate in big endian mode";
2944 reportTranslationError(*MF, *TPC, *ORE, R);
2945 }
2946
2947 // Release the per-function state when we return, whether we succeeded or not.
2948 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2949
2950 // Set up a separate basic block for the arguments and constants.
2951 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2952 MF->push_back(EntryBB);
2953 EntryBuilder->setMBB(*EntryBB);
2954
2955 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2956 SwiftError.setFunction(CurMF);
2957 SwiftError.createEntriesInEntryBlock(DbgLoc);
2958
2959 bool IsVarArg = F.isVarArg();
2960 bool HasMustTailInVarArgFn = false;
2961
2962 // Create all blocks, in IR order, to preserve the layout.
2963 for (const BasicBlock &BB: F) {
2964 auto *&MBB = BBToMBB[&BB];
2965
2966 MBB = MF->CreateMachineBasicBlock(&BB);
2967 MF->push_back(MBB);
2968
2969 if (BB.hasAddressTaken())
2970 MBB->setHasAddressTaken();
2971
2972 if (!HasMustTailInVarArgFn)
2973 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
2974 }
2975
2976 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
2977
2978 // Make our arguments/constants entry block fall through to the IR entry block.
2979 EntryBB->addSuccessor(&getMBB(F.front()));
2980
2981 if (CLI->fallBackToDAGISel(F)) {
2982 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2983 F.getSubprogram(), &F.getEntryBlock());
2984 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
2985 reportTranslationError(*MF, *TPC, *ORE, R);
2986 return false;
2987 }
2988
2989 // Lower the actual args into this basic block.
2990 SmallVector<ArrayRef<Register>, 8> VRegArgs;
2991 for (const Argument &Arg: F.args()) {
2992 if (DL->getTypeStoreSize(Arg.getType()).isZero())
2993 continue; // Don't handle zero-sized types.
2994 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2995 VRegArgs.push_back(VRegs);
2996
2997 if (Arg.hasSwiftErrorAttr()) {
2998 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2999 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3000 }
3001 }
3002
3003 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
3004 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3005 F.getSubprogram(), &F.getEntryBlock());
3006 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3007 reportTranslationError(*MF, *TPC, *ORE, R);
3008 return false;
3009 }
3010
3011 // Need to visit defs before uses when translating instructions.
3012 GISelObserverWrapper WrapperObserver;
3013 if (EnableCSE && CSEInfo)
3014 WrapperObserver.addObserver(CSEInfo);
3015 {
3016 ReversePostOrderTraversal<const Function *> RPOT(&F);
3017 #ifndef NDEBUG
3018 DILocationVerifier Verifier;
3019 WrapperObserver.addObserver(&Verifier);
3020 #endif // ifndef NDEBUG
3021 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3022 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3023 for (const BasicBlock *BB : RPOT) {
3024 MachineBasicBlock &MBB = getMBB(*BB);
3025 // Set the insertion point of all the following translations to
3026 // the end of this basic block.
3027 CurBuilder->setMBB(MBB);
3028 HasTailCall = false;
3029 for (const Instruction &Inst : *BB) {
3030 // If we translated a tail call in the last step, then we know
3031 // everything after the call is either a return, or something that is
3032 // handled by the call itself. (E.g. a lifetime marker or assume
3033 // intrinsic.) In this case, we should stop translating the block and
3034 // move on.
3035 if (HasTailCall)
3036 break;
3037 #ifndef NDEBUG
3038 Verifier.setCurrentInst(&Inst);
3039 #endif // ifndef NDEBUG
3040 if (translate(Inst))
3041 continue;
3042
3043 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3044 Inst.getDebugLoc(), BB);
3045 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3046
3047 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3048 std::string InstStrStorage;
3049 raw_string_ostream InstStr(InstStrStorage);
3050 InstStr << Inst;
3051
3052 R << ": '" << InstStr.str() << "'";
3053 }
3054
3055 reportTranslationError(*MF, *TPC, *ORE, R);
3056 return false;
3057 }
3058
3059 finalizeBasicBlock();
3060 }
3061 #ifndef NDEBUG
3062 WrapperObserver.removeObserver(&Verifier);
3063 #endif
3064 }
3065
3066 finishPendingPhis();
3067
3068 SwiftError.propagateVRegs();
3069
3070 // Merge the argument lowering and constants block with its single
3071 // successor, the LLVM-IR entry block. We want the basic block to
3072 // be maximal.
3073 assert(EntryBB->succ_size() == 1 &&
3074 "Custom BB used for lowering should have only one successor");
3075 // Get the successor of the current entry block.
3076 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3077 assert(NewEntryBB.pred_size() == 1 &&
3078 "LLVM-IR entry block has a predecessor!?");
3079 // Move all the instructions from the current entry block to the
3080 // new entry block.
3081 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3082 EntryBB->end());
3083
3084 // Update the live-in information for the new entry block.
3085 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3086 NewEntryBB.addLiveIn(LiveIn);
3087 NewEntryBB.sortUniqueLiveIns();
3088
3089 // Get rid of the now empty basic block.
3090 EntryBB->removeSuccessor(&NewEntryBB);
3091 MF->remove(EntryBB);
3092 MF->DeleteMachineBasicBlock(EntryBB);
3093
3094 assert(&MF->front() == &NewEntryBB &&
3095 "New entry wasn't next in the list of basic block!");
3096
3097 // Initialize stack protector information.
3098 StackProtector &SP = getAnalysis<StackProtector>();
3099 SP.copyToMachineFrameInfo(MF->getFrameInfo());
3100
3101 return false;
3102 }
3103