1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the IRTranslator class. 10 //===----------------------------------------------------------------------===// 11 12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 13 #include "llvm/ADT/PostOrderIterator.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/BranchProbabilityInfo.h" 19 #include "llvm/Analysis/Loads.h" 20 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/Analysis.h" 23 #include "llvm/CodeGen/FunctionLoweringInfo.h" 24 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 25 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 26 #include "llvm/CodeGen/LowLevelType.h" 27 #include "llvm/CodeGen/MachineBasicBlock.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineFunction.h" 30 #include "llvm/CodeGen/MachineInstrBuilder.h" 31 #include "llvm/CodeGen/MachineMemOperand.h" 32 #include "llvm/CodeGen/MachineOperand.h" 33 #include "llvm/CodeGen/MachineRegisterInfo.h" 34 #include "llvm/CodeGen/StackProtector.h" 35 #include "llvm/CodeGen/TargetFrameLowering.h" 36 #include "llvm/CodeGen/TargetInstrInfo.h" 37 #include "llvm/CodeGen/TargetLowering.h" 38 #include "llvm/CodeGen/TargetPassConfig.h" 39 #include "llvm/CodeGen/TargetRegisterInfo.h" 40 #include "llvm/CodeGen/TargetSubtargetInfo.h" 41 #include "llvm/IR/BasicBlock.h" 42 #include "llvm/IR/CFG.h" 43 #include "llvm/IR/Constant.h" 44 #include "llvm/IR/Constants.h" 45 #include "llvm/IR/DataLayout.h" 46 #include "llvm/IR/DebugInfo.h" 47 #include "llvm/IR/DerivedTypes.h" 48 #include "llvm/IR/Function.h" 49 #include "llvm/IR/GetElementPtrTypeIterator.h" 50 #include "llvm/IR/InlineAsm.h" 51 #include "llvm/IR/InstrTypes.h" 52 #include "llvm/IR/Instructions.h" 53 #include "llvm/IR/IntrinsicInst.h" 54 #include "llvm/IR/Intrinsics.h" 55 #include "llvm/IR/LLVMContext.h" 56 #include "llvm/IR/Metadata.h" 57 #include "llvm/IR/Type.h" 58 #include "llvm/IR/User.h" 59 #include "llvm/IR/Value.h" 60 #include "llvm/InitializePasses.h" 61 #include "llvm/MC/MCContext.h" 62 #include "llvm/Pass.h" 63 #include "llvm/Support/Casting.h" 64 #include "llvm/Support/CodeGen.h" 65 #include "llvm/Support/Debug.h" 66 #include "llvm/Support/ErrorHandling.h" 67 #include "llvm/Support/LowLevelTypeImpl.h" 68 #include "llvm/Support/MathExtras.h" 69 #include "llvm/Support/raw_ostream.h" 70 #include "llvm/Target/TargetIntrinsicInfo.h" 71 #include "llvm/Target/TargetMachine.h" 72 #include <algorithm> 73 #include <cassert> 74 #include <cstdint> 75 #include <iterator> 76 #include <string> 77 #include <utility> 78 #include <vector> 79 80 #define DEBUG_TYPE "irtranslator" 81 82 using namespace llvm; 83 84 static cl::opt<bool> 85 EnableCSEInIRTranslator("enable-cse-in-irtranslator", 86 cl::desc("Should enable CSE in irtranslator"), 87 cl::Optional, cl::init(false)); 88 char IRTranslator::ID = 0; 89 90 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 91 false, false) 92 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 93 
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) 94 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 95 false, false) 96 97 static void reportTranslationError(MachineFunction &MF, 98 const TargetPassConfig &TPC, 99 OptimizationRemarkEmitter &ORE, 100 OptimizationRemarkMissed &R) { 101 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 102 103 // Print the function name explicitly if we don't have a debug location (which 104 // makes the diagnostic less useful) or if we're going to emit a raw error. 105 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 106 R << (" (in function: " + MF.getName() + ")").str(); 107 108 if (TPC.isGlobalISelAbortEnabled()) 109 report_fatal_error(R.getMsg()); 110 else 111 ORE.emit(R); 112 } 113 114 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { } 115 116 #ifndef NDEBUG 117 namespace { 118 /// Verify that every instruction created has the same DILocation as the 119 /// instruction being translated. 120 class DILocationVerifier : public GISelChangeObserver { 121 const Instruction *CurrInst = nullptr; 122 123 public: 124 DILocationVerifier() = default; 125 ~DILocationVerifier() = default; 126 127 const Instruction *getCurrentInst() const { return CurrInst; } 128 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 129 130 void erasingInstr(MachineInstr &MI) override {} 131 void changingInstr(MachineInstr &MI) override {} 132 void changedInstr(MachineInstr &MI) override {} 133 134 void createdInstr(MachineInstr &MI) override { 135 assert(getCurrentInst() && "Inserted instruction without a current MI"); 136 137 // Only print the check message if we're actually checking it. 138 #ifndef NDEBUG 139 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 140 << " was copied to " << MI); 141 #endif 142 // We allow insts in the entry block to have a debug loc line of 0 because 143 // they could have originated from constants, and we don't want a jumpy 144 // debug experience. 145 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || 146 MI.getDebugLoc().getLine() == 0) && 147 "Line info was not transferred to all instructions"); 148 } 149 }; 150 } // namespace 151 #endif // ifndef NDEBUG 152 153 154 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 155 AU.addRequired<StackProtector>(); 156 AU.addRequired<TargetPassConfig>(); 157 AU.addRequired<GISelCSEAnalysisWrapperPass>(); 158 getSelectionDAGFallbackAnalysisUsage(AU); 159 MachineFunctionPass::getAnalysisUsage(AU); 160 } 161 162 IRTranslator::ValueToVRegInfo::VRegListT & 163 IRTranslator::allocateVRegs(const Value &Val) { 164 assert(!VMap.contains(Val) && "Value already allocated in VMap"); 165 auto *Regs = VMap.getVRegs(Val); 166 auto *Offsets = VMap.getOffsets(Val); 167 SmallVector<LLT, 4> SplitTys; 168 computeValueLLTs(*DL, *Val.getType(), SplitTys, 169 Offsets->empty() ? Offsets : nullptr); 170 for (unsigned i = 0; i < SplitTys.size(); ++i) 171 Regs->push_back(0); 172 return *Regs; 173 } 174 175 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { 176 auto VRegsIt = VMap.findVRegs(Val); 177 if (VRegsIt != VMap.vregs_end()) 178 return *VRegsIt->second; 179 180 if (Val.getType()->isVoidTy()) 181 return *VMap.getVRegs(Val); 182 183 // Create entry for this type. 
184 auto *VRegs = VMap.getVRegs(Val); 185 auto *Offsets = VMap.getOffsets(Val); 186 187 assert(Val.getType()->isSized() && 188 "Don't know how to create an empty vreg"); 189 190 SmallVector<LLT, 4> SplitTys; 191 computeValueLLTs(*DL, *Val.getType(), SplitTys, 192 Offsets->empty() ? Offsets : nullptr); 193 194 if (!isa<Constant>(Val)) { 195 for (auto Ty : SplitTys) 196 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 197 return *VRegs; 198 } 199 200 if (Val.getType()->isAggregateType()) { 201 // UndefValue, ConstantAggregateZero 202 auto &C = cast<Constant>(Val); 203 unsigned Idx = 0; 204 while (auto Elt = C.getAggregateElement(Idx++)) { 205 auto EltRegs = getOrCreateVRegs(*Elt); 206 llvm::copy(EltRegs, std::back_inserter(*VRegs)); 207 } 208 } else { 209 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 210 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 211 bool Success = translate(cast<Constant>(Val), VRegs->front()); 212 if (!Success) { 213 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 214 MF->getFunction().getSubprogram(), 215 &MF->getFunction().getEntryBlock()); 216 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 217 reportTranslationError(*MF, *TPC, *ORE, R); 218 return *VRegs; 219 } 220 } 221 222 return *VRegs; 223 } 224 225 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 226 if (FrameIndices.find(&AI) != FrameIndices.end()) 227 return FrameIndices[&AI]; 228 229 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); 230 uint64_t Size = 231 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 232 233 // Always allocate at least one byte. 234 Size = std::max<uint64_t>(Size, 1u); 235 236 unsigned Alignment = AI.getAlignment(); 237 if (!Alignment) 238 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 239 240 int &FI = FrameIndices[&AI]; 241 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 242 return FI; 243 } 244 245 Align IRTranslator::getMemOpAlign(const Instruction &I) { 246 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { 247 Type *ValTy = SI->getValueOperand()->getType(); 248 return SI->getAlign().getValueOr(DL->getABITypeAlign(ValTy)); 249 } 250 if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 251 return DL->getValueOrABITypeAlignment(LI->getAlign(), LI->getType()); 252 } 253 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 254 // TODO(PR27168): This instruction has no alignment attribute, but unlike 255 // the default alignment for load/store, the default here is to assume 256 // it has NATURAL alignment, not DataLayout-specified alignment. 257 const DataLayout &DL = AI->getModule()->getDataLayout(); 258 return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType())); 259 } 260 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 261 // TODO(PR27168): This instruction has no alignment attribute, but unlike 262 // the default alignment for load/store, the default here is to assume 263 // it has NATURAL alignment, not DataLayout-specified alignment. 
264 const DataLayout &DL = AI->getModule()->getDataLayout(); 265 return Align(DL.getTypeStoreSize(AI->getValOperand()->getType())); 266 } 267 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 268 R << "unable to translate memop: " << ore::NV("Opcode", &I); 269 reportTranslationError(*MF, *TPC, *ORE, R); 270 return Align(1); 271 } 272 273 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 274 MachineBasicBlock *&MBB = BBToMBB[&BB]; 275 assert(MBB && "BasicBlock was not encountered before"); 276 return *MBB; 277 } 278 279 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 280 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 281 MachinePreds[Edge].push_back(NewPred); 282 } 283 284 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 285 MachineIRBuilder &MIRBuilder) { 286 // Get or create a virtual register for each value. 287 // Unless the value is a Constant => loadimm cst? 288 // or inline constant each time? 289 // Creation of a virtual register needs to have a size. 290 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 291 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 292 Register Res = getOrCreateVReg(U); 293 uint16_t Flags = 0; 294 if (isa<Instruction>(U)) { 295 const Instruction &I = cast<Instruction>(U); 296 Flags = MachineInstr::copyFlagsFromInstruction(I); 297 } 298 299 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); 300 return true; 301 } 302 303 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 304 // -0.0 - X --> G_FNEG 305 if (isa<Constant>(U.getOperand(0)) && 306 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 307 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 308 Register Res = getOrCreateVReg(U); 309 uint16_t Flags = 0; 310 if (isa<Instruction>(U)) { 311 const Instruction &I = cast<Instruction>(U); 312 Flags = MachineInstr::copyFlagsFromInstruction(I); 313 } 314 // Negate the last operand of the FSUB 315 MIRBuilder.buildFNeg(Res, Op1, Flags); 316 return true; 317 } 318 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 319 } 320 321 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { 322 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 323 Register Res = getOrCreateVReg(U); 324 uint16_t Flags = 0; 325 if (isa<Instruction>(U)) { 326 const Instruction &I = cast<Instruction>(U); 327 Flags = MachineInstr::copyFlagsFromInstruction(I); 328 } 329 MIRBuilder.buildFNeg(Res, Op0, Flags); 330 return true; 331 } 332 333 bool IRTranslator::translateCompare(const User &U, 334 MachineIRBuilder &MIRBuilder) { 335 auto *CI = dyn_cast<CmpInst>(&U); 336 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 337 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 338 Register Res = getOrCreateVReg(U); 339 CmpInst::Predicate Pred = 340 CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( 341 cast<ConstantExpr>(U).getPredicate()); 342 if (CmpInst::isIntPredicate(Pred)) 343 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 344 else if (Pred == CmpInst::FCMP_FALSE) 345 MIRBuilder.buildCopy( 346 Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); 347 else if (Pred == CmpInst::FCMP_TRUE) 348 MIRBuilder.buildCopy( 349 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); 350 else { 351 assert(CI && "Instruction should be CmpInst"); 352 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, 353 MachineInstr::copyFlagsFromInstruction(*CI)); 354 } 355 356 return true; 357 } 358 359 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 360 const ReturnInst &RI = cast<ReturnInst>(U); 361 const Value *Ret = RI.getReturnValue(); 362 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 363 Ret = nullptr; 364 365 ArrayRef<Register> VRegs; 366 if (Ret) 367 VRegs = getOrCreateVRegs(*Ret); 368 369 Register SwiftErrorVReg = 0; 370 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { 371 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( 372 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); 373 } 374 375 // The target may mess up with the insertion point, but 376 // this is not important as a return is the last instruction 377 // of the block anyway. 378 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg); 379 } 380 381 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 382 const BranchInst &BrInst = cast<BranchInst>(U); 383 unsigned Succ = 0; 384 if (!BrInst.isUnconditional()) { 385 // We want a G_BRCOND to the true BB followed by an unconditional branch. 386 Register Tst = getOrCreateVReg(*BrInst.getCondition()); 387 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); 388 MachineBasicBlock &TrueBB = getMBB(TrueTgt); 389 MIRBuilder.buildBrCond(Tst, TrueBB); 390 } 391 392 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); 393 MachineBasicBlock &TgtBB = getMBB(BrTgt); 394 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 395 396 // If the unconditional target is the layout successor, fallthrough. 397 if (!CurBB.isLayoutSuccessor(&TgtBB)) 398 MIRBuilder.buildBr(TgtBB); 399 400 // Link successors. 401 for (const BasicBlock *Succ : successors(&BrInst)) 402 CurBB.addSuccessor(&getMBB(*Succ)); 403 return true; 404 } 405 406 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, 407 MachineBasicBlock *Dst, 408 BranchProbability Prob) { 409 if (!FuncInfo.BPI) { 410 Src->addSuccessorWithoutProb(Dst); 411 return; 412 } 413 if (Prob.isUnknown()) 414 Prob = getEdgeProbability(Src, Dst); 415 Src->addSuccessor(Dst, Prob); 416 } 417 418 BranchProbability 419 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, 420 const MachineBasicBlock *Dst) const { 421 const BasicBlock *SrcBB = Src->getBasicBlock(); 422 const BasicBlock *DstBB = Dst->getBasicBlock(); 423 if (!FuncInfo.BPI) { 424 // If BPI is not available, set the default probability as 1 / N, where N is 425 // the number of successors. 426 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 427 return BranchProbability(1, SuccSize); 428 } 429 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); 430 } 431 432 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { 433 using namespace SwitchCG; 434 // Extract cases from the switch. 
435 const SwitchInst &SI = cast<SwitchInst>(U); 436 BranchProbabilityInfo *BPI = FuncInfo.BPI; 437 CaseClusterVector Clusters; 438 Clusters.reserve(SI.getNumCases()); 439 for (auto &I : SI.cases()) { 440 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); 441 assert(Succ && "Could not find successor mbb in mapping"); 442 const ConstantInt *CaseVal = I.getCaseValue(); 443 BranchProbability Prob = 444 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) 445 : BranchProbability(1, SI.getNumCases() + 1); 446 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); 447 } 448 449 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); 450 451 // Cluster adjacent cases with the same destination. We do this at all 452 // optimization levels because it's cheap to do and will make codegen faster 453 // if there are many clusters. 454 sortAndRangeify(Clusters); 455 456 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); 457 458 // If there is only the default destination, jump there directly. 459 if (Clusters.empty()) { 460 SwitchMBB->addSuccessor(DefaultMBB); 461 if (DefaultMBB != SwitchMBB->getNextNode()) 462 MIB.buildBr(*DefaultMBB); 463 return true; 464 } 465 466 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); 467 468 LLVM_DEBUG({ 469 dbgs() << "Case clusters: "; 470 for (const CaseCluster &C : Clusters) { 471 if (C.Kind == CC_JumpTable) 472 dbgs() << "JT:"; 473 if (C.Kind == CC_BitTests) 474 dbgs() << "BT:"; 475 476 C.Low->getValue().print(dbgs(), true); 477 if (C.Low != C.High) { 478 dbgs() << '-'; 479 C.High->getValue().print(dbgs(), true); 480 } 481 dbgs() << ' '; 482 } 483 dbgs() << '\n'; 484 }); 485 486 assert(!Clusters.empty()); 487 SwitchWorkList WorkList; 488 CaseClusterIt First = Clusters.begin(); 489 CaseClusterIt Last = Clusters.end() - 1; 490 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); 491 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); 492 493 // FIXME: At the moment we don't do any splitting optimizations here like 494 // SelectionDAG does, so this worklist only has one entry. 495 while (!WorkList.empty()) { 496 SwitchWorkListItem W = WorkList.back(); 497 WorkList.pop_back(); 498 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) 499 return false; 500 } 501 return true; 502 } 503 504 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, 505 MachineBasicBlock *MBB) { 506 // Emit the code for the jump table 507 assert(JT.Reg != -1U && "Should lower JT Header first!"); 508 MachineIRBuilder MIB(*MBB->getParent()); 509 MIB.setMBB(*MBB); 510 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 511 512 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); 513 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 514 515 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); 516 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); 517 } 518 519 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, 520 SwitchCG::JumpTableHeader &JTH, 521 MachineBasicBlock *HeaderBB) { 522 MachineIRBuilder MIB(*HeaderBB->getParent()); 523 MIB.setMBB(*HeaderBB); 524 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 525 526 const Value &SValue = *JTH.SValue; 527 // Subtract the lowest switch case value from the value being switched on. 
528 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); 529 Register SwitchOpReg = getOrCreateVReg(SValue); 530 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); 531 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); 532 533 // This value may be smaller or larger than the target's pointer type, and 534 // therefore require extension or truncating. 535 Type *PtrIRTy = SValue.getType()->getPointerTo(); 536 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); 537 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); 538 539 JT.Reg = Sub.getReg(0); 540 541 if (JTH.OmitRangeCheck) { 542 if (JT.MBB != HeaderBB->getNextNode()) 543 MIB.buildBr(*JT.MBB); 544 return true; 545 } 546 547 // Emit the range check for the jump table, and branch to the default block 548 // for the switch statement if the value being switched on exceeds the 549 // largest case in the switch. 550 auto Cst = getOrCreateVReg( 551 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); 552 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); 553 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); 554 555 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); 556 557 // Avoid emitting unnecessary branches to the next block. 558 if (JT.MBB != HeaderBB->getNextNode()) 559 BrCond = MIB.buildBr(*JT.MBB); 560 return true; 561 } 562 563 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, 564 MachineBasicBlock *SwitchBB, 565 MachineIRBuilder &MIB) { 566 Register CondLHS = getOrCreateVReg(*CB.CmpLHS); 567 Register Cond; 568 DebugLoc OldDbgLoc = MIB.getDebugLoc(); 569 MIB.setDebugLoc(CB.DbgLoc); 570 MIB.setMBB(*CB.ThisBB); 571 572 if (CB.PredInfo.NoCmp) { 573 // Branch or fall through to TrueBB. 574 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 575 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 576 CB.ThisBB); 577 CB.ThisBB->normalizeSuccProbs(); 578 if (CB.TrueBB != CB.ThisBB->getNextNode()) 579 MIB.buildBr(*CB.TrueBB); 580 MIB.setDebugLoc(OldDbgLoc); 581 return; 582 } 583 584 const LLT i1Ty = LLT::scalar(1); 585 // Build the compare. 586 if (!CB.CmpMHS) { 587 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 588 Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); 589 } else { 590 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && 591 "Can only handle SLE ranges"); 592 593 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 594 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 595 596 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); 597 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 598 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 599 Cond = 600 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); 601 } else { 602 const LLT CmpTy = MRI->getType(CmpOpReg); 603 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); 604 auto Diff = MIB.buildConstant(CmpTy, High - Low); 605 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); 606 } 607 } 608 609 // Update successor info 610 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 611 612 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 613 CB.ThisBB); 614 615 // TrueBB and FalseBB are always different unless the incoming IR is 616 // degenerate. This only happens when running llc on weird IR. 
617 if (CB.TrueBB != CB.FalseBB) 618 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb); 619 CB.ThisBB->normalizeSuccProbs(); 620 621 // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock()) 622 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()}, 623 CB.ThisBB); 624 625 // If the lhs block is the next block, invert the condition so that we can 626 // fall through to the lhs instead of the rhs block. 627 if (CB.TrueBB == CB.ThisBB->getNextNode()) { 628 std::swap(CB.TrueBB, CB.FalseBB); 629 auto True = MIB.buildConstant(i1Ty, 1); 630 Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0); 631 } 632 633 MIB.buildBrCond(Cond, *CB.TrueBB); 634 MIB.buildBr(*CB.FalseBB); 635 MIB.setDebugLoc(OldDbgLoc); 636 } 637 638 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W, 639 MachineBasicBlock *SwitchMBB, 640 MachineBasicBlock *CurMBB, 641 MachineBasicBlock *DefaultMBB, 642 MachineIRBuilder &MIB, 643 MachineFunction::iterator BBI, 644 BranchProbability UnhandledProbs, 645 SwitchCG::CaseClusterIt I, 646 MachineBasicBlock *Fallthrough, 647 bool FallthroughUnreachable) { 648 using namespace SwitchCG; 649 MachineFunction *CurMF = SwitchMBB->getParent(); 650 // FIXME: Optimize away range check based on pivot comparisons. 651 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; 652 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; 653 BranchProbability DefaultProb = W.DefaultProb; 654 655 // The jump block hasn't been inserted yet; insert it here. 656 MachineBasicBlock *JumpMBB = JT->MBB; 657 CurMF->insert(BBI, JumpMBB); 658 659 // Since the jump table block is separate from the switch block, we need 660 // to keep track of it as a machine predecessor to the default block, 661 // otherwise we lose the phi edges. 662 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, 663 CurMBB); 664 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, 665 JumpMBB); 666 667 auto JumpProb = I->Prob; 668 auto FallthroughProb = UnhandledProbs; 669 670 // If the default statement is a target of the jump table, we evenly 671 // distribute the default probability to successors of CurMBB. Also 672 // update the probability on the edge from JumpMBB to Fallthrough. 673 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), 674 SE = JumpMBB->succ_end(); 675 SI != SE; ++SI) { 676 if (*SI == DefaultMBB) { 677 JumpProb += DefaultProb / 2; 678 FallthroughProb -= DefaultProb / 2; 679 JumpMBB->setSuccProbability(SI, DefaultProb / 2); 680 JumpMBB->normalizeSuccProbs(); 681 } else { 682 // Also record edges from the jump table block to it's successors. 683 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()}, 684 JumpMBB); 685 } 686 } 687 688 // Skip the range check if the fallthrough block is unreachable. 689 if (FallthroughUnreachable) 690 JTH->OmitRangeCheck = true; 691 692 if (!JTH->OmitRangeCheck) 693 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb); 694 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb); 695 CurMBB->normalizeSuccProbs(); 696 697 // The jump table header will be inserted in our current block, do the 698 // range check, and fall through to our fallthrough block. 699 JTH->HeaderBB = CurMBB; 700 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. 701 702 // If we're in the right place, emit the jump table header right now. 
703 if (CurMBB == SwitchMBB) { 704 if (!emitJumpTableHeader(*JT, *JTH, CurMBB)) 705 return false; 706 JTH->Emitted = true; 707 } 708 return true; 709 } 710 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, 711 Value *Cond, 712 MachineBasicBlock *Fallthrough, 713 bool FallthroughUnreachable, 714 BranchProbability UnhandledProbs, 715 MachineBasicBlock *CurMBB, 716 MachineIRBuilder &MIB, 717 MachineBasicBlock *SwitchMBB) { 718 using namespace SwitchCG; 719 const Value *RHS, *LHS, *MHS; 720 CmpInst::Predicate Pred; 721 if (I->Low == I->High) { 722 // Check Cond == I->Low. 723 Pred = CmpInst::ICMP_EQ; 724 LHS = Cond; 725 RHS = I->Low; 726 MHS = nullptr; 727 } else { 728 // Check I->Low <= Cond <= I->High. 729 Pred = CmpInst::ICMP_SLE; 730 LHS = I->Low; 731 MHS = Cond; 732 RHS = I->High; 733 } 734 735 // If Fallthrough is unreachable, fold away the comparison. 736 // The false probability is the sum of all unhandled cases. 737 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, 738 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); 739 740 emitSwitchCase(CB, SwitchMBB, MIB); 741 return true; 742 } 743 744 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, 745 Value *Cond, 746 MachineBasicBlock *SwitchMBB, 747 MachineBasicBlock *DefaultMBB, 748 MachineIRBuilder &MIB) { 749 using namespace SwitchCG; 750 MachineFunction *CurMF = FuncInfo.MF; 751 MachineBasicBlock *NextMBB = nullptr; 752 MachineFunction::iterator BBI(W.MBB); 753 if (++BBI != FuncInfo.MF->end()) 754 NextMBB = &*BBI; 755 756 if (EnableOpts) { 757 // Here, we order cases by probability so the most likely case will be 758 // checked first. However, two clusters can have the same probability in 759 // which case their relative ordering is non-deterministic. So we use Low 760 // as a tie-breaker as clusters are guaranteed to never overlap. 761 llvm::sort(W.FirstCluster, W.LastCluster + 1, 762 [](const CaseCluster &a, const CaseCluster &b) { 763 return a.Prob != b.Prob 764 ? a.Prob > b.Prob 765 : a.Low->getValue().slt(b.Low->getValue()); 766 }); 767 768 // Rearrange the case blocks so that the last one falls through if possible 769 // without changing the order of probabilities. 770 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { 771 --I; 772 if (I->Prob > W.LastCluster->Prob) 773 break; 774 if (I->Kind == CC_Range && I->MBB == NextMBB) { 775 std::swap(*I, *W.LastCluster); 776 break; 777 } 778 } 779 } 780 781 // Compute total probability. 782 BranchProbability DefaultProb = W.DefaultProb; 783 BranchProbability UnhandledProbs = DefaultProb; 784 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 785 UnhandledProbs += I->Prob; 786 787 MachineBasicBlock *CurMBB = W.MBB; 788 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 789 bool FallthroughUnreachable = false; 790 MachineBasicBlock *Fallthrough; 791 if (I == W.LastCluster) { 792 // For the last cluster, fall through to the default destination. 793 Fallthrough = DefaultMBB; 794 FallthroughUnreachable = isa<UnreachableInst>( 795 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 796 } else { 797 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 798 CurMF->insert(BBI, Fallthrough); 799 } 800 UnhandledProbs -= I->Prob; 801 802 switch (I->Kind) { 803 case CC_BitTests: { 804 LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented"); 805 return false; // Bit tests currently unimplemented. 
806 } 807 case CC_JumpTable: { 808 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 809 UnhandledProbs, I, Fallthrough, 810 FallthroughUnreachable)) { 811 LLVM_DEBUG(dbgs() << "Failed to lower jump table"); 812 return false; 813 } 814 break; 815 } 816 case CC_Range: { 817 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, 818 FallthroughUnreachable, UnhandledProbs, 819 CurMBB, MIB, SwitchMBB)) { 820 LLVM_DEBUG(dbgs() << "Failed to lower switch range"); 821 return false; 822 } 823 break; 824 } 825 } 826 CurMBB = Fallthrough; 827 } 828 829 return true; 830 } 831 832 bool IRTranslator::translateIndirectBr(const User &U, 833 MachineIRBuilder &MIRBuilder) { 834 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 835 836 const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); 837 MIRBuilder.buildBrIndirect(Tgt); 838 839 // Link successors. 840 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 841 for (const BasicBlock *Succ : successors(&BrInst)) 842 CurBB.addSuccessor(&getMBB(*Succ)); 843 844 return true; 845 } 846 847 static bool isSwiftError(const Value *V) { 848 if (auto Arg = dyn_cast<Argument>(V)) 849 return Arg->hasSwiftErrorAttr(); 850 if (auto AI = dyn_cast<AllocaInst>(V)) 851 return AI->isSwiftError(); 852 return false; 853 } 854 855 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 856 const LoadInst &LI = cast<LoadInst>(U); 857 if (DL->getTypeStoreSize(LI.getType()) == 0) 858 return true; 859 860 ArrayRef<Register> Regs = getOrCreateVRegs(LI); 861 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 862 Register Base = getOrCreateVReg(*LI.getPointerOperand()); 863 864 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); 865 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 866 867 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { 868 assert(Regs.size() == 1 && "swifterror should be single pointer"); 869 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), 870 LI.getPointerOperand()); 871 MIRBuilder.buildCopy(Regs[0], VReg); 872 return true; 873 } 874 875 auto &TLI = *MF->getSubtarget().getTargetLowering(); 876 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); 877 878 const MDNode *Ranges = 879 Regs.size() == 1 ? 
LI.getMetadata(LLVMContext::MD_range) : nullptr; 880 for (unsigned i = 0; i < Regs.size(); ++i) { 881 Register Addr; 882 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 883 884 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 885 Align BaseAlign = getMemOpAlign(LI); 886 AAMDNodes AAMetadata; 887 LI.getAAMetadata(AAMetadata); 888 auto MMO = MF->getMachineMemOperand( 889 Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), 890 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, 891 LI.getSyncScopeID(), LI.getOrdering()); 892 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 893 } 894 895 return true; 896 } 897 898 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 899 const StoreInst &SI = cast<StoreInst>(U); 900 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 901 return true; 902 903 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); 904 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 905 Register Base = getOrCreateVReg(*SI.getPointerOperand()); 906 907 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); 908 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 909 910 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { 911 assert(Vals.size() == 1 && "swifterror should be single pointer"); 912 913 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), 914 SI.getPointerOperand()); 915 MIRBuilder.buildCopy(VReg, Vals[0]); 916 return true; 917 } 918 919 auto &TLI = *MF->getSubtarget().getTargetLowering(); 920 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); 921 922 for (unsigned i = 0; i < Vals.size(); ++i) { 923 Register Addr; 924 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 925 926 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 927 Align BaseAlign = getMemOpAlign(SI); 928 AAMDNodes AAMetadata; 929 SI.getAAMetadata(AAMetadata); 930 auto MMO = MF->getMachineMemOperand( 931 Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), 932 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, 933 SI.getSyncScopeID(), SI.getOrdering()); 934 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 935 } 936 return true; 937 } 938 939 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 940 const Value *Src = U.getOperand(0); 941 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 942 943 // getIndexedOffsetInType is designed for GEPs, so the first index is the 944 // usual array element rather than looking into the actual aggregate. 
945 SmallVector<Value *, 1> Indices; 946 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 947 948 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 949 for (auto Idx : EVI->indices()) 950 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 951 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 952 for (auto Idx : IVI->indices()) 953 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 954 } else { 955 for (unsigned i = 1; i < U.getNumOperands(); ++i) 956 Indices.push_back(U.getOperand(i)); 957 } 958 959 return 8 * static_cast<uint64_t>( 960 DL.getIndexedOffsetInType(Src->getType(), Indices)); 961 } 962 963 bool IRTranslator::translateExtractValue(const User &U, 964 MachineIRBuilder &MIRBuilder) { 965 const Value *Src = U.getOperand(0); 966 uint64_t Offset = getOffsetFromIndices(U, *DL); 967 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 968 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 969 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); 970 auto &DstRegs = allocateVRegs(U); 971 972 for (unsigned i = 0; i < DstRegs.size(); ++i) 973 DstRegs[i] = SrcRegs[Idx++]; 974 975 return true; 976 } 977 978 bool IRTranslator::translateInsertValue(const User &U, 979 MachineIRBuilder &MIRBuilder) { 980 const Value *Src = U.getOperand(0); 981 uint64_t Offset = getOffsetFromIndices(U, *DL); 982 auto &DstRegs = allocateVRegs(U); 983 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 984 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 985 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 986 auto InsertedIt = InsertedRegs.begin(); 987 988 for (unsigned i = 0; i < DstRegs.size(); ++i) { 989 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 990 DstRegs[i] = *InsertedIt++; 991 else 992 DstRegs[i] = SrcRegs[i]; 993 } 994 995 return true; 996 } 997 998 bool IRTranslator::translateSelect(const User &U, 999 MachineIRBuilder &MIRBuilder) { 1000 Register Tst = getOrCreateVReg(*U.getOperand(0)); 1001 ArrayRef<Register> ResRegs = getOrCreateVRegs(U); 1002 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 1003 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 1004 1005 const SelectInst &SI = cast<SelectInst>(U); 1006 uint16_t Flags = 0; 1007 if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition())) 1008 Flags = MachineInstr::copyFlagsFromInstruction(*Cmp); 1009 1010 for (unsigned i = 0; i < ResRegs.size(); ++i) { 1011 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); 1012 } 1013 1014 return true; 1015 } 1016 1017 bool IRTranslator::translateBitCast(const User &U, 1018 MachineIRBuilder &MIRBuilder) { 1019 // If we're bitcasting to the source type, we can reuse the source vreg. 1020 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 1021 getLLTForType(*U.getType(), *DL)) { 1022 Register SrcReg = getOrCreateVReg(*U.getOperand(0)); 1023 auto &Regs = *VMap.getVRegs(U); 1024 // If we already assigned a vreg for this bitcast, we can't change that. 1025 // Emit a copy to satisfy the users we already emitted. 
1026 if (!Regs.empty()) 1027 MIRBuilder.buildCopy(Regs[0], SrcReg); 1028 else { 1029 Regs.push_back(SrcReg); 1030 VMap.getOffsets(U)->push_back(0); 1031 } 1032 return true; 1033 } 1034 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 1035 } 1036 1037 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 1038 MachineIRBuilder &MIRBuilder) { 1039 Register Op = getOrCreateVReg(*U.getOperand(0)); 1040 Register Res = getOrCreateVReg(U); 1041 MIRBuilder.buildInstr(Opcode, {Res}, {Op}); 1042 return true; 1043 } 1044 1045 bool IRTranslator::translateGetElementPtr(const User &U, 1046 MachineIRBuilder &MIRBuilder) { 1047 Value &Op0 = *U.getOperand(0); 1048 Register BaseReg = getOrCreateVReg(Op0); 1049 Type *PtrIRTy = Op0.getType(); 1050 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 1051 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1052 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1053 1054 // Normalize Vector GEP - all scalar operands should be converted to the 1055 // splat vector. 1056 unsigned VectorWidth = 0; 1057 if (auto *VT = dyn_cast<VectorType>(U.getType())) 1058 VectorWidth = VT->getNumElements(); 1059 1060 // We might need to splat the base pointer into a vector if the offsets 1061 // are vectors. 1062 if (VectorWidth && !PtrTy.isVector()) { 1063 BaseReg = 1064 MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg) 1065 .getReg(0); 1066 PtrIRTy = VectorType::get(PtrIRTy, VectorWidth); 1067 PtrTy = getLLTForType(*PtrIRTy, *DL); 1068 OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1069 OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1070 } 1071 1072 int64_t Offset = 0; 1073 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 1074 GTI != E; ++GTI) { 1075 const Value *Idx = GTI.getOperand(); 1076 if (StructType *StTy = GTI.getStructTypeOrNull()) { 1077 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 1078 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 1079 continue; 1080 } else { 1081 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 1082 1083 // If this is a scalar constant or a splat vector of constants, 1084 // handle it quickly. 1085 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 1086 Offset += ElementSize * CI->getSExtValue(); 1087 continue; 1088 } 1089 1090 if (Offset != 0) { 1091 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); 1092 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) 1093 .getReg(0); 1094 Offset = 0; 1095 } 1096 1097 Register IdxReg = getOrCreateVReg(*Idx); 1098 LLT IdxTy = MRI->getType(IdxReg); 1099 if (IdxTy != OffsetTy) { 1100 if (!IdxTy.isVector() && VectorWidth) { 1101 IdxReg = MIRBuilder.buildSplatVector( 1102 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); 1103 } 1104 1105 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); 1106 } 1107 1108 // N = N + Idx * ElementSize; 1109 // Avoid doing it for ElementSize of 1. 
1110 Register GepOffsetReg; 1111 if (ElementSize != 1) { 1112 auto ElementSizeMIB = MIRBuilder.buildConstant( 1113 getLLTForType(*OffsetIRTy, *DL), ElementSize); 1114 GepOffsetReg = 1115 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); 1116 } else 1117 GepOffsetReg = IdxReg; 1118 1119 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); 1120 } 1121 } 1122 1123 if (Offset != 0) { 1124 auto OffsetMIB = 1125 MIRBuilder.buildConstant(OffsetTy, Offset); 1126 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); 1127 return true; 1128 } 1129 1130 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 1131 return true; 1132 } 1133 1134 bool IRTranslator::translateMemFunc(const CallInst &CI, 1135 MachineIRBuilder &MIRBuilder, 1136 Intrinsic::ID ID) { 1137 1138 // If the source is undef, then just emit a nop. 1139 if (isa<UndefValue>(CI.getArgOperand(1))) 1140 return true; 1141 1142 ArrayRef<Register> Res; 1143 auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true); 1144 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) 1145 ICall.addUse(getOrCreateVReg(**AI)); 1146 1147 Align DstAlign; 1148 Align SrcAlign; 1149 unsigned IsVol = 1150 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) 1151 ->getZExtValue(); 1152 1153 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { 1154 DstAlign = MCI->getDestAlign().valueOrOne(); 1155 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1156 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { 1157 DstAlign = MMI->getDestAlign().valueOrOne(); 1158 SrcAlign = MMI->getSourceAlign().valueOrOne(); 1159 } else { 1160 auto *MSI = cast<MemSetInst>(&CI); 1161 DstAlign = MSI->getDestAlign().valueOrOne(); 1162 } 1163 1164 // We need to propagate the tail call flag from the IR inst as an argument. 1165 // Otherwise, we have to pessimize and assume later that we cannot tail call 1166 // any memory intrinsics. 1167 ICall.addImm(CI.isTailCall() ? 1 : 0); 1168 1169 // Create mem operands to store the alignment and volatile info. 1170 auto VolFlag = IsVol ? 
MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 1171 ICall.addMemOperand(MF->getMachineMemOperand( 1172 MachinePointerInfo(CI.getArgOperand(0)), 1173 MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); 1174 if (ID != Intrinsic::memset) 1175 ICall.addMemOperand(MF->getMachineMemOperand( 1176 MachinePointerInfo(CI.getArgOperand(1)), 1177 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); 1178 1179 return true; 1180 } 1181 1182 void IRTranslator::getStackGuard(Register DstReg, 1183 MachineIRBuilder &MIRBuilder) { 1184 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 1185 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 1186 auto MIB = 1187 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); 1188 1189 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1190 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 1191 if (!Global) 1192 return; 1193 1194 MachinePointerInfo MPInfo(Global); 1195 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 1196 MachineMemOperand::MODereferenceable; 1197 MachineMemOperand *MemRef = 1198 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 1199 DL->getPointerABIAlignment(0)); 1200 MIB.setMemRefs({MemRef}); 1201 } 1202 1203 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 1204 MachineIRBuilder &MIRBuilder) { 1205 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); 1206 MIRBuilder.buildInstr( 1207 Op, {ResRegs[0], ResRegs[1]}, 1208 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); 1209 1210 return true; 1211 } 1212 1213 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { 1214 switch (ID) { 1215 default: 1216 break; 1217 case Intrinsic::bswap: 1218 return TargetOpcode::G_BSWAP; 1219 case Intrinsic::bitreverse: 1220 return TargetOpcode::G_BITREVERSE; 1221 case Intrinsic::fshl: 1222 return TargetOpcode::G_FSHL; 1223 case Intrinsic::fshr: 1224 return TargetOpcode::G_FSHR; 1225 case Intrinsic::ceil: 1226 return TargetOpcode::G_FCEIL; 1227 case Intrinsic::cos: 1228 return TargetOpcode::G_FCOS; 1229 case Intrinsic::ctpop: 1230 return TargetOpcode::G_CTPOP; 1231 case Intrinsic::exp: 1232 return TargetOpcode::G_FEXP; 1233 case Intrinsic::exp2: 1234 return TargetOpcode::G_FEXP2; 1235 case Intrinsic::fabs: 1236 return TargetOpcode::G_FABS; 1237 case Intrinsic::copysign: 1238 return TargetOpcode::G_FCOPYSIGN; 1239 case Intrinsic::minnum: 1240 return TargetOpcode::G_FMINNUM; 1241 case Intrinsic::maxnum: 1242 return TargetOpcode::G_FMAXNUM; 1243 case Intrinsic::minimum: 1244 return TargetOpcode::G_FMINIMUM; 1245 case Intrinsic::maximum: 1246 return TargetOpcode::G_FMAXIMUM; 1247 case Intrinsic::canonicalize: 1248 return TargetOpcode::G_FCANONICALIZE; 1249 case Intrinsic::floor: 1250 return TargetOpcode::G_FFLOOR; 1251 case Intrinsic::fma: 1252 return TargetOpcode::G_FMA; 1253 case Intrinsic::log: 1254 return TargetOpcode::G_FLOG; 1255 case Intrinsic::log2: 1256 return TargetOpcode::G_FLOG2; 1257 case Intrinsic::log10: 1258 return TargetOpcode::G_FLOG10; 1259 case Intrinsic::nearbyint: 1260 return TargetOpcode::G_FNEARBYINT; 1261 case Intrinsic::pow: 1262 return TargetOpcode::G_FPOW; 1263 case Intrinsic::rint: 1264 return TargetOpcode::G_FRINT; 1265 case Intrinsic::round: 1266 return TargetOpcode::G_INTRINSIC_ROUND; 1267 case Intrinsic::sin: 1268 return TargetOpcode::G_FSIN; 1269 case Intrinsic::sqrt: 1270 return TargetOpcode::G_FSQRT; 1271 case Intrinsic::trunc: 1272 return 
TargetOpcode::G_INTRINSIC_TRUNC; 1273 case Intrinsic::readcyclecounter: 1274 return TargetOpcode::G_READCYCLECOUNTER; 1275 } 1276 return Intrinsic::not_intrinsic; 1277 } 1278 1279 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, 1280 Intrinsic::ID ID, 1281 MachineIRBuilder &MIRBuilder) { 1282 1283 unsigned Op = getSimpleIntrinsicOpcode(ID); 1284 1285 // Is this a simple intrinsic? 1286 if (Op == Intrinsic::not_intrinsic) 1287 return false; 1288 1289 // Yes. Let's translate it. 1290 SmallVector<llvm::SrcOp, 4> VRegs; 1291 for (auto &Arg : CI.arg_operands()) 1292 VRegs.push_back(getOrCreateVReg(*Arg)); 1293 1294 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs, 1295 MachineInstr::copyFlagsFromInstruction(CI)); 1296 return true; 1297 } 1298 1299 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 1300 MachineIRBuilder &MIRBuilder) { 1301 1302 // If this is a simple intrinsic (that is, we just need to add a def of 1303 // a vreg, and uses for each arg operand, then translate it. 1304 if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) 1305 return true; 1306 1307 switch (ID) { 1308 default: 1309 break; 1310 case Intrinsic::lifetime_start: 1311 case Intrinsic::lifetime_end: { 1312 // No stack colouring in O0, discard region information. 1313 if (MF->getTarget().getOptLevel() == CodeGenOpt::None) 1314 return true; 1315 1316 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START 1317 : TargetOpcode::LIFETIME_END; 1318 1319 // Get the underlying objects for the location passed on the lifetime 1320 // marker. 1321 SmallVector<const Value *, 4> Allocas; 1322 GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL); 1323 1324 // Iterate over each underlying object, creating lifetime markers for each 1325 // static alloca. Quit if we find a non-static alloca. 1326 for (const Value *V : Allocas) { 1327 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 1328 if (!AI) 1329 continue; 1330 1331 if (!AI->isStaticAlloca()) 1332 return true; 1333 1334 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 1335 } 1336 return true; 1337 } 1338 case Intrinsic::dbg_declare: { 1339 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 1340 assert(DI.getVariable() && "Missing variable"); 1341 1342 const Value *Address = DI.getAddress(); 1343 if (!Address || isa<UndefValue>(Address)) { 1344 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 1345 return true; 1346 } 1347 1348 assert(DI.getVariable()->isValidLocationForIntrinsic( 1349 MIRBuilder.getDebugLoc()) && 1350 "Expected inlined-at fields to agree"); 1351 auto AI = dyn_cast<AllocaInst>(Address); 1352 if (AI && AI->isStaticAlloca()) { 1353 // Static allocas are tracked at the MF level, no need for DBG_VALUE 1354 // instructions (in fact, they get ignored if they *do* exist). 1355 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 1356 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 1357 } else { 1358 // A dbg.declare describes the address of a source variable, so lower it 1359 // into an indirect DBG_VALUE. 
1360 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 1361 DI.getVariable(), DI.getExpression()); 1362 } 1363 return true; 1364 } 1365 case Intrinsic::dbg_label: { 1366 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 1367 assert(DI.getLabel() && "Missing label"); 1368 1369 assert(DI.getLabel()->isValidLocationForIntrinsic( 1370 MIRBuilder.getDebugLoc()) && 1371 "Expected inlined-at fields to agree"); 1372 1373 MIRBuilder.buildDbgLabel(DI.getLabel()); 1374 return true; 1375 } 1376 case Intrinsic::vaend: 1377 // No target I know of cares about va_end. Certainly no in-tree target 1378 // does. Simplest intrinsic ever! 1379 return true; 1380 case Intrinsic::vastart: { 1381 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1382 Value *Ptr = CI.getArgOperand(0); 1383 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 1384 1385 // FIXME: Get alignment 1386 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) 1387 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), 1388 MachineMemOperand::MOStore, 1389 ListSize, Align(1))); 1390 return true; 1391 } 1392 case Intrinsic::dbg_value: { 1393 // This form of DBG_VALUE is target-independent. 1394 const DbgValueInst &DI = cast<DbgValueInst>(CI); 1395 const Value *V = DI.getValue(); 1396 assert(DI.getVariable()->isValidLocationForIntrinsic( 1397 MIRBuilder.getDebugLoc()) && 1398 "Expected inlined-at fields to agree"); 1399 if (!V) { 1400 // Currently the optimizer can produce this; insert an undef to 1401 // help debugging. Probably the optimizer should not do this. 1402 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 1403 } else if (const auto *CI = dyn_cast<Constant>(V)) { 1404 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 1405 } else { 1406 for (Register Reg : getOrCreateVRegs(*V)) { 1407 // FIXME: This does not handle register-indirect values at offset 0. The 1408 // direct/indirect thing shouldn't really be handled by something as 1409 // implicit as reg+noreg vs reg+imm in the first place, but it seems 1410 // pretty baked in right now. 
1411 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 1412 } 1413 } 1414 return true; 1415 } 1416 case Intrinsic::uadd_with_overflow: 1417 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 1418 case Intrinsic::sadd_with_overflow: 1419 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 1420 case Intrinsic::usub_with_overflow: 1421 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 1422 case Intrinsic::ssub_with_overflow: 1423 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 1424 case Intrinsic::umul_with_overflow: 1425 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 1426 case Intrinsic::smul_with_overflow: 1427 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 1428 case Intrinsic::uadd_sat: 1429 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); 1430 case Intrinsic::sadd_sat: 1431 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); 1432 case Intrinsic::usub_sat: 1433 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); 1434 case Intrinsic::ssub_sat: 1435 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); 1436 case Intrinsic::fmuladd: { 1437 const TargetMachine &TM = MF->getTarget(); 1438 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1439 Register Dst = getOrCreateVReg(CI); 1440 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 1441 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 1442 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 1443 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 1444 TLI.isFMAFasterThanFMulAndFAdd(*MF, 1445 TLI.getValueType(*DL, CI.getType()))) { 1446 // TODO: Revisit this to see if we should move this part of the 1447 // lowering to the combiner. 
1448 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, 1449 MachineInstr::copyFlagsFromInstruction(CI)); 1450 } else { 1451 LLT Ty = getLLTForType(*CI.getType(), *DL); 1452 auto FMul = MIRBuilder.buildFMul( 1453 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); 1454 MIRBuilder.buildFAdd(Dst, FMul, Op2, 1455 MachineInstr::copyFlagsFromInstruction(CI)); 1456 } 1457 return true; 1458 } 1459 case Intrinsic::memcpy: 1460 case Intrinsic::memmove: 1461 case Intrinsic::memset: 1462 return translateMemFunc(CI, MIRBuilder, ID); 1463 case Intrinsic::eh_typeid_for: { 1464 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 1465 Register Reg = getOrCreateVReg(CI); 1466 unsigned TypeID = MF->getTypeIDFor(GV); 1467 MIRBuilder.buildConstant(Reg, TypeID); 1468 return true; 1469 } 1470 case Intrinsic::objectsize: 1471 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 1472 1473 case Intrinsic::is_constant: 1474 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 1475 1476 case Intrinsic::stackguard: 1477 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 1478 return true; 1479 case Intrinsic::stackprotector: { 1480 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1481 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); 1482 getStackGuard(GuardVal, MIRBuilder); 1483 1484 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 1485 int FI = getOrCreateFrameIndex(*Slot); 1486 MF->getFrameInfo().setStackProtectorIndex(FI); 1487 1488 MIRBuilder.buildStore( 1489 GuardVal, getOrCreateVReg(*Slot), 1490 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 1491 MachineMemOperand::MOStore | 1492 MachineMemOperand::MOVolatile, 1493 PtrTy.getSizeInBits() / 8, Align(8))); 1494 return true; 1495 } 1496 case Intrinsic::stacksave: { 1497 // Save the stack pointer to the location provided by the intrinsic. 1498 Register Reg = getOrCreateVReg(CI); 1499 Register StackPtr = MF->getSubtarget() 1500 .getTargetLowering() 1501 ->getStackPointerRegisterToSaveRestore(); 1502 1503 // If the target doesn't specify a stack pointer, then fall back. 1504 if (!StackPtr) 1505 return false; 1506 1507 MIRBuilder.buildCopy(Reg, StackPtr); 1508 return true; 1509 } 1510 case Intrinsic::stackrestore: { 1511 // Restore the stack pointer from the location provided by the intrinsic. 1512 Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); 1513 Register StackPtr = MF->getSubtarget() 1514 .getTargetLowering() 1515 ->getStackPointerRegisterToSaveRestore(); 1516 1517 // If the target doesn't specify a stack pointer, then fall back. 1518 if (!StackPtr) 1519 return false; 1520 1521 MIRBuilder.buildCopy(StackPtr, Reg); 1522 return true; 1523 } 1524 case Intrinsic::cttz: 1525 case Intrinsic::ctlz: { 1526 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 1527 bool isTrailing = ID == Intrinsic::cttz; 1528 unsigned Opcode = isTrailing 1529 ? Cst->isZero() ? TargetOpcode::G_CTTZ 1530 : TargetOpcode::G_CTTZ_ZERO_UNDEF 1531 : Cst->isZero() ? 
TargetOpcode::G_CTLZ 1532 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 1533 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, 1534 {getOrCreateVReg(*CI.getArgOperand(0))}); 1535 return true; 1536 } 1537 case Intrinsic::invariant_start: { 1538 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1539 Register Undef = MRI->createGenericVirtualRegister(PtrTy); 1540 MIRBuilder.buildUndef(Undef); 1541 return true; 1542 } 1543 case Intrinsic::invariant_end: 1544 return true; 1545 case Intrinsic::assume: 1546 case Intrinsic::var_annotation: 1547 case Intrinsic::sideeffect: 1548 // Discard annotate attributes, assumptions, and artificial side-effects. 1549 return true; 1550 case Intrinsic::read_register: { 1551 Value *Arg = CI.getArgOperand(0); 1552 MIRBuilder 1553 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) 1554 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); 1555 return true; 1556 } 1557 case Intrinsic::write_register: { 1558 Value *Arg = CI.getArgOperand(0); 1559 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) 1560 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) 1561 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 1562 return true; 1563 } 1564 } 1565 return false; 1566 } 1567 1568 bool IRTranslator::translateInlineAsm(const CallInst &CI, 1569 MachineIRBuilder &MIRBuilder) { 1570 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue()); 1571 StringRef ConstraintStr = IA.getConstraintString(); 1572 1573 bool HasOnlyMemoryClobber = false; 1574 if (!ConstraintStr.empty()) { 1575 // Until we have full inline assembly support, we just try to handle the 1576 // very simple case of just "~{memory}" to avoid falling back so often. 1577 if (ConstraintStr != "~{memory}") 1578 return false; 1579 HasOnlyMemoryClobber = true; 1580 } 1581 1582 unsigned ExtraInfo = 0; 1583 if (IA.hasSideEffects()) 1584 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 1585 if (IA.getDialect() == InlineAsm::AD_Intel) 1586 ExtraInfo |= InlineAsm::Extra_AsmDialect; 1587 1588 // HACK: special casing for ~memory. 1589 if (HasOnlyMemoryClobber) 1590 ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); 1591 1592 auto Inst = MIRBuilder.buildInstr(TargetOpcode::INLINEASM) 1593 .addExternalSymbol(IA.getAsmString().c_str()) 1594 .addImm(ExtraInfo); 1595 if (const MDNode *SrcLoc = CI.getMetadata("srcloc")) 1596 Inst.addMetadata(SrcLoc); 1597 1598 return true; 1599 } 1600 1601 bool IRTranslator::translateCallSite(const ImmutableCallSite &CS, 1602 MachineIRBuilder &MIRBuilder) { 1603 const Instruction &I = *CS.getInstruction(); 1604 ArrayRef<Register> Res = getOrCreateVRegs(I); 1605 1606 SmallVector<ArrayRef<Register>, 8> Args; 1607 Register SwiftInVReg = 0; 1608 Register SwiftErrorVReg = 0; 1609 for (auto &Arg : CS.args()) { 1610 if (CLI->supportSwiftError() && isSwiftError(Arg)) { 1611 assert(SwiftInVReg == 0 && "Expected only one swift error argument"); 1612 LLT Ty = getLLTForType(*Arg->getType(), *DL); 1613 SwiftInVReg = MRI->createGenericVirtualRegister(Ty); 1614 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( 1615 &I, &MIRBuilder.getMBB(), Arg)); 1616 Args.emplace_back(makeArrayRef(SwiftInVReg)); 1617 SwiftErrorVReg = 1618 SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg); 1619 continue; 1620 } 1621 Args.push_back(getOrCreateVRegs(*Arg)); 1622 } 1623 1624 // We don't set HasCalls on MFI here yet because call lowering may decide to 1625 // optimize into tail calls. 
Instead, we defer that to selection where a final 1626 // scan is done to check if any instructions are calls. 1627 bool Success = 1628 CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg, 1629 [&]() { return getOrCreateVReg(*CS.getCalledValue()); }); 1630 1631 // Check if we just inserted a tail call. 1632 if (Success) { 1633 assert(!HasTailCall && "Can't tail call return twice from block?"); 1634 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 1635 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); 1636 } 1637 1638 return Success; 1639 } 1640 1641 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 1642 const CallInst &CI = cast<CallInst>(U); 1643 auto TII = MF->getTarget().getIntrinsicInfo(); 1644 const Function *F = CI.getCalledFunction(); 1645 1646 // FIXME: support Windows dllimport function calls. 1647 if (F && (F->hasDLLImportStorageClass() || 1648 (MF->getTarget().getTargetTriple().isOSWindows() && 1649 F->hasExternalWeakLinkage()))) 1650 return false; 1651 1652 // FIXME: support control flow guard targets. 1653 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 1654 return false; 1655 1656 if (CI.isInlineAsm()) 1657 return translateInlineAsm(CI, MIRBuilder); 1658 1659 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1660 if (F && F->isIntrinsic()) { 1661 ID = F->getIntrinsicID(); 1662 if (TII && ID == Intrinsic::not_intrinsic) 1663 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 1664 } 1665 1666 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) 1667 return translateCallSite(&CI, MIRBuilder); 1668 1669 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 1670 1671 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 1672 return true; 1673 1674 ArrayRef<Register> ResultRegs; 1675 if (!CI.getType()->isVoidTy()) 1676 ResultRegs = getOrCreateVRegs(CI); 1677 1678 // Ignore the callsite attributes. Backend code is most likely not expecting 1679 // an intrinsic to sometimes have side effects and sometimes not. 1680 MachineInstrBuilder MIB = 1681 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); 1682 if (isa<FPMathOperator>(CI)) 1683 MIB->copyIRFlags(CI); 1684 1685 for (auto &Arg : enumerate(CI.arg_operands())) { 1686 // Some intrinsics take metadata parameters. Reject them. 1687 if (isa<MetadataAsValue>(Arg.value())) 1688 return false; 1689 1690 // If this is required to be an immediate, don't materialize it in a 1691 // register. 1692 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { 1693 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { 1694 // imm arguments are more convenient than cimm (and realistically 1695 // probably sufficient), so use them. 1696 assert(CI->getBitWidth() <= 64 && 1697 "large intrinsic immediates not handled"); 1698 MIB.addImm(CI->getSExtValue()); 1699 } else { 1700 MIB.addFPImm(cast<ConstantFP>(Arg.value())); 1701 } 1702 } else { 1703 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); 1704 if (VRegs.size() > 1) 1705 return false; 1706 MIB.addUse(VRegs[0]); 1707 } 1708 } 1709 1710 // Add a MachineMemOperand if it is a target mem intrinsic. 1711 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1712 TargetLowering::IntrinsicInfo Info; 1713 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 
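// If the target recognizes this intrinsic as touching memory, describe the
// access (pointer, flags, size, alignment) with a MachineMemOperand so later
// passes can reason about aliasing and volatility.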
1714 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 1715 Align Alignment = Info.align.getValueOr( 1716 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); 1717 1718 uint64_t Size = Info.memVT.getStoreSize(); 1719 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 1720 Info.flags, Size, Alignment)); 1721 } 1722 1723 return true; 1724 } 1725 1726 bool IRTranslator::translateInvoke(const User &U, 1727 MachineIRBuilder &MIRBuilder) { 1728 const InvokeInst &I = cast<InvokeInst>(U); 1729 MCContext &Context = MF->getContext(); 1730 1731 const BasicBlock *ReturnBB = I.getSuccessor(0); 1732 const BasicBlock *EHPadBB = I.getSuccessor(1); 1733 1734 const Value *Callee = I.getCalledValue(); 1735 const Function *Fn = dyn_cast<Function>(Callee); 1736 if (isa<InlineAsm>(Callee)) 1737 return false; 1738 1739 // FIXME: support invoking patchpoint and statepoint intrinsics. 1740 if (Fn && Fn->isIntrinsic()) 1741 return false; 1742 1743 // FIXME: support whatever these are. 1744 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 1745 return false; 1746 1747 // FIXME: support control flow guard targets. 1748 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 1749 return false; 1750 1751 // FIXME: support Windows exception handling. 1752 if (!isa<LandingPadInst>(EHPadBB->front())) 1753 return false; 1754 1755 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 1756 // the region covered by the try. 1757 MCSymbol *BeginSymbol = Context.createTempSymbol(); 1758 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 1759 1760 if (!translateCallSite(&I, MIRBuilder)) 1761 return false; 1762 1763 MCSymbol *EndSymbol = Context.createTempSymbol(); 1764 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 1765 1766 // FIXME: track probabilities. 1767 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 1768 &ReturnMBB = getMBB(*ReturnBB); 1769 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 1770 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 1771 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 1772 MIRBuilder.buildBr(ReturnMBB); 1773 1774 return true; 1775 } 1776 1777 bool IRTranslator::translateCallBr(const User &U, 1778 MachineIRBuilder &MIRBuilder) { 1779 // FIXME: Implement this. 1780 return false; 1781 } 1782 1783 bool IRTranslator::translateLandingPad(const User &U, 1784 MachineIRBuilder &MIRBuilder) { 1785 const LandingPadInst &LP = cast<LandingPadInst>(U); 1786 1787 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 1788 1789 MBB.setIsEHPad(); 1790 1791 // If there aren't registers to copy the values into (e.g., during SjLj 1792 // exceptions), then don't bother. 1793 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1794 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 1795 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 1796 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 1797 return true; 1798 1799 // If landingpad's return type is token type, we don't create DAG nodes 1800 // for its exception pointer and selector value. The extraction of exception 1801 // pointer or selector value from token type landingpads is not currently 1802 // supported. 1803 if (LP.getType()->isTokenTy()) 1804 return true; 1805 1806 // Add a label to mark the beginning of the landing pad. Deletion of the 1807 // landing pad can thus be detected via the MachineModuleInfo. 
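// addLandingPad registers this block as a landing pad with the
// MachineFunction and returns the MCSymbol that labels it.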
1808 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1809 .addSym(MF->addLandingPad(&MBB));
1810
1811 LLT Ty = getLLTForType(*LP.getType(), *DL);
1812 Register Undef = MRI->createGenericVirtualRegister(Ty);
1813 MIRBuilder.buildUndef(Undef);
1814
1815 SmallVector<LLT, 2> Tys;
1816 for (Type *Ty : cast<StructType>(LP.getType())->elements())
1817 Tys.push_back(getLLTForType(*Ty, *DL));
1818 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1819
1820 // Mark exception register as live in.
1821 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1822 if (!ExceptionReg)
1823 return false;
1824
1825 MBB.addLiveIn(ExceptionReg);
1826 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1827 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1828
1829 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1830 if (!SelectorReg)
1831 return false;
1832
1833 MBB.addLiveIn(SelectorReg);
1834 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1835 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1836 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1837
1838 return true;
1839 }
1840
1841 bool IRTranslator::translateAlloca(const User &U,
1842 MachineIRBuilder &MIRBuilder) {
1843 auto &AI = cast<AllocaInst>(U);
1844
1845 if (AI.isSwiftError())
1846 return true;
1847
1848 if (AI.isStaticAlloca()) {
1849 Register Res = getOrCreateVReg(AI);
1850 int FI = getOrCreateFrameIndex(AI);
1851 MIRBuilder.buildFrameIndex(Res, FI);
1852 return true;
1853 }
1854
1855 // FIXME: support stack probing for Windows.
1856 if (MF->getTarget().getTargetTriple().isOSWindows())
1857 return false;
1858
1859 // Now we're in the harder dynamic case.
1860 Register NumElts = getOrCreateVReg(*AI.getArraySize());
1861 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1862 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1863 if (MRI->getType(NumElts) != IntPtrTy) {
1864 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1865 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1866 NumElts = ExtElts;
1867 }
1868
1869 Type *Ty = AI.getAllocatedType();
1870
1871 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1872 Register TySize =
1873 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
1874 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1875
1876 // Round the size of the allocation up to the stack alignment by adding
1877 // SA-1 to it. This doesn't overflow because we're computing
1878 // an address inside an alloca.
1879 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
1880 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
1881 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
1882 MachineInstr::NoUWrap);
1883 auto AlignCst =
1884 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
1885 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
1886
1887 Align Alignment = max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
1888 if (Alignment <= StackAlign)
1889 Alignment = Align(1);
1890 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
1891
1892 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
1893 assert(MF->getFrameInfo().hasVarSizedObjects());
1894 return true;
1895 }
1896
1897 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1898 // FIXME: We may need more info about the type.
Because of how LLT works, 1899 // we're completely discarding the i64/double distinction here (amongst 1900 // others). Fortunately the ABIs I know of where that matters don't use va_arg 1901 // anyway but that's not guaranteed. 1902 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, 1903 {getOrCreateVReg(*U.getOperand(0)), 1904 uint64_t(DL->getABITypeAlignment(U.getType()))}); 1905 return true; 1906 } 1907 1908 bool IRTranslator::translateInsertElement(const User &U, 1909 MachineIRBuilder &MIRBuilder) { 1910 // If it is a <1 x Ty> vector, use the scalar as it is 1911 // not a legal vector type in LLT. 1912 if (U.getType()->getVectorNumElements() == 1) { 1913 Register Elt = getOrCreateVReg(*U.getOperand(1)); 1914 auto &Regs = *VMap.getVRegs(U); 1915 if (Regs.empty()) { 1916 Regs.push_back(Elt); 1917 VMap.getOffsets(U)->push_back(0); 1918 } else { 1919 MIRBuilder.buildCopy(Regs[0], Elt); 1920 } 1921 return true; 1922 } 1923 1924 Register Res = getOrCreateVReg(U); 1925 Register Val = getOrCreateVReg(*U.getOperand(0)); 1926 Register Elt = getOrCreateVReg(*U.getOperand(1)); 1927 Register Idx = getOrCreateVReg(*U.getOperand(2)); 1928 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 1929 return true; 1930 } 1931 1932 bool IRTranslator::translateExtractElement(const User &U, 1933 MachineIRBuilder &MIRBuilder) { 1934 // If it is a <1 x Ty> vector, use the scalar as it is 1935 // not a legal vector type in LLT. 1936 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { 1937 Register Elt = getOrCreateVReg(*U.getOperand(0)); 1938 auto &Regs = *VMap.getVRegs(U); 1939 if (Regs.empty()) { 1940 Regs.push_back(Elt); 1941 VMap.getOffsets(U)->push_back(0); 1942 } else { 1943 MIRBuilder.buildCopy(Regs[0], Elt); 1944 } 1945 return true; 1946 } 1947 Register Res = getOrCreateVReg(U); 1948 Register Val = getOrCreateVReg(*U.getOperand(0)); 1949 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 1950 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 1951 Register Idx; 1952 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 1953 if (CI->getBitWidth() != PreferredVecIdxWidth) { 1954 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 1955 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 1956 Idx = getOrCreateVReg(*NewIdxCI); 1957 } 1958 } 1959 if (!Idx) 1960 Idx = getOrCreateVReg(*U.getOperand(1)); 1961 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 1962 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 1963 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); 1964 } 1965 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1966 return true; 1967 } 1968 1969 bool IRTranslator::translateShuffleVector(const User &U, 1970 MachineIRBuilder &MIRBuilder) { 1971 ArrayRef<int> Mask; 1972 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) 1973 Mask = SVI->getShuffleMask(); 1974 else 1975 Mask = cast<ConstantExpr>(U).getShuffleMask(); 1976 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); 1977 MIRBuilder 1978 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, 1979 {getOrCreateVReg(*U.getOperand(0)), 1980 getOrCreateVReg(*U.getOperand(1))}) 1981 .addShuffleMask(MaskAlloc); 1982 return true; 1983 } 1984 1985 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1986 const PHINode &PI = cast<PHINode>(U); 1987 1988 SmallVector<MachineInstr *, 4> Insts; 1989 for (auto Reg : getOrCreateVRegs(PI)) { 1990 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, 
{}); 1991 Insts.push_back(MIB.getInstr()); 1992 } 1993 1994 PendingPHIs.emplace_back(&PI, std::move(Insts)); 1995 return true; 1996 } 1997 1998 bool IRTranslator::translateAtomicCmpXchg(const User &U, 1999 MachineIRBuilder &MIRBuilder) { 2000 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 2001 2002 if (I.isWeak()) 2003 return false; 2004 2005 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2006 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2007 2008 Type *ResType = I.getType(); 2009 Type *ValType = ResType->Type::getStructElementType(0); 2010 2011 auto Res = getOrCreateVRegs(I); 2012 Register OldValRes = Res[0]; 2013 Register SuccessRes = Res[1]; 2014 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2015 Register Cmp = getOrCreateVReg(*I.getCompareOperand()); 2016 Register NewVal = getOrCreateVReg(*I.getNewValOperand()); 2017 2018 AAMDNodes AAMetadata; 2019 I.getAAMetadata(AAMetadata); 2020 2021 MIRBuilder.buildAtomicCmpXchgWithSuccess( 2022 OldValRes, SuccessRes, Addr, Cmp, NewVal, 2023 *MF->getMachineMemOperand( 2024 MachinePointerInfo(I.getPointerOperand()), Flags, 2025 DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, 2026 I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); 2027 return true; 2028 } 2029 2030 bool IRTranslator::translateAtomicRMW(const User &U, 2031 MachineIRBuilder &MIRBuilder) { 2032 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 2033 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2034 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2035 2036 Type *ResType = I.getType(); 2037 2038 Register Res = getOrCreateVReg(I); 2039 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2040 Register Val = getOrCreateVReg(*I.getValOperand()); 2041 2042 unsigned Opcode = 0; 2043 switch (I.getOperation()) { 2044 default: 2045 return false; 2046 case AtomicRMWInst::Xchg: 2047 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 2048 break; 2049 case AtomicRMWInst::Add: 2050 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 2051 break; 2052 case AtomicRMWInst::Sub: 2053 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 2054 break; 2055 case AtomicRMWInst::And: 2056 Opcode = TargetOpcode::G_ATOMICRMW_AND; 2057 break; 2058 case AtomicRMWInst::Nand: 2059 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 2060 break; 2061 case AtomicRMWInst::Or: 2062 Opcode = TargetOpcode::G_ATOMICRMW_OR; 2063 break; 2064 case AtomicRMWInst::Xor: 2065 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 2066 break; 2067 case AtomicRMWInst::Max: 2068 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 2069 break; 2070 case AtomicRMWInst::Min: 2071 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 2072 break; 2073 case AtomicRMWInst::UMax: 2074 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 2075 break; 2076 case AtomicRMWInst::UMin: 2077 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 2078 break; 2079 case AtomicRMWInst::FAdd: 2080 Opcode = TargetOpcode::G_ATOMICRMW_FADD; 2081 break; 2082 case AtomicRMWInst::FSub: 2083 Opcode = TargetOpcode::G_ATOMICRMW_FSUB; 2084 break; 2085 } 2086 2087 AAMDNodes AAMetadata; 2088 I.getAAMetadata(AAMetadata); 2089 2090 MIRBuilder.buildAtomicRMW( 2091 Opcode, Res, Addr, Val, 2092 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 2093 Flags, DL->getTypeStoreSize(ResType), 2094 getMemOpAlign(I), AAMetadata, nullptr, 2095 I.getSyncScopeID(), I.getOrdering())); 2096 return true; 2097 } 2098 2099 bool IRTranslator::translateFence(const User &U, 2100 MachineIRBuilder &MIRBuilder) { 2101 const FenceInst &Fence = cast<FenceInst>(U); 2102 
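// G_FENCE encodes the atomic ordering and the synchronization scope as
// immediate operands.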
MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), 2103 Fence.getSyncScopeID()); 2104 return true; 2105 } 2106 2107 void IRTranslator::finishPendingPhis() { 2108 #ifndef NDEBUG 2109 DILocationVerifier Verifier; 2110 GISelObserverWrapper WrapperObserver(&Verifier); 2111 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2112 #endif // ifndef NDEBUG 2113 for (auto &Phi : PendingPHIs) { 2114 const PHINode *PI = Phi.first; 2115 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 2116 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); 2117 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 2118 #ifndef NDEBUG 2119 Verifier.setCurrentInst(PI); 2120 #endif // ifndef NDEBUG 2121 2122 SmallSet<const MachineBasicBlock *, 16> SeenPreds; 2123 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 2124 auto IRPred = PI->getIncomingBlock(i); 2125 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 2126 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 2127 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) 2128 continue; 2129 SeenPreds.insert(Pred); 2130 for (unsigned j = 0; j < ValRegs.size(); ++j) { 2131 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 2132 MIB.addUse(ValRegs[j]); 2133 MIB.addMBB(Pred); 2134 } 2135 } 2136 } 2137 } 2138 } 2139 2140 bool IRTranslator::valueIsSplit(const Value &V, 2141 SmallVectorImpl<uint64_t> *Offsets) { 2142 SmallVector<LLT, 4> SplitTys; 2143 if (Offsets && !Offsets->empty()) 2144 Offsets->clear(); 2145 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 2146 return SplitTys.size() > 1; 2147 } 2148 2149 bool IRTranslator::translate(const Instruction &Inst) { 2150 CurBuilder->setDebugLoc(Inst.getDebugLoc()); 2151 // We only emit constants into the entry block from here. To prevent jumpy 2152 // debug behaviour set the line to 0. 2153 if (const DebugLoc &DL = Inst.getDebugLoc()) 2154 EntryBuilder->setDebugLoc( 2155 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt())); 2156 else 2157 EntryBuilder->setDebugLoc(DebugLoc()); 2158 2159 switch (Inst.getOpcode()) { 2160 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2161 case Instruction::OPCODE: \ 2162 return translate##OPCODE(Inst, *CurBuilder.get()); 2163 #include "llvm/IR/Instruction.def" 2164 default: 2165 return false; 2166 } 2167 } 2168 2169 bool IRTranslator::translate(const Constant &C, Register Reg) { 2170 if (auto CI = dyn_cast<ConstantInt>(&C)) 2171 EntryBuilder->buildConstant(Reg, *CI); 2172 else if (auto CF = dyn_cast<ConstantFP>(&C)) 2173 EntryBuilder->buildFConstant(Reg, *CF); 2174 else if (isa<UndefValue>(C)) 2175 EntryBuilder->buildUndef(Reg); 2176 else if (isa<ConstantPointerNull>(C)) 2177 EntryBuilder->buildConstant(Reg, 0); 2178 else if (auto GV = dyn_cast<GlobalValue>(&C)) 2179 EntryBuilder->buildGlobalValue(Reg, GV); 2180 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 2181 if (!CAZ->getType()->isVectorTy()) 2182 return false; 2183 // Return the scalar if it is a <1 x Ty> vector. 2184 if (CAZ->getNumElements() == 1) 2185 return translate(*CAZ->getElementValue(0u), Reg); 2186 SmallVector<Register, 4> Ops; 2187 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 2188 Constant &Elt = *CAZ->getElementValue(i); 2189 Ops.push_back(getOrCreateVReg(Elt)); 2190 } 2191 EntryBuilder->buildBuildVector(Reg, Ops); 2192 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 2193 // Return the scalar if it is a <1 x Ty> vector. 
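// LLT has no <1 x Ty> vector type, so single-element vectors are translated
// as plain scalars.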
2194 if (CV->getNumElements() == 1) 2195 return translate(*CV->getElementAsConstant(0), Reg); 2196 SmallVector<Register, 4> Ops; 2197 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 2198 Constant &Elt = *CV->getElementAsConstant(i); 2199 Ops.push_back(getOrCreateVReg(Elt)); 2200 } 2201 EntryBuilder->buildBuildVector(Reg, Ops); 2202 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 2203 switch(CE->getOpcode()) { 2204 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2205 case Instruction::OPCODE: \ 2206 return translate##OPCODE(*CE, *EntryBuilder.get()); 2207 #include "llvm/IR/Instruction.def" 2208 default: 2209 return false; 2210 } 2211 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 2212 if (CV->getNumOperands() == 1) 2213 return translate(*CV->getOperand(0), Reg); 2214 SmallVector<Register, 4> Ops; 2215 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 2216 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 2217 } 2218 EntryBuilder->buildBuildVector(Reg, Ops); 2219 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 2220 EntryBuilder->buildBlockAddress(Reg, BA); 2221 } else 2222 return false; 2223 2224 return true; 2225 } 2226 2227 void IRTranslator::finalizeBasicBlock() { 2228 for (auto &JTCase : SL->JTCases) { 2229 // Emit header first, if it wasn't already emitted. 2230 if (!JTCase.first.Emitted) 2231 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); 2232 2233 emitJumpTable(JTCase.second, JTCase.second.MBB); 2234 } 2235 SL->JTCases.clear(); 2236 } 2237 2238 void IRTranslator::finalizeFunction() { 2239 // Release the memory used by the different maps we 2240 // needed during the translation. 2241 PendingPHIs.clear(); 2242 VMap.reset(); 2243 FrameIndices.clear(); 2244 MachinePreds.clear(); 2245 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it 2246 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid 2247 // destroying it twice (in ~IRTranslator() and ~LLVMContext()) 2248 EntryBuilder.reset(); 2249 CurBuilder.reset(); 2250 FuncInfo.clear(); 2251 } 2252 2253 /// Returns true if a BasicBlock \p BB within a variadic function contains a 2254 /// variadic musttail call. 2255 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { 2256 if (!IsVarArg) 2257 return false; 2258 2259 // Walk the block backwards, because tail calls usually only appear at the end 2260 // of a block. 2261 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { 2262 const auto *CI = dyn_cast<CallInst>(&I); 2263 return CI && CI->isMustTailCall(); 2264 }); 2265 } 2266 2267 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 2268 MF = &CurMF; 2269 const Function &F = MF->getFunction(); 2270 if (F.empty()) 2271 return false; 2272 GISelCSEAnalysisWrapper &Wrapper = 2273 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); 2274 // Set the CSEConfig and run the analysis. 2275 GISelCSEInfo *CSEInfo = nullptr; 2276 TPC = &getAnalysis<TargetPassConfig>(); 2277 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() 2278 ? 
EnableCSEInIRTranslator 2279 : TPC->isGISelCSEEnabled(); 2280 2281 if (EnableCSE) { 2282 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2283 CSEInfo = &Wrapper.get(TPC->getCSEConfig()); 2284 EntryBuilder->setCSEInfo(CSEInfo); 2285 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2286 CurBuilder->setCSEInfo(CSEInfo); 2287 } else { 2288 EntryBuilder = std::make_unique<MachineIRBuilder>(); 2289 CurBuilder = std::make_unique<MachineIRBuilder>(); 2290 } 2291 CLI = MF->getSubtarget().getCallLowering(); 2292 CurBuilder->setMF(*MF); 2293 EntryBuilder->setMF(*MF); 2294 MRI = &MF->getRegInfo(); 2295 DL = &F.getParent()->getDataLayout(); 2296 ORE = std::make_unique<OptimizationRemarkEmitter>(&F); 2297 FuncInfo.MF = MF; 2298 FuncInfo.BPI = nullptr; 2299 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2300 const TargetMachine &TM = MF->getTarget(); 2301 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); 2302 SL->init(TLI, TM, *DL); 2303 2304 EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F); 2305 2306 assert(PendingPHIs.empty() && "stale PHIs"); 2307 2308 if (!DL->isLittleEndian()) { 2309 // Currently we don't properly handle big endian code. 2310 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 2311 F.getSubprogram(), &F.getEntryBlock()); 2312 R << "unable to translate in big endian mode"; 2313 reportTranslationError(*MF, *TPC, *ORE, R); 2314 } 2315 2316 // Release the per-function state when we return, whether we succeeded or not. 2317 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); 2318 2319 // Setup a separate basic-block for the arguments and constants 2320 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); 2321 MF->push_back(EntryBB); 2322 EntryBuilder->setMBB(*EntryBB); 2323 2324 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc(); 2325 SwiftError.setFunction(CurMF); 2326 SwiftError.createEntriesInEntryBlock(DbgLoc); 2327 2328 bool IsVarArg = F.isVarArg(); 2329 bool HasMustTailInVarArgFn = false; 2330 2331 // Create all blocks, in IR order, to preserve the layout. 2332 for (const BasicBlock &BB: F) { 2333 auto *&MBB = BBToMBB[&BB]; 2334 2335 MBB = MF->CreateMachineBasicBlock(&BB); 2336 MF->push_back(MBB); 2337 2338 if (BB.hasAddressTaken()) 2339 MBB->setHasAddressTaken(); 2340 2341 if (!HasMustTailInVarArgFn) 2342 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB); 2343 } 2344 2345 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn); 2346 2347 // Make our arguments/constants entry block fallthrough to the IR entry block. 2348 EntryBB->addSuccessor(&getMBB(F.front())); 2349 2350 // Lower the actual args into this basic block. 2351 SmallVector<ArrayRef<Register>, 8> VRegArgs; 2352 for (const Argument &Arg: F.args()) { 2353 if (DL->getTypeStoreSize(Arg.getType()) == 0) 2354 continue; // Don't handle zero sized types. 
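// Create the virtual registers for each argument up front;
// lowerFormalArguments below is responsible for assigning the incoming
// values to them.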
2355 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2356 VRegArgs.push_back(VRegs);
2357
2358 if (Arg.hasSwiftErrorAttr()) {
2359 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2360 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2361 }
2362 }
2363
2364 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
2365 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2366 F.getSubprogram(), &F.getEntryBlock());
2367 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2368 reportTranslationError(*MF, *TPC, *ORE, R);
2369 return false;
2370 }
2371
2372 // Need to visit defs before uses when translating instructions.
2373 GISelObserverWrapper WrapperObserver;
2374 if (EnableCSE && CSEInfo)
2375 WrapperObserver.addObserver(CSEInfo);
2376 {
2377 ReversePostOrderTraversal<const Function *> RPOT(&F);
2378 #ifndef NDEBUG
2379 DILocationVerifier Verifier;
2380 WrapperObserver.addObserver(&Verifier);
2381 #endif // ifndef NDEBUG
2382 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2383 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
2384 for (const BasicBlock *BB : RPOT) {
2385 MachineBasicBlock &MBB = getMBB(*BB);
2386 // Set the insertion point of all the following translations to
2387 // the end of this basic block.
2388 CurBuilder->setMBB(MBB);
2389 HasTailCall = false;
2390 for (const Instruction &Inst : *BB) {
2391 // If we translated a tail call in the last step, then we know
2392 // everything after the call is either a return, or something that is
2393 // handled by the call itself. (E.g. a lifetime marker or assume
2394 // intrinsic.) In this case, we should stop translating the block and
2395 // move on.
2396 if (HasTailCall)
2397 break;
2398 #ifndef NDEBUG
2399 Verifier.setCurrentInst(&Inst);
2400 #endif // ifndef NDEBUG
2401 if (translate(Inst))
2402 continue;
2403
2404 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2405 Inst.getDebugLoc(), BB);
2406 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2407
2408 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2409 std::string InstStrStorage;
2410 raw_string_ostream InstStr(InstStrStorage);
2411 InstStr << Inst;
2412
2413 R << ": '" << InstStr.str() << "'";
2414 }
2415
2416 reportTranslationError(*MF, *TPC, *ORE, R);
2417 return false;
2418 }
2419
2420 finalizeBasicBlock();
2421 }
2422 #ifndef NDEBUG
2423 WrapperObserver.removeObserver(&Verifier);
2424 #endif
2425 }
2426
2427 finishPendingPhis();
2428
2429 SwiftError.propagateVRegs();
2430
2431 // Merge the argument lowering and constants block with its single
2432 // successor, the LLVM-IR entry block. We want the basic block to
2433 // be maximal.
2434 assert(EntryBB->succ_size() == 1 &&
2435 "Custom BB used for lowering should have only one successor");
2436 // Get the successor of the current entry block.
2437 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2438 assert(NewEntryBB.pred_size() == 1 &&
2439 "LLVM-IR entry block has a predecessor!?");
2440 // Move all the instructions from the current entry block to the
2441 // new entry block.
2442 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2443 EntryBB->end());
2444
2445 // Update the live-in information for the new entry block.
2446 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2447 NewEntryBB.addLiveIn(LiveIn);
2448 NewEntryBB.sortUniqueLiveIns();
2449
2450 // Get rid of the now empty basic block.
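// Remove the CFG edge first so NewEntryBB's predecessor list stays consistent
// when the block is unlinked and deleted.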
2451 EntryBB->removeSuccessor(&NewEntryBB);
2452 MF->remove(EntryBB);
2453 MF->DeleteMachineBasicBlock(EntryBB);
2454
2455 assert(&MF->front() == &NewEntryBB &&
2456 "New entry wasn't next in the list of basic blocks!");
2457
2458 // Initialize stack protector information.
2459 StackProtector &SP = getAnalysis<StackProtector>();
2460 SP.copyToMachineFrameInfo(MF->getFrameInfo());
2461
2462 return false;
2463 }
2464