1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the IRTranslator class. 10 //===----------------------------------------------------------------------===// 11 12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 13 #include "llvm/ADT/PostOrderIterator.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/BranchProbabilityInfo.h" 19 #include "llvm/Analysis/Loads.h" 20 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/Analysis.h" 23 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 24 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 25 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h" 26 #include "llvm/CodeGen/LowLevelType.h" 27 #include "llvm/CodeGen/MachineBasicBlock.h" 28 #include "llvm/CodeGen/MachineFrameInfo.h" 29 #include "llvm/CodeGen/MachineFunction.h" 30 #include "llvm/CodeGen/MachineInstrBuilder.h" 31 #include "llvm/CodeGen/MachineMemOperand.h" 32 #include "llvm/CodeGen/MachineOperand.h" 33 #include "llvm/CodeGen/MachineRegisterInfo.h" 34 #include "llvm/CodeGen/StackProtector.h" 35 #include "llvm/CodeGen/TargetFrameLowering.h" 36 #include "llvm/CodeGen/TargetInstrInfo.h" 37 #include "llvm/CodeGen/TargetLowering.h" 38 #include "llvm/CodeGen/TargetPassConfig.h" 39 #include "llvm/CodeGen/TargetRegisterInfo.h" 40 #include "llvm/CodeGen/TargetSubtargetInfo.h" 41 #include "llvm/IR/BasicBlock.h" 42 #include "llvm/IR/CFG.h" 43 #include "llvm/IR/Constant.h" 44 #include "llvm/IR/Constants.h" 45 #include "llvm/IR/DataLayout.h" 46 #include "llvm/IR/DebugInfo.h" 47 #include "llvm/IR/DerivedTypes.h" 48 #include "llvm/IR/Function.h" 49 #include "llvm/IR/GetElementPtrTypeIterator.h" 50 #include "llvm/IR/InlineAsm.h" 51 #include "llvm/IR/InstrTypes.h" 52 #include "llvm/IR/Instructions.h" 53 #include "llvm/IR/IntrinsicInst.h" 54 #include "llvm/IR/Intrinsics.h" 55 #include "llvm/IR/LLVMContext.h" 56 #include "llvm/IR/Metadata.h" 57 #include "llvm/IR/Type.h" 58 #include "llvm/IR/User.h" 59 #include "llvm/IR/Value.h" 60 #include "llvm/InitializePasses.h" 61 #include "llvm/MC/MCContext.h" 62 #include "llvm/Pass.h" 63 #include "llvm/Support/Casting.h" 64 #include "llvm/Support/CodeGen.h" 65 #include "llvm/Support/Debug.h" 66 #include "llvm/Support/ErrorHandling.h" 67 #include "llvm/Support/LowLevelTypeImpl.h" 68 #include "llvm/Support/MathExtras.h" 69 #include "llvm/Support/raw_ostream.h" 70 #include "llvm/Target/TargetIntrinsicInfo.h" 71 #include "llvm/Target/TargetMachine.h" 72 #include <algorithm> 73 #include <cassert> 74 #include <cstdint> 75 #include <iterator> 76 #include <string> 77 #include <utility> 78 #include <vector> 79 80 #define DEBUG_TYPE "irtranslator" 81 82 using namespace llvm; 83 84 static cl::opt<bool> 85 EnableCSEInIRTranslator("enable-cse-in-irtranslator", 86 cl::desc("Should enable CSE in irtranslator"), 87 cl::Optional, cl::init(false)); 88 char IRTranslator::ID = 0; 89 90 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 91 false, false) 92 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 93 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass) 94 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 95 false, false) 96 97 static void reportTranslationError(MachineFunction &MF, 98 const TargetPassConfig &TPC, 99 OptimizationRemarkEmitter &ORE, 100 OptimizationRemarkMissed &R) { 101 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 102 103 // Print the function name explicitly if we don't have a debug location (which 104 // makes the diagnostic less useful) or if we're going to emit a raw error. 105 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 106 R << (" (in function: " + MF.getName() + ")").str(); 107 108 if (TPC.isGlobalISelAbortEnabled()) 109 report_fatal_error(R.getMsg()); 110 else 111 ORE.emit(R); 112 } 113 114 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { } 115 116 #ifndef NDEBUG 117 namespace { 118 /// Verify that every instruction created has the same DILocation as the 119 /// instruction being translated. 120 class DILocationVerifier : public GISelChangeObserver { 121 const Instruction *CurrInst = nullptr; 122 123 public: 124 DILocationVerifier() = default; 125 ~DILocationVerifier() = default; 126 127 const Instruction *getCurrentInst() const { return CurrInst; } 128 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 129 130 void erasingInstr(MachineInstr &MI) override {} 131 void changingInstr(MachineInstr &MI) override {} 132 void changedInstr(MachineInstr &MI) override {} 133 134 void createdInstr(MachineInstr &MI) override { 135 assert(getCurrentInst() && "Inserted instruction without a current MI"); 136 137 // Only print the check message if we're actually checking it. 138 #ifndef NDEBUG 139 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 140 << " was copied to " << MI); 141 #endif 142 // We allow insts in the entry block to have a debug loc line of 0 because 143 // they could have originated from constants, and we don't want a jumpy 144 // debug experience. 145 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() || 146 MI.getDebugLoc().getLine() == 0) && 147 "Line info was not transferred to all instructions"); 148 } 149 }; 150 } // namespace 151 #endif // ifndef NDEBUG 152 153 154 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 155 AU.addRequired<StackProtector>(); 156 AU.addRequired<TargetPassConfig>(); 157 AU.addRequired<GISelCSEAnalysisWrapperPass>(); 158 getSelectionDAGFallbackAnalysisUsage(AU); 159 MachineFunctionPass::getAnalysisUsage(AU); 160 } 161 162 IRTranslator::ValueToVRegInfo::VRegListT & 163 IRTranslator::allocateVRegs(const Value &Val) { 164 assert(!VMap.contains(Val) && "Value already allocated in VMap"); 165 auto *Regs = VMap.getVRegs(Val); 166 auto *Offsets = VMap.getOffsets(Val); 167 SmallVector<LLT, 4> SplitTys; 168 computeValueLLTs(*DL, *Val.getType(), SplitTys, 169 Offsets->empty() ? Offsets : nullptr); 170 for (unsigned i = 0; i < SplitTys.size(); ++i) 171 Regs->push_back(0); 172 return *Regs; 173 } 174 175 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) { 176 auto VRegsIt = VMap.findVRegs(Val); 177 if (VRegsIt != VMap.vregs_end()) 178 return *VRegsIt->second; 179 180 if (Val.getType()->isVoidTy()) 181 return *VMap.getVRegs(Val); 182 183 // Create entry for this type. 184 auto *VRegs = VMap.getVRegs(Val); 185 auto *Offsets = VMap.getOffsets(Val); 186 187 assert(Val.getType()->isSized() && 188 "Don't know how to create an empty vreg"); 189 190 SmallVector<LLT, 4> SplitTys; 191 computeValueLLTs(*DL, *Val.getType(), SplitTys, 192 Offsets->empty() ? Offsets : nullptr); 193 194 if (!isa<Constant>(Val)) { 195 for (auto Ty : SplitTys) 196 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 197 return *VRegs; 198 } 199 200 if (Val.getType()->isAggregateType()) { 201 // UndefValue, ConstantAggregateZero 202 auto &C = cast<Constant>(Val); 203 unsigned Idx = 0; 204 while (auto Elt = C.getAggregateElement(Idx++)) { 205 auto EltRegs = getOrCreateVRegs(*Elt); 206 llvm::copy(EltRegs, std::back_inserter(*VRegs)); 207 } 208 } else { 209 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 210 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 211 bool Success = translate(cast<Constant>(Val), VRegs->front()); 212 if (!Success) { 213 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 214 MF->getFunction().getSubprogram(), 215 &MF->getFunction().getEntryBlock()); 216 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 217 reportTranslationError(*MF, *TPC, *ORE, R); 218 return *VRegs; 219 } 220 } 221 222 return *VRegs; 223 } 224 225 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 226 if (FrameIndices.find(&AI) != FrameIndices.end()) 227 return FrameIndices[&AI]; 228 229 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType()); 230 uint64_t Size = 231 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 232 233 // Always allocate at least one byte. 234 Size = std::max<uint64_t>(Size, 1u); 235 236 unsigned Alignment = AI.getAlignment(); 237 if (!Alignment) 238 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 239 240 int &FI = FrameIndices[&AI]; 241 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 242 return FI; 243 } 244 245 Align IRTranslator::getMemOpAlign(const Instruction &I) { 246 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) 247 return SI->getAlign(); 248 if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 249 return LI->getAlign(); 250 } 251 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 252 // TODO(PR27168): This instruction has no alignment attribute, but unlike 253 // the default alignment for load/store, the default here is to assume 254 // it has NATURAL alignment, not DataLayout-specified alignment. 255 const DataLayout &DL = AI->getModule()->getDataLayout(); 256 return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType())); 257 } 258 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 259 // TODO(PR27168): This instruction has no alignment attribute, but unlike 260 // the default alignment for load/store, the default here is to assume 261 // it has NATURAL alignment, not DataLayout-specified alignment. 262 const DataLayout &DL = AI->getModule()->getDataLayout(); 263 return Align(DL.getTypeStoreSize(AI->getValOperand()->getType())); 264 } 265 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 266 R << "unable to translate memop: " << ore::NV("Opcode", &I); 267 reportTranslationError(*MF, *TPC, *ORE, R); 268 return Align(1); 269 } 270 271 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 272 MachineBasicBlock *&MBB = BBToMBB[&BB]; 273 assert(MBB && "BasicBlock was not encountered before"); 274 return *MBB; 275 } 276 277 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 278 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 279 MachinePreds[Edge].push_back(NewPred); 280 } 281 282 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 283 MachineIRBuilder &MIRBuilder) { 284 // Get or create a virtual register for each value. 285 // Unless the value is a Constant => loadimm cst? 286 // or inline constant each time? 287 // Creation of a virtual register needs to have a size. 288 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 289 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 290 Register Res = getOrCreateVReg(U); 291 uint16_t Flags = 0; 292 if (isa<Instruction>(U)) { 293 const Instruction &I = cast<Instruction>(U); 294 Flags = MachineInstr::copyFlagsFromInstruction(I); 295 } 296 297 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags); 298 return true; 299 } 300 301 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 302 // -0.0 - X --> G_FNEG 303 if (isa<Constant>(U.getOperand(0)) && 304 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 305 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 306 Register Res = getOrCreateVReg(U); 307 uint16_t Flags = 0; 308 if (isa<Instruction>(U)) { 309 const Instruction &I = cast<Instruction>(U); 310 Flags = MachineInstr::copyFlagsFromInstruction(I); 311 } 312 // Negate the last operand of the FSUB 313 MIRBuilder.buildFNeg(Res, Op1, Flags); 314 return true; 315 } 316 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 317 } 318 319 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) { 320 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 321 Register Res = getOrCreateVReg(U); 322 uint16_t Flags = 0; 323 if (isa<Instruction>(U)) { 324 const Instruction &I = cast<Instruction>(U); 325 Flags = MachineInstr::copyFlagsFromInstruction(I); 326 } 327 MIRBuilder.buildFNeg(Res, Op0, Flags); 328 return true; 329 } 330 331 bool IRTranslator::translateCompare(const User &U, 332 MachineIRBuilder &MIRBuilder) { 333 auto *CI = dyn_cast<CmpInst>(&U); 334 Register Op0 = getOrCreateVReg(*U.getOperand(0)); 335 Register Op1 = getOrCreateVReg(*U.getOperand(1)); 336 Register Res = getOrCreateVReg(U); 337 CmpInst::Predicate Pred = 338 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>( 339 cast<ConstantExpr>(U).getPredicate()); 340 if (CmpInst::isIntPredicate(Pred)) 341 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 342 else if (Pred == CmpInst::FCMP_FALSE) 343 MIRBuilder.buildCopy( 344 Res, getOrCreateVReg(*Constant::getNullValue(U.getType()))); 345 else if (Pred == CmpInst::FCMP_TRUE) 346 MIRBuilder.buildCopy( 347 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType()))); 348 else { 349 assert(CI && "Instruction should be CmpInst"); 350 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, 351 MachineInstr::copyFlagsFromInstruction(*CI)); 352 } 353 354 return true; 355 } 356 357 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 358 const ReturnInst &RI = cast<ReturnInst>(U); 359 const Value *Ret = RI.getReturnValue(); 360 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 361 Ret = nullptr; 362 363 ArrayRef<Register> VRegs; 364 if (Ret) 365 VRegs = getOrCreateVRegs(*Ret); 366 367 Register SwiftErrorVReg = 0; 368 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { 369 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( 370 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); 371 } 372 373 // The target may mess up with the insertion point, but 374 // this is not important as a return is the last instruction 375 // of the block anyway. 376 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg); 377 } 378 379 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 380 const BranchInst &BrInst = cast<BranchInst>(U); 381 unsigned Succ = 0; 382 if (!BrInst.isUnconditional()) { 383 // We want a G_BRCOND to the true BB followed by an unconditional branch. 384 Register Tst = getOrCreateVReg(*BrInst.getCondition()); 385 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); 386 MachineBasicBlock &TrueBB = getMBB(TrueTgt); 387 MIRBuilder.buildBrCond(Tst, TrueBB); 388 } 389 390 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); 391 MachineBasicBlock &TgtBB = getMBB(BrTgt); 392 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 393 394 // If the unconditional target is the layout successor, fallthrough. 395 if (!CurBB.isLayoutSuccessor(&TgtBB)) 396 MIRBuilder.buildBr(TgtBB); 397 398 // Link successors. 399 for (const BasicBlock *Succ : successors(&BrInst)) 400 CurBB.addSuccessor(&getMBB(*Succ)); 401 return true; 402 } 403 404 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src, 405 MachineBasicBlock *Dst, 406 BranchProbability Prob) { 407 if (!FuncInfo.BPI) { 408 Src->addSuccessorWithoutProb(Dst); 409 return; 410 } 411 if (Prob.isUnknown()) 412 Prob = getEdgeProbability(Src, Dst); 413 Src->addSuccessor(Dst, Prob); 414 } 415 416 BranchProbability 417 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src, 418 const MachineBasicBlock *Dst) const { 419 const BasicBlock *SrcBB = Src->getBasicBlock(); 420 const BasicBlock *DstBB = Dst->getBasicBlock(); 421 if (!FuncInfo.BPI) { 422 // If BPI is not available, set the default probability as 1 / N, where N is 423 // the number of successors. 424 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 425 return BranchProbability(1, SuccSize); 426 } 427 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB); 428 } 429 430 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) { 431 using namespace SwitchCG; 432 // Extract cases from the switch. 433 const SwitchInst &SI = cast<SwitchInst>(U); 434 BranchProbabilityInfo *BPI = FuncInfo.BPI; 435 CaseClusterVector Clusters; 436 Clusters.reserve(SI.getNumCases()); 437 for (auto &I : SI.cases()) { 438 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor()); 439 assert(Succ && "Could not find successor mbb in mapping"); 440 const ConstantInt *CaseVal = I.getCaseValue(); 441 BranchProbability Prob = 442 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) 443 : BranchProbability(1, SI.getNumCases() + 1); 444 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); 445 } 446 447 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest()); 448 449 // Cluster adjacent cases with the same destination. We do this at all 450 // optimization levels because it's cheap to do and will make codegen faster 451 // if there are many clusters. 452 sortAndRangeify(Clusters); 453 454 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent()); 455 456 // If there is only the default destination, jump there directly. 457 if (Clusters.empty()) { 458 SwitchMBB->addSuccessor(DefaultMBB); 459 if (DefaultMBB != SwitchMBB->getNextNode()) 460 MIB.buildBr(*DefaultMBB); 461 return true; 462 } 463 464 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr); 465 466 LLVM_DEBUG({ 467 dbgs() << "Case clusters: "; 468 for (const CaseCluster &C : Clusters) { 469 if (C.Kind == CC_JumpTable) 470 dbgs() << "JT:"; 471 if (C.Kind == CC_BitTests) 472 dbgs() << "BT:"; 473 474 C.Low->getValue().print(dbgs(), true); 475 if (C.Low != C.High) { 476 dbgs() << '-'; 477 C.High->getValue().print(dbgs(), true); 478 } 479 dbgs() << ' '; 480 } 481 dbgs() << '\n'; 482 }); 483 484 assert(!Clusters.empty()); 485 SwitchWorkList WorkList; 486 CaseClusterIt First = Clusters.begin(); 487 CaseClusterIt Last = Clusters.end() - 1; 488 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); 489 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); 490 491 // FIXME: At the moment we don't do any splitting optimizations here like 492 // SelectionDAG does, so this worklist only has one entry. 493 while (!WorkList.empty()) { 494 SwitchWorkListItem W = WorkList.back(); 495 WorkList.pop_back(); 496 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB)) 497 return false; 498 } 499 return true; 500 } 501 502 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT, 503 MachineBasicBlock *MBB) { 504 // Emit the code for the jump table 505 assert(JT.Reg != -1U && "Should lower JT Header first!"); 506 MachineIRBuilder MIB(*MBB->getParent()); 507 MIB.setMBB(*MBB); 508 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 509 510 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext()); 511 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 512 513 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI); 514 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg); 515 } 516 517 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT, 518 SwitchCG::JumpTableHeader &JTH, 519 MachineBasicBlock *HeaderBB) { 520 MachineIRBuilder MIB(*HeaderBB->getParent()); 521 MIB.setMBB(*HeaderBB); 522 MIB.setDebugLoc(CurBuilder->getDebugLoc()); 523 524 const Value &SValue = *JTH.SValue; 525 // Subtract the lowest switch case value from the value being switched on. 526 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL); 527 Register SwitchOpReg = getOrCreateVReg(SValue); 528 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First); 529 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst); 530 531 // This value may be smaller or larger than the target's pointer type, and 532 // therefore require extension or truncating. 533 Type *PtrIRTy = SValue.getType()->getPointerTo(); 534 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy)); 535 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub); 536 537 JT.Reg = Sub.getReg(0); 538 539 if (JTH.OmitRangeCheck) { 540 if (JT.MBB != HeaderBB->getNextNode()) 541 MIB.buildBr(*JT.MBB); 542 return true; 543 } 544 545 // Emit the range check for the jump table, and branch to the default block 546 // for the switch statement if the value being switched on exceeds the 547 // largest case in the switch. 548 auto Cst = getOrCreateVReg( 549 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First)); 550 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0); 551 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst); 552 553 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default); 554 555 // Avoid emitting unnecessary branches to the next block. 556 if (JT.MBB != HeaderBB->getNextNode()) 557 BrCond = MIB.buildBr(*JT.MBB); 558 return true; 559 } 560 561 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB, 562 MachineBasicBlock *SwitchBB, 563 MachineIRBuilder &MIB) { 564 Register CondLHS = getOrCreateVReg(*CB.CmpLHS); 565 Register Cond; 566 DebugLoc OldDbgLoc = MIB.getDebugLoc(); 567 MIB.setDebugLoc(CB.DbgLoc); 568 MIB.setMBB(*CB.ThisBB); 569 570 if (CB.PredInfo.NoCmp) { 571 // Branch or fall through to TrueBB. 572 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 573 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 574 CB.ThisBB); 575 CB.ThisBB->normalizeSuccProbs(); 576 if (CB.TrueBB != CB.ThisBB->getNextNode()) 577 MIB.buildBr(*CB.TrueBB); 578 MIB.setDebugLoc(OldDbgLoc); 579 return; 580 } 581 582 const LLT i1Ty = LLT::scalar(1); 583 // Build the compare. 584 if (!CB.CmpMHS) { 585 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 586 Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0); 587 } else { 588 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE && 589 "Can only handle SLE ranges"); 590 591 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 592 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 593 594 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS); 595 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 596 Register CondRHS = getOrCreateVReg(*CB.CmpRHS); 597 Cond = 598 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0); 599 } else { 600 const LLT CmpTy = MRI->getType(CmpOpReg); 601 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS); 602 auto Diff = MIB.buildConstant(CmpTy, High - Low); 603 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0); 604 } 605 } 606 607 // Update successor info 608 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb); 609 610 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()}, 611 CB.ThisBB); 612 613 // TrueBB and FalseBB are always different unless the incoming IR is 614 // degenerate. This only happens when running llc on weird IR. 615 if (CB.TrueBB != CB.FalseBB) 616 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb); 617 CB.ThisBB->normalizeSuccProbs(); 618 619 // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock()) 620 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()}, 621 CB.ThisBB); 622 623 // If the lhs block is the next block, invert the condition so that we can 624 // fall through to the lhs instead of the rhs block. 625 if (CB.TrueBB == CB.ThisBB->getNextNode()) { 626 std::swap(CB.TrueBB, CB.FalseBB); 627 auto True = MIB.buildConstant(i1Ty, 1); 628 Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0); 629 } 630 631 MIB.buildBrCond(Cond, *CB.TrueBB); 632 MIB.buildBr(*CB.FalseBB); 633 MIB.setDebugLoc(OldDbgLoc); 634 } 635 636 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W, 637 MachineBasicBlock *SwitchMBB, 638 MachineBasicBlock *CurMBB, 639 MachineBasicBlock *DefaultMBB, 640 MachineIRBuilder &MIB, 641 MachineFunction::iterator BBI, 642 BranchProbability UnhandledProbs, 643 SwitchCG::CaseClusterIt I, 644 MachineBasicBlock *Fallthrough, 645 bool FallthroughUnreachable) { 646 using namespace SwitchCG; 647 MachineFunction *CurMF = SwitchMBB->getParent(); 648 // FIXME: Optimize away range check based on pivot comparisons. 649 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; 650 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; 651 BranchProbability DefaultProb = W.DefaultProb; 652 653 // The jump block hasn't been inserted yet; insert it here. 654 MachineBasicBlock *JumpMBB = JT->MBB; 655 CurMF->insert(BBI, JumpMBB); 656 657 // Since the jump table block is separate from the switch block, we need 658 // to keep track of it as a machine predecessor to the default block, 659 // otherwise we lose the phi edges. 660 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, 661 CurMBB); 662 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()}, 663 JumpMBB); 664 665 auto JumpProb = I->Prob; 666 auto FallthroughProb = UnhandledProbs; 667 668 // If the default statement is a target of the jump table, we evenly 669 // distribute the default probability to successors of CurMBB. Also 670 // update the probability on the edge from JumpMBB to Fallthrough. 671 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), 672 SE = JumpMBB->succ_end(); 673 SI != SE; ++SI) { 674 if (*SI == DefaultMBB) { 675 JumpProb += DefaultProb / 2; 676 FallthroughProb -= DefaultProb / 2; 677 JumpMBB->setSuccProbability(SI, DefaultProb / 2); 678 JumpMBB->normalizeSuccProbs(); 679 } else { 680 // Also record edges from the jump table block to it's successors. 681 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()}, 682 JumpMBB); 683 } 684 } 685 686 // Skip the range check if the fallthrough block is unreachable. 687 if (FallthroughUnreachable) 688 JTH->OmitRangeCheck = true; 689 690 if (!JTH->OmitRangeCheck) 691 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb); 692 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb); 693 CurMBB->normalizeSuccProbs(); 694 695 // The jump table header will be inserted in our current block, do the 696 // range check, and fall through to our fallthrough block. 697 JTH->HeaderBB = CurMBB; 698 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. 699 700 // If we're in the right place, emit the jump table header right now. 701 if (CurMBB == SwitchMBB) { 702 if (!emitJumpTableHeader(*JT, *JTH, CurMBB)) 703 return false; 704 JTH->Emitted = true; 705 } 706 return true; 707 } 708 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, 709 Value *Cond, 710 MachineBasicBlock *Fallthrough, 711 bool FallthroughUnreachable, 712 BranchProbability UnhandledProbs, 713 MachineBasicBlock *CurMBB, 714 MachineIRBuilder &MIB, 715 MachineBasicBlock *SwitchMBB) { 716 using namespace SwitchCG; 717 const Value *RHS, *LHS, *MHS; 718 CmpInst::Predicate Pred; 719 if (I->Low == I->High) { 720 // Check Cond == I->Low. 721 Pred = CmpInst::ICMP_EQ; 722 LHS = Cond; 723 RHS = I->Low; 724 MHS = nullptr; 725 } else { 726 // Check I->Low <= Cond <= I->High. 727 Pred = CmpInst::ICMP_SLE; 728 LHS = I->Low; 729 MHS = Cond; 730 RHS = I->High; 731 } 732 733 // If Fallthrough is unreachable, fold away the comparison. 734 // The false probability is the sum of all unhandled cases. 735 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough, 736 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs); 737 738 emitSwitchCase(CB, SwitchMBB, MIB); 739 return true; 740 } 741 742 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, 743 Value *Cond, 744 MachineBasicBlock *SwitchMBB, 745 MachineBasicBlock *DefaultMBB, 746 MachineIRBuilder &MIB) { 747 using namespace SwitchCG; 748 MachineFunction *CurMF = FuncInfo.MF; 749 MachineBasicBlock *NextMBB = nullptr; 750 MachineFunction::iterator BBI(W.MBB); 751 if (++BBI != FuncInfo.MF->end()) 752 NextMBB = &*BBI; 753 754 if (EnableOpts) { 755 // Here, we order cases by probability so the most likely case will be 756 // checked first. However, two clusters can have the same probability in 757 // which case their relative ordering is non-deterministic. So we use Low 758 // as a tie-breaker as clusters are guaranteed to never overlap. 759 llvm::sort(W.FirstCluster, W.LastCluster + 1, 760 [](const CaseCluster &a, const CaseCluster &b) { 761 return a.Prob != b.Prob 762 ? a.Prob > b.Prob 763 : a.Low->getValue().slt(b.Low->getValue()); 764 }); 765 766 // Rearrange the case blocks so that the last one falls through if possible 767 // without changing the order of probabilities. 768 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) { 769 --I; 770 if (I->Prob > W.LastCluster->Prob) 771 break; 772 if (I->Kind == CC_Range && I->MBB == NextMBB) { 773 std::swap(*I, *W.LastCluster); 774 break; 775 } 776 } 777 } 778 779 // Compute total probability. 780 BranchProbability DefaultProb = W.DefaultProb; 781 BranchProbability UnhandledProbs = DefaultProb; 782 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 783 UnhandledProbs += I->Prob; 784 785 MachineBasicBlock *CurMBB = W.MBB; 786 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 787 bool FallthroughUnreachable = false; 788 MachineBasicBlock *Fallthrough; 789 if (I == W.LastCluster) { 790 // For the last cluster, fall through to the default destination. 791 Fallthrough = DefaultMBB; 792 FallthroughUnreachable = isa<UnreachableInst>( 793 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 794 } else { 795 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 796 CurMF->insert(BBI, Fallthrough); 797 } 798 UnhandledProbs -= I->Prob; 799 800 switch (I->Kind) { 801 case CC_BitTests: { 802 LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented"); 803 return false; // Bit tests currently unimplemented. 804 } 805 case CC_JumpTable: { 806 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI, 807 UnhandledProbs, I, Fallthrough, 808 FallthroughUnreachable)) { 809 LLVM_DEBUG(dbgs() << "Failed to lower jump table"); 810 return false; 811 } 812 break; 813 } 814 case CC_Range: { 815 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough, 816 FallthroughUnreachable, UnhandledProbs, 817 CurMBB, MIB, SwitchMBB)) { 818 LLVM_DEBUG(dbgs() << "Failed to lower switch range"); 819 return false; 820 } 821 break; 822 } 823 } 824 CurMBB = Fallthrough; 825 } 826 827 return true; 828 } 829 830 bool IRTranslator::translateIndirectBr(const User &U, 831 MachineIRBuilder &MIRBuilder) { 832 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 833 834 const Register Tgt = getOrCreateVReg(*BrInst.getAddress()); 835 MIRBuilder.buildBrIndirect(Tgt); 836 837 // Link successors. 838 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors; 839 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 840 for (const BasicBlock *Succ : successors(&BrInst)) { 841 // It's legal for indirectbr instructions to have duplicate blocks in the 842 // destination list. We don't allow this in MIR. Skip anything that's 843 // already a successor. 844 if (!AddedSuccessors.insert(Succ).second) 845 continue; 846 CurBB.addSuccessor(&getMBB(*Succ)); 847 } 848 849 return true; 850 } 851 852 static bool isSwiftError(const Value *V) { 853 if (auto Arg = dyn_cast<Argument>(V)) 854 return Arg->hasSwiftErrorAttr(); 855 if (auto AI = dyn_cast<AllocaInst>(V)) 856 return AI->isSwiftError(); 857 return false; 858 } 859 860 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 861 const LoadInst &LI = cast<LoadInst>(U); 862 if (DL->getTypeStoreSize(LI.getType()) == 0) 863 return true; 864 865 ArrayRef<Register> Regs = getOrCreateVRegs(LI); 866 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 867 Register Base = getOrCreateVReg(*LI.getPointerOperand()); 868 869 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType()); 870 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 871 872 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) { 873 assert(Regs.size() == 1 && "swifterror should be single pointer"); 874 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), 875 LI.getPointerOperand()); 876 MIRBuilder.buildCopy(Regs[0], VReg); 877 return true; 878 } 879 880 auto &TLI = *MF->getSubtarget().getTargetLowering(); 881 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL); 882 883 const MDNode *Ranges = 884 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr; 885 for (unsigned i = 0; i < Regs.size(); ++i) { 886 Register Addr; 887 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 888 889 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 890 Align BaseAlign = getMemOpAlign(LI); 891 AAMDNodes AAMetadata; 892 LI.getAAMetadata(AAMetadata); 893 auto MMO = MF->getMachineMemOperand( 894 Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(), 895 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges, 896 LI.getSyncScopeID(), LI.getOrdering()); 897 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 898 } 899 900 return true; 901 } 902 903 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 904 const StoreInst &SI = cast<StoreInst>(U); 905 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 906 return true; 907 908 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand()); 909 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 910 Register Base = getOrCreateVReg(*SI.getPointerOperand()); 911 912 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType()); 913 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 914 915 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) { 916 assert(Vals.size() == 1 && "swifterror should be single pointer"); 917 918 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(), 919 SI.getPointerOperand()); 920 MIRBuilder.buildCopy(VReg, Vals[0]); 921 return true; 922 } 923 924 auto &TLI = *MF->getSubtarget().getTargetLowering(); 925 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL); 926 927 for (unsigned i = 0; i < Vals.size(); ++i) { 928 Register Addr; 929 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8); 930 931 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 932 Align BaseAlign = getMemOpAlign(SI); 933 AAMDNodes AAMetadata; 934 SI.getAAMetadata(AAMetadata); 935 auto MMO = MF->getMachineMemOperand( 936 Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(), 937 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr, 938 SI.getSyncScopeID(), SI.getOrdering()); 939 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 940 } 941 return true; 942 } 943 944 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 945 const Value *Src = U.getOperand(0); 946 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 947 948 // getIndexedOffsetInType is designed for GEPs, so the first index is the 949 // usual array element rather than looking into the actual aggregate. 950 SmallVector<Value *, 1> Indices; 951 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 952 953 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 954 for (auto Idx : EVI->indices()) 955 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 956 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 957 for (auto Idx : IVI->indices()) 958 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 959 } else { 960 for (unsigned i = 1; i < U.getNumOperands(); ++i) 961 Indices.push_back(U.getOperand(i)); 962 } 963 964 return 8 * static_cast<uint64_t>( 965 DL.getIndexedOffsetInType(Src->getType(), Indices)); 966 } 967 968 bool IRTranslator::translateExtractValue(const User &U, 969 MachineIRBuilder &MIRBuilder) { 970 const Value *Src = U.getOperand(0); 971 uint64_t Offset = getOffsetFromIndices(U, *DL); 972 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 973 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 974 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); 975 auto &DstRegs = allocateVRegs(U); 976 977 for (unsigned i = 0; i < DstRegs.size(); ++i) 978 DstRegs[i] = SrcRegs[Idx++]; 979 980 return true; 981 } 982 983 bool IRTranslator::translateInsertValue(const User &U, 984 MachineIRBuilder &MIRBuilder) { 985 const Value *Src = U.getOperand(0); 986 uint64_t Offset = getOffsetFromIndices(U, *DL); 987 auto &DstRegs = allocateVRegs(U); 988 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 989 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src); 990 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 991 auto InsertedIt = InsertedRegs.begin(); 992 993 for (unsigned i = 0; i < DstRegs.size(); ++i) { 994 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 995 DstRegs[i] = *InsertedIt++; 996 else 997 DstRegs[i] = SrcRegs[i]; 998 } 999 1000 return true; 1001 } 1002 1003 bool IRTranslator::translateSelect(const User &U, 1004 MachineIRBuilder &MIRBuilder) { 1005 Register Tst = getOrCreateVReg(*U.getOperand(0)); 1006 ArrayRef<Register> ResRegs = getOrCreateVRegs(U); 1007 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 1008 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 1009 1010 uint16_t Flags = 0; 1011 if (const SelectInst *SI = dyn_cast<SelectInst>(&U)) 1012 Flags = MachineInstr::copyFlagsFromInstruction(*SI); 1013 1014 for (unsigned i = 0; i < ResRegs.size(); ++i) { 1015 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags); 1016 } 1017 1018 return true; 1019 } 1020 1021 bool IRTranslator::translateCopy(const User &U, const Value &V, 1022 MachineIRBuilder &MIRBuilder) { 1023 Register Src = getOrCreateVReg(V); 1024 auto &Regs = *VMap.getVRegs(U); 1025 if (Regs.empty()) { 1026 Regs.push_back(Src); 1027 VMap.getOffsets(U)->push_back(0); 1028 } else { 1029 // If we already assigned a vreg for this instruction, we can't change that. 1030 // Emit a copy to satisfy the users we already emitted. 1031 MIRBuilder.buildCopy(Regs[0], Src); 1032 } 1033 return true; 1034 } 1035 1036 bool IRTranslator::translateBitCast(const User &U, 1037 MachineIRBuilder &MIRBuilder) { 1038 // If we're bitcasting to the source type, we can reuse the source vreg. 1039 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 1040 getLLTForType(*U.getType(), *DL)) 1041 return translateCopy(U, *U.getOperand(0), MIRBuilder); 1042 1043 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 1044 } 1045 1046 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 1047 MachineIRBuilder &MIRBuilder) { 1048 Register Op = getOrCreateVReg(*U.getOperand(0)); 1049 Register Res = getOrCreateVReg(U); 1050 MIRBuilder.buildInstr(Opcode, {Res}, {Op}); 1051 return true; 1052 } 1053 1054 bool IRTranslator::translateGetElementPtr(const User &U, 1055 MachineIRBuilder &MIRBuilder) { 1056 Value &Op0 = *U.getOperand(0); 1057 Register BaseReg = getOrCreateVReg(Op0); 1058 Type *PtrIRTy = Op0.getType(); 1059 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 1060 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1061 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1062 1063 // Normalize Vector GEP - all scalar operands should be converted to the 1064 // splat vector. 1065 unsigned VectorWidth = 0; 1066 if (auto *VT = dyn_cast<VectorType>(U.getType())) 1067 VectorWidth = VT->getNumElements(); 1068 1069 // We might need to splat the base pointer into a vector if the offsets 1070 // are vectors. 1071 if (VectorWidth && !PtrTy.isVector()) { 1072 BaseReg = 1073 MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg) 1074 .getReg(0); 1075 PtrIRTy = VectorType::get(PtrIRTy, VectorWidth); 1076 PtrTy = getLLTForType(*PtrIRTy, *DL); 1077 OffsetIRTy = DL->getIntPtrType(PtrIRTy); 1078 OffsetTy = getLLTForType(*OffsetIRTy, *DL); 1079 } 1080 1081 int64_t Offset = 0; 1082 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 1083 GTI != E; ++GTI) { 1084 const Value *Idx = GTI.getOperand(); 1085 if (StructType *StTy = GTI.getStructTypeOrNull()) { 1086 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 1087 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 1088 continue; 1089 } else { 1090 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 1091 1092 // If this is a scalar constant or a splat vector of constants, 1093 // handle it quickly. 1094 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 1095 Offset += ElementSize * CI->getSExtValue(); 1096 continue; 1097 } 1098 1099 if (Offset != 0) { 1100 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset); 1101 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0)) 1102 .getReg(0); 1103 Offset = 0; 1104 } 1105 1106 Register IdxReg = getOrCreateVReg(*Idx); 1107 LLT IdxTy = MRI->getType(IdxReg); 1108 if (IdxTy != OffsetTy) { 1109 if (!IdxTy.isVector() && VectorWidth) { 1110 IdxReg = MIRBuilder.buildSplatVector( 1111 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0); 1112 } 1113 1114 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0); 1115 } 1116 1117 // N = N + Idx * ElementSize; 1118 // Avoid doing it for ElementSize of 1. 1119 Register GepOffsetReg; 1120 if (ElementSize != 1) { 1121 auto ElementSizeMIB = MIRBuilder.buildConstant( 1122 getLLTForType(*OffsetIRTy, *DL), ElementSize); 1123 GepOffsetReg = 1124 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0); 1125 } else 1126 GepOffsetReg = IdxReg; 1127 1128 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0); 1129 } 1130 } 1131 1132 if (Offset != 0) { 1133 auto OffsetMIB = 1134 MIRBuilder.buildConstant(OffsetTy, Offset); 1135 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0)); 1136 return true; 1137 } 1138 1139 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 1140 return true; 1141 } 1142 1143 bool IRTranslator::translateMemFunc(const CallInst &CI, 1144 MachineIRBuilder &MIRBuilder, 1145 Intrinsic::ID ID) { 1146 1147 // If the source is undef, then just emit a nop. 1148 if (isa<UndefValue>(CI.getArgOperand(1))) 1149 return true; 1150 1151 ArrayRef<Register> Res; 1152 auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true); 1153 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) 1154 ICall.addUse(getOrCreateVReg(**AI)); 1155 1156 Align DstAlign; 1157 Align SrcAlign; 1158 unsigned IsVol = 1159 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1)) 1160 ->getZExtValue(); 1161 1162 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) { 1163 DstAlign = MCI->getDestAlign().valueOrOne(); 1164 SrcAlign = MCI->getSourceAlign().valueOrOne(); 1165 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) { 1166 DstAlign = MMI->getDestAlign().valueOrOne(); 1167 SrcAlign = MMI->getSourceAlign().valueOrOne(); 1168 } else { 1169 auto *MSI = cast<MemSetInst>(&CI); 1170 DstAlign = MSI->getDestAlign().valueOrOne(); 1171 } 1172 1173 // We need to propagate the tail call flag from the IR inst as an argument. 1174 // Otherwise, we have to pessimize and assume later that we cannot tail call 1175 // any memory intrinsics. 1176 ICall.addImm(CI.isTailCall() ? 1 : 0); 1177 1178 // Create mem operands to store the alignment and volatile info. 1179 auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; 1180 ICall.addMemOperand(MF->getMachineMemOperand( 1181 MachinePointerInfo(CI.getArgOperand(0)), 1182 MachineMemOperand::MOStore | VolFlag, 1, DstAlign)); 1183 if (ID != Intrinsic::memset) 1184 ICall.addMemOperand(MF->getMachineMemOperand( 1185 MachinePointerInfo(CI.getArgOperand(1)), 1186 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign)); 1187 1188 return true; 1189 } 1190 1191 void IRTranslator::getStackGuard(Register DstReg, 1192 MachineIRBuilder &MIRBuilder) { 1193 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 1194 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 1195 auto MIB = 1196 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {}); 1197 1198 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1199 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 1200 if (!Global) 1201 return; 1202 1203 MachinePointerInfo MPInfo(Global); 1204 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 1205 MachineMemOperand::MODereferenceable; 1206 MachineMemOperand *MemRef = 1207 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 1208 DL->getPointerABIAlignment(0)); 1209 MIB.setMemRefs({MemRef}); 1210 } 1211 1212 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 1213 MachineIRBuilder &MIRBuilder) { 1214 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI); 1215 MIRBuilder.buildInstr( 1216 Op, {ResRegs[0], ResRegs[1]}, 1217 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))}); 1218 1219 return true; 1220 } 1221 1222 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) { 1223 switch (ID) { 1224 default: 1225 break; 1226 case Intrinsic::bswap: 1227 return TargetOpcode::G_BSWAP; 1228 case Intrinsic::bitreverse: 1229 return TargetOpcode::G_BITREVERSE; 1230 case Intrinsic::fshl: 1231 return TargetOpcode::G_FSHL; 1232 case Intrinsic::fshr: 1233 return TargetOpcode::G_FSHR; 1234 case Intrinsic::ceil: 1235 return TargetOpcode::G_FCEIL; 1236 case Intrinsic::cos: 1237 return TargetOpcode::G_FCOS; 1238 case Intrinsic::ctpop: 1239 return TargetOpcode::G_CTPOP; 1240 case Intrinsic::exp: 1241 return TargetOpcode::G_FEXP; 1242 case Intrinsic::exp2: 1243 return TargetOpcode::G_FEXP2; 1244 case Intrinsic::fabs: 1245 return TargetOpcode::G_FABS; 1246 case Intrinsic::copysign: 1247 return TargetOpcode::G_FCOPYSIGN; 1248 case Intrinsic::minnum: 1249 return TargetOpcode::G_FMINNUM; 1250 case Intrinsic::maxnum: 1251 return TargetOpcode::G_FMAXNUM; 1252 case Intrinsic::minimum: 1253 return TargetOpcode::G_FMINIMUM; 1254 case Intrinsic::maximum: 1255 return TargetOpcode::G_FMAXIMUM; 1256 case Intrinsic::canonicalize: 1257 return TargetOpcode::G_FCANONICALIZE; 1258 case Intrinsic::floor: 1259 return TargetOpcode::G_FFLOOR; 1260 case Intrinsic::fma: 1261 return TargetOpcode::G_FMA; 1262 case Intrinsic::log: 1263 return TargetOpcode::G_FLOG; 1264 case Intrinsic::log2: 1265 return TargetOpcode::G_FLOG2; 1266 case Intrinsic::log10: 1267 return TargetOpcode::G_FLOG10; 1268 case Intrinsic::nearbyint: 1269 return TargetOpcode::G_FNEARBYINT; 1270 case Intrinsic::pow: 1271 return TargetOpcode::G_FPOW; 1272 case Intrinsic::rint: 1273 return TargetOpcode::G_FRINT; 1274 case Intrinsic::round: 1275 return TargetOpcode::G_INTRINSIC_ROUND; 1276 case Intrinsic::sin: 1277 return TargetOpcode::G_FSIN; 1278 case Intrinsic::sqrt: 1279 return TargetOpcode::G_FSQRT; 1280 case Intrinsic::trunc: 1281 return TargetOpcode::G_INTRINSIC_TRUNC; 1282 case Intrinsic::readcyclecounter: 1283 return TargetOpcode::G_READCYCLECOUNTER; 1284 } 1285 return Intrinsic::not_intrinsic; 1286 } 1287 1288 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI, 1289 Intrinsic::ID ID, 1290 MachineIRBuilder &MIRBuilder) { 1291 1292 unsigned Op = getSimpleIntrinsicOpcode(ID); 1293 1294 // Is this a simple intrinsic? 1295 if (Op == Intrinsic::not_intrinsic) 1296 return false; 1297 1298 // Yes. Let's translate it. 1299 SmallVector<llvm::SrcOp, 4> VRegs; 1300 for (auto &Arg : CI.arg_operands()) 1301 VRegs.push_back(getOrCreateVReg(*Arg)); 1302 1303 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs, 1304 MachineInstr::copyFlagsFromInstruction(CI)); 1305 return true; 1306 } 1307 1308 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 1309 MachineIRBuilder &MIRBuilder) { 1310 1311 // If this is a simple intrinsic (that is, we just need to add a def of 1312 // a vreg, and uses for each arg operand, then translate it. 1313 if (translateSimpleIntrinsic(CI, ID, MIRBuilder)) 1314 return true; 1315 1316 switch (ID) { 1317 default: 1318 break; 1319 case Intrinsic::lifetime_start: 1320 case Intrinsic::lifetime_end: { 1321 // No stack colouring in O0, discard region information. 1322 if (MF->getTarget().getOptLevel() == CodeGenOpt::None) 1323 return true; 1324 1325 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START 1326 : TargetOpcode::LIFETIME_END; 1327 1328 // Get the underlying objects for the location passed on the lifetime 1329 // marker. 1330 SmallVector<const Value *, 4> Allocas; 1331 GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL); 1332 1333 // Iterate over each underlying object, creating lifetime markers for each 1334 // static alloca. Quit if we find a non-static alloca. 1335 for (const Value *V : Allocas) { 1336 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 1337 if (!AI) 1338 continue; 1339 1340 if (!AI->isStaticAlloca()) 1341 return true; 1342 1343 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI)); 1344 } 1345 return true; 1346 } 1347 case Intrinsic::dbg_declare: { 1348 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 1349 assert(DI.getVariable() && "Missing variable"); 1350 1351 const Value *Address = DI.getAddress(); 1352 if (!Address || isa<UndefValue>(Address)) { 1353 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 1354 return true; 1355 } 1356 1357 assert(DI.getVariable()->isValidLocationForIntrinsic( 1358 MIRBuilder.getDebugLoc()) && 1359 "Expected inlined-at fields to agree"); 1360 auto AI = dyn_cast<AllocaInst>(Address); 1361 if (AI && AI->isStaticAlloca()) { 1362 // Static allocas are tracked at the MF level, no need for DBG_VALUE 1363 // instructions (in fact, they get ignored if they *do* exist). 1364 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 1365 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 1366 } else { 1367 // A dbg.declare describes the address of a source variable, so lower it 1368 // into an indirect DBG_VALUE. 1369 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 1370 DI.getVariable(), DI.getExpression()); 1371 } 1372 return true; 1373 } 1374 case Intrinsic::dbg_label: { 1375 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 1376 assert(DI.getLabel() && "Missing label"); 1377 1378 assert(DI.getLabel()->isValidLocationForIntrinsic( 1379 MIRBuilder.getDebugLoc()) && 1380 "Expected inlined-at fields to agree"); 1381 1382 MIRBuilder.buildDbgLabel(DI.getLabel()); 1383 return true; 1384 } 1385 case Intrinsic::vaend: 1386 // No target I know of cares about va_end. Certainly no in-tree target 1387 // does. Simplest intrinsic ever! 1388 return true; 1389 case Intrinsic::vastart: { 1390 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1391 Value *Ptr = CI.getArgOperand(0); 1392 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 1393 1394 // FIXME: Get alignment 1395 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)}) 1396 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr), 1397 MachineMemOperand::MOStore, 1398 ListSize, Align(1))); 1399 return true; 1400 } 1401 case Intrinsic::dbg_value: { 1402 // This form of DBG_VALUE is target-independent. 1403 const DbgValueInst &DI = cast<DbgValueInst>(CI); 1404 const Value *V = DI.getValue(); 1405 assert(DI.getVariable()->isValidLocationForIntrinsic( 1406 MIRBuilder.getDebugLoc()) && 1407 "Expected inlined-at fields to agree"); 1408 if (!V) { 1409 // Currently the optimizer can produce this; insert an undef to 1410 // help debugging. Probably the optimizer should not do this. 1411 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 1412 } else if (const auto *CI = dyn_cast<Constant>(V)) { 1413 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 1414 } else { 1415 for (Register Reg : getOrCreateVRegs(*V)) { 1416 // FIXME: This does not handle register-indirect values at offset 0. The 1417 // direct/indirect thing shouldn't really be handled by something as 1418 // implicit as reg+noreg vs reg+imm in the first place, but it seems 1419 // pretty baked in right now. 1420 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 1421 } 1422 } 1423 return true; 1424 } 1425 case Intrinsic::uadd_with_overflow: 1426 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder); 1427 case Intrinsic::sadd_with_overflow: 1428 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 1429 case Intrinsic::usub_with_overflow: 1430 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder); 1431 case Intrinsic::ssub_with_overflow: 1432 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 1433 case Intrinsic::umul_with_overflow: 1434 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 1435 case Intrinsic::smul_with_overflow: 1436 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 1437 case Intrinsic::uadd_sat: 1438 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder); 1439 case Intrinsic::sadd_sat: 1440 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder); 1441 case Intrinsic::usub_sat: 1442 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder); 1443 case Intrinsic::ssub_sat: 1444 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder); 1445 case Intrinsic::fmuladd: { 1446 const TargetMachine &TM = MF->getTarget(); 1447 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1448 Register Dst = getOrCreateVReg(CI); 1449 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 1450 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 1451 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 1452 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 1453 TLI.isFMAFasterThanFMulAndFAdd(*MF, 1454 TLI.getValueType(*DL, CI.getType()))) { 1455 // TODO: Revisit this to see if we should move this part of the 1456 // lowering to the combiner. 1457 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2, 1458 MachineInstr::copyFlagsFromInstruction(CI)); 1459 } else { 1460 LLT Ty = getLLTForType(*CI.getType(), *DL); 1461 auto FMul = MIRBuilder.buildFMul( 1462 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI)); 1463 MIRBuilder.buildFAdd(Dst, FMul, Op2, 1464 MachineInstr::copyFlagsFromInstruction(CI)); 1465 } 1466 return true; 1467 } 1468 case Intrinsic::memcpy: 1469 case Intrinsic::memmove: 1470 case Intrinsic::memset: 1471 return translateMemFunc(CI, MIRBuilder, ID); 1472 case Intrinsic::eh_typeid_for: { 1473 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 1474 Register Reg = getOrCreateVReg(CI); 1475 unsigned TypeID = MF->getTypeIDFor(GV); 1476 MIRBuilder.buildConstant(Reg, TypeID); 1477 return true; 1478 } 1479 case Intrinsic::objectsize: 1480 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 1481 1482 case Intrinsic::is_constant: 1483 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 1484 1485 case Intrinsic::stackguard: 1486 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 1487 return true; 1488 case Intrinsic::stackprotector: { 1489 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1490 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy); 1491 getStackGuard(GuardVal, MIRBuilder); 1492 1493 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 1494 int FI = getOrCreateFrameIndex(*Slot); 1495 MF->getFrameInfo().setStackProtectorIndex(FI); 1496 1497 MIRBuilder.buildStore( 1498 GuardVal, getOrCreateVReg(*Slot), 1499 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 1500 MachineMemOperand::MOStore | 1501 MachineMemOperand::MOVolatile, 1502 PtrTy.getSizeInBits() / 8, Align(8))); 1503 return true; 1504 } 1505 case Intrinsic::stacksave: { 1506 // Save the stack pointer to the location provided by the intrinsic. 1507 Register Reg = getOrCreateVReg(CI); 1508 Register StackPtr = MF->getSubtarget() 1509 .getTargetLowering() 1510 ->getStackPointerRegisterToSaveRestore(); 1511 1512 // If the target doesn't specify a stack pointer, then fall back. 1513 if (!StackPtr) 1514 return false; 1515 1516 MIRBuilder.buildCopy(Reg, StackPtr); 1517 return true; 1518 } 1519 case Intrinsic::stackrestore: { 1520 // Restore the stack pointer from the location provided by the intrinsic. 1521 Register Reg = getOrCreateVReg(*CI.getArgOperand(0)); 1522 Register StackPtr = MF->getSubtarget() 1523 .getTargetLowering() 1524 ->getStackPointerRegisterToSaveRestore(); 1525 1526 // If the target doesn't specify a stack pointer, then fall back. 1527 if (!StackPtr) 1528 return false; 1529 1530 MIRBuilder.buildCopy(StackPtr, Reg); 1531 return true; 1532 } 1533 case Intrinsic::cttz: 1534 case Intrinsic::ctlz: { 1535 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 1536 bool isTrailing = ID == Intrinsic::cttz; 1537 unsigned Opcode = isTrailing 1538 ? Cst->isZero() ? TargetOpcode::G_CTTZ 1539 : TargetOpcode::G_CTTZ_ZERO_UNDEF 1540 : Cst->isZero() ? TargetOpcode::G_CTLZ 1541 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 1542 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)}, 1543 {getOrCreateVReg(*CI.getArgOperand(0))}); 1544 return true; 1545 } 1546 case Intrinsic::invariant_start: { 1547 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 1548 Register Undef = MRI->createGenericVirtualRegister(PtrTy); 1549 MIRBuilder.buildUndef(Undef); 1550 return true; 1551 } 1552 case Intrinsic::invariant_end: 1553 return true; 1554 case Intrinsic::assume: 1555 case Intrinsic::var_annotation: 1556 case Intrinsic::sideeffect: 1557 // Discard annotate attributes, assumptions, and artificial side-effects. 1558 return true; 1559 case Intrinsic::read_register: { 1560 Value *Arg = CI.getArgOperand(0); 1561 MIRBuilder 1562 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {}) 1563 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())); 1564 return true; 1565 } 1566 case Intrinsic::write_register: { 1567 Value *Arg = CI.getArgOperand(0); 1568 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER) 1569 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata())) 1570 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 1571 return true; 1572 } 1573 } 1574 return false; 1575 } 1576 1577 bool IRTranslator::translateInlineAsm(const CallBase &CB, 1578 MachineIRBuilder &MIRBuilder) { 1579 1580 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering(); 1581 1582 if (!ALI) { 1583 LLVM_DEBUG( 1584 dbgs() << "Inline asm lowering is not supported for this target yet\n"); 1585 return false; 1586 } 1587 1588 return ALI->lowerInlineAsm( 1589 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); }); 1590 } 1591 1592 bool IRTranslator::translateCallBase(const CallBase &CB, 1593 MachineIRBuilder &MIRBuilder) { 1594 ArrayRef<Register> Res = getOrCreateVRegs(CB); 1595 1596 SmallVector<ArrayRef<Register>, 8> Args; 1597 Register SwiftInVReg = 0; 1598 Register SwiftErrorVReg = 0; 1599 for (auto &Arg : CB.args()) { 1600 if (CLI->supportSwiftError() && isSwiftError(Arg)) { 1601 assert(SwiftInVReg == 0 && "Expected only one swift error argument"); 1602 LLT Ty = getLLTForType(*Arg->getType(), *DL); 1603 SwiftInVReg = MRI->createGenericVirtualRegister(Ty); 1604 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt( 1605 &CB, &MIRBuilder.getMBB(), Arg)); 1606 Args.emplace_back(makeArrayRef(SwiftInVReg)); 1607 SwiftErrorVReg = 1608 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg); 1609 continue; 1610 } 1611 Args.push_back(getOrCreateVRegs(*Arg)); 1612 } 1613 1614 // We don't set HasCalls on MFI here yet because call lowering may decide to 1615 // optimize into tail calls. Instead, we defer that to selection where a final 1616 // scan is done to check if any instructions are calls. 1617 bool Success = 1618 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg, 1619 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); }); 1620 1621 // Check if we just inserted a tail call. 1622 if (Success) { 1623 assert(!HasTailCall && "Can't tail call return twice from block?"); 1624 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); 1625 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt())); 1626 } 1627 1628 return Success; 1629 } 1630 1631 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 1632 const CallInst &CI = cast<CallInst>(U); 1633 auto TII = MF->getTarget().getIntrinsicInfo(); 1634 const Function *F = CI.getCalledFunction(); 1635 1636 // FIXME: support Windows dllimport function calls. 1637 if (F && (F->hasDLLImportStorageClass() || 1638 (MF->getTarget().getTargetTriple().isOSWindows() && 1639 F->hasExternalWeakLinkage()))) 1640 return false; 1641 1642 // FIXME: support control flow guard targets. 1643 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 1644 return false; 1645 1646 if (CI.isInlineAsm()) 1647 return translateInlineAsm(CI, MIRBuilder); 1648 1649 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1650 if (F && F->isIntrinsic()) { 1651 ID = F->getIntrinsicID(); 1652 if (TII && ID == Intrinsic::not_intrinsic) 1653 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 1654 } 1655 1656 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) 1657 return translateCallBase(CI, MIRBuilder); 1658 1659 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 1660 1661 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 1662 return true; 1663 1664 ArrayRef<Register> ResultRegs; 1665 if (!CI.getType()->isVoidTy()) 1666 ResultRegs = getOrCreateVRegs(CI); 1667 1668 // Ignore the callsite attributes. Backend code is most likely not expecting 1669 // an intrinsic to sometimes have side effects and sometimes not. 1670 MachineInstrBuilder MIB = 1671 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory()); 1672 if (isa<FPMathOperator>(CI)) 1673 MIB->copyIRFlags(CI); 1674 1675 for (auto &Arg : enumerate(CI.arg_operands())) { 1676 // Some intrinsics take metadata parameters. Reject them. 1677 if (isa<MetadataAsValue>(Arg.value())) 1678 return false; 1679 1680 // If this is required to be an immediate, don't materialize it in a 1681 // register. 1682 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) { 1683 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) { 1684 // imm arguments are more convenient than cimm (and realistically 1685 // probably sufficient), so use them. 1686 assert(CI->getBitWidth() <= 64 && 1687 "large intrinsic immediates not handled"); 1688 MIB.addImm(CI->getSExtValue()); 1689 } else { 1690 MIB.addFPImm(cast<ConstantFP>(Arg.value())); 1691 } 1692 } else { 1693 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value()); 1694 if (VRegs.size() > 1) 1695 return false; 1696 MIB.addUse(VRegs[0]); 1697 } 1698 } 1699 1700 // Add a MachineMemOperand if it is a target mem intrinsic. 1701 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1702 TargetLowering::IntrinsicInfo Info; 1703 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 1704 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 1705 Align Alignment = Info.align.getValueOr( 1706 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext()))); 1707 1708 uint64_t Size = Info.memVT.getStoreSize(); 1709 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 1710 Info.flags, Size, Alignment)); 1711 } 1712 1713 return true; 1714 } 1715 1716 bool IRTranslator::translateInvoke(const User &U, 1717 MachineIRBuilder &MIRBuilder) { 1718 const InvokeInst &I = cast<InvokeInst>(U); 1719 MCContext &Context = MF->getContext(); 1720 1721 const BasicBlock *ReturnBB = I.getSuccessor(0); 1722 const BasicBlock *EHPadBB = I.getSuccessor(1); 1723 1724 const Function *Fn = I.getCalledFunction(); 1725 if (I.isInlineAsm()) 1726 return false; 1727 1728 // FIXME: support invoking patchpoint and statepoint intrinsics. 1729 if (Fn && Fn->isIntrinsic()) 1730 return false; 1731 1732 // FIXME: support whatever these are. 1733 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 1734 return false; 1735 1736 // FIXME: support control flow guard targets. 1737 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget)) 1738 return false; 1739 1740 // FIXME: support Windows exception handling. 1741 if (!isa<LandingPadInst>(EHPadBB->front())) 1742 return false; 1743 1744 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 1745 // the region covered by the try. 1746 MCSymbol *BeginSymbol = Context.createTempSymbol(); 1747 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 1748 1749 if (!translateCallBase(I, MIRBuilder)) 1750 return false; 1751 1752 MCSymbol *EndSymbol = Context.createTempSymbol(); 1753 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 1754 1755 // FIXME: track probabilities. 1756 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 1757 &ReturnMBB = getMBB(*ReturnBB); 1758 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 1759 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 1760 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 1761 MIRBuilder.buildBr(ReturnMBB); 1762 1763 return true; 1764 } 1765 1766 bool IRTranslator::translateCallBr(const User &U, 1767 MachineIRBuilder &MIRBuilder) { 1768 // FIXME: Implement this. 1769 return false; 1770 } 1771 1772 bool IRTranslator::translateLandingPad(const User &U, 1773 MachineIRBuilder &MIRBuilder) { 1774 const LandingPadInst &LP = cast<LandingPadInst>(U); 1775 1776 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 1777 1778 MBB.setIsEHPad(); 1779 1780 // If there aren't registers to copy the values into (e.g., during SjLj 1781 // exceptions), then don't bother. 1782 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1783 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 1784 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 1785 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 1786 return true; 1787 1788 // If landingpad's return type is token type, we don't create DAG nodes 1789 // for its exception pointer and selector value. The extraction of exception 1790 // pointer or selector value from token type landingpads is not currently 1791 // supported. 1792 if (LP.getType()->isTokenTy()) 1793 return true; 1794 1795 // Add a label to mark the beginning of the landing pad. Deletion of the 1796 // landing pad can thus be detected via the MachineModuleInfo. 1797 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 1798 .addSym(MF->addLandingPad(&MBB)); 1799 1800 LLT Ty = getLLTForType(*LP.getType(), *DL); 1801 Register Undef = MRI->createGenericVirtualRegister(Ty); 1802 MIRBuilder.buildUndef(Undef); 1803 1804 SmallVector<LLT, 2> Tys; 1805 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 1806 Tys.push_back(getLLTForType(*Ty, *DL)); 1807 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 1808 1809 // Mark exception register as live in. 1810 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); 1811 if (!ExceptionReg) 1812 return false; 1813 1814 MBB.addLiveIn(ExceptionReg); 1815 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP); 1816 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); 1817 1818 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); 1819 if (!SelectorReg) 1820 return false; 1821 1822 MBB.addLiveIn(SelectorReg); 1823 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); 1824 MIRBuilder.buildCopy(PtrVReg, SelectorReg); 1825 MIRBuilder.buildCast(ResRegs[1], PtrVReg); 1826 1827 return true; 1828 } 1829 1830 bool IRTranslator::translateAlloca(const User &U, 1831 MachineIRBuilder &MIRBuilder) { 1832 auto &AI = cast<AllocaInst>(U); 1833 1834 if (AI.isSwiftError()) 1835 return true; 1836 1837 if (AI.isStaticAlloca()) { 1838 Register Res = getOrCreateVReg(AI); 1839 int FI = getOrCreateFrameIndex(AI); 1840 MIRBuilder.buildFrameIndex(Res, FI); 1841 return true; 1842 } 1843 1844 // FIXME: support stack probing for Windows. 1845 if (MF->getTarget().getTargetTriple().isOSWindows()) 1846 return false; 1847 1848 // Now we're in the harder dynamic case. 1849 Register NumElts = getOrCreateVReg(*AI.getArraySize()); 1850 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); 1851 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); 1852 if (MRI->getType(NumElts) != IntPtrTy) { 1853 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); 1854 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); 1855 NumElts = ExtElts; 1856 } 1857 1858 Type *Ty = AI.getAllocatedType(); 1859 1860 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); 1861 Register TySize = 1862 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty))); 1863 MIRBuilder.buildMul(AllocSize, NumElts, TySize); 1864 1865 // Round the size of the allocation up to the stack alignment size 1866 // by add SA-1 to the size. This doesn't overflow because we're computing 1867 // an address inside an alloca. 1868 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign(); 1869 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1); 1870 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne, 1871 MachineInstr::NoUWrap); 1872 auto AlignCst = 1873 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1)); 1874 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst); 1875 1876 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty)); 1877 if (Alignment <= StackAlign) 1878 Alignment = Align(1); 1879 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment); 1880 1881 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI); 1882 assert(MF->getFrameInfo().hasVarSizedObjects()); 1883 return true; 1884 } 1885 1886 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { 1887 // FIXME: We may need more info about the type. Because of how LLT works, 1888 // we're completely discarding the i64/double distinction here (amongst 1889 // others). Fortunately the ABIs I know of where that matters don't use va_arg 1890 // anyway but that's not guaranteed. 1891 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)}, 1892 {getOrCreateVReg(*U.getOperand(0)), 1893 uint64_t(DL->getABITypeAlignment(U.getType()))}); 1894 return true; 1895 } 1896 1897 bool IRTranslator::translateInsertElement(const User &U, 1898 MachineIRBuilder &MIRBuilder) { 1899 // If it is a <1 x Ty> vector, use the scalar as it is 1900 // not a legal vector type in LLT. 1901 if (cast<VectorType>(U.getType())->getNumElements() == 1) 1902 return translateCopy(U, *U.getOperand(1), MIRBuilder); 1903 1904 Register Res = getOrCreateVReg(U); 1905 Register Val = getOrCreateVReg(*U.getOperand(0)); 1906 Register Elt = getOrCreateVReg(*U.getOperand(1)); 1907 Register Idx = getOrCreateVReg(*U.getOperand(2)); 1908 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 1909 return true; 1910 } 1911 1912 bool IRTranslator::translateExtractElement(const User &U, 1913 MachineIRBuilder &MIRBuilder) { 1914 // If it is a <1 x Ty> vector, use the scalar as it is 1915 // not a legal vector type in LLT. 1916 if (cast<VectorType>(U.getOperand(0)->getType())->getNumElements() == 1) 1917 return translateCopy(U, *U.getOperand(0), MIRBuilder); 1918 1919 Register Res = getOrCreateVReg(U); 1920 Register Val = getOrCreateVReg(*U.getOperand(0)); 1921 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 1922 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 1923 Register Idx; 1924 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 1925 if (CI->getBitWidth() != PreferredVecIdxWidth) { 1926 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 1927 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 1928 Idx = getOrCreateVReg(*NewIdxCI); 1929 } 1930 } 1931 if (!Idx) 1932 Idx = getOrCreateVReg(*U.getOperand(1)); 1933 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 1934 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 1935 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0); 1936 } 1937 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1938 return true; 1939 } 1940 1941 bool IRTranslator::translateShuffleVector(const User &U, 1942 MachineIRBuilder &MIRBuilder) { 1943 ArrayRef<int> Mask; 1944 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U)) 1945 Mask = SVI->getShuffleMask(); 1946 else 1947 Mask = cast<ConstantExpr>(U).getShuffleMask(); 1948 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask); 1949 MIRBuilder 1950 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)}, 1951 {getOrCreateVReg(*U.getOperand(0)), 1952 getOrCreateVReg(*U.getOperand(1))}) 1953 .addShuffleMask(MaskAlloc); 1954 return true; 1955 } 1956 1957 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1958 const PHINode &PI = cast<PHINode>(U); 1959 1960 SmallVector<MachineInstr *, 4> Insts; 1961 for (auto Reg : getOrCreateVRegs(PI)) { 1962 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {}); 1963 Insts.push_back(MIB.getInstr()); 1964 } 1965 1966 PendingPHIs.emplace_back(&PI, std::move(Insts)); 1967 return true; 1968 } 1969 1970 bool IRTranslator::translateAtomicCmpXchg(const User &U, 1971 MachineIRBuilder &MIRBuilder) { 1972 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 1973 1974 if (I.isWeak()) 1975 return false; 1976 1977 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1978 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 1979 1980 Type *ResType = I.getType(); 1981 Type *ValType = ResType->Type::getStructElementType(0); 1982 1983 auto Res = getOrCreateVRegs(I); 1984 Register OldValRes = Res[0]; 1985 Register SuccessRes = Res[1]; 1986 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 1987 Register Cmp = getOrCreateVReg(*I.getCompareOperand()); 1988 Register NewVal = getOrCreateVReg(*I.getNewValOperand()); 1989 1990 AAMDNodes AAMetadata; 1991 I.getAAMetadata(AAMetadata); 1992 1993 MIRBuilder.buildAtomicCmpXchgWithSuccess( 1994 OldValRes, SuccessRes, Addr, Cmp, NewVal, 1995 *MF->getMachineMemOperand( 1996 MachinePointerInfo(I.getPointerOperand()), Flags, 1997 DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr, 1998 I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering())); 1999 return true; 2000 } 2001 2002 bool IRTranslator::translateAtomicRMW(const User &U, 2003 MachineIRBuilder &MIRBuilder) { 2004 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 2005 auto &TLI = *MF->getSubtarget().getTargetLowering(); 2006 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL); 2007 2008 Type *ResType = I.getType(); 2009 2010 Register Res = getOrCreateVReg(I); 2011 Register Addr = getOrCreateVReg(*I.getPointerOperand()); 2012 Register Val = getOrCreateVReg(*I.getValOperand()); 2013 2014 unsigned Opcode = 0; 2015 switch (I.getOperation()) { 2016 default: 2017 return false; 2018 case AtomicRMWInst::Xchg: 2019 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 2020 break; 2021 case AtomicRMWInst::Add: 2022 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 2023 break; 2024 case AtomicRMWInst::Sub: 2025 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 2026 break; 2027 case AtomicRMWInst::And: 2028 Opcode = TargetOpcode::G_ATOMICRMW_AND; 2029 break; 2030 case AtomicRMWInst::Nand: 2031 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 2032 break; 2033 case AtomicRMWInst::Or: 2034 Opcode = TargetOpcode::G_ATOMICRMW_OR; 2035 break; 2036 case AtomicRMWInst::Xor: 2037 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 2038 break; 2039 case AtomicRMWInst::Max: 2040 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 2041 break; 2042 case AtomicRMWInst::Min: 2043 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 2044 break; 2045 case AtomicRMWInst::UMax: 2046 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 2047 break; 2048 case AtomicRMWInst::UMin: 2049 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 2050 break; 2051 case AtomicRMWInst::FAdd: 2052 Opcode = TargetOpcode::G_ATOMICRMW_FADD; 2053 break; 2054 case AtomicRMWInst::FSub: 2055 Opcode = TargetOpcode::G_ATOMICRMW_FSUB; 2056 break; 2057 } 2058 2059 AAMDNodes AAMetadata; 2060 I.getAAMetadata(AAMetadata); 2061 2062 MIRBuilder.buildAtomicRMW( 2063 Opcode, Res, Addr, Val, 2064 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 2065 Flags, DL->getTypeStoreSize(ResType), 2066 getMemOpAlign(I), AAMetadata, nullptr, 2067 I.getSyncScopeID(), I.getOrdering())); 2068 return true; 2069 } 2070 2071 bool IRTranslator::translateFence(const User &U, 2072 MachineIRBuilder &MIRBuilder) { 2073 const FenceInst &Fence = cast<FenceInst>(U); 2074 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()), 2075 Fence.getSyncScopeID()); 2076 return true; 2077 } 2078 2079 bool IRTranslator::translateFreeze(const User &U, 2080 MachineIRBuilder &MIRBuilder) { 2081 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U); 2082 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0)); 2083 2084 assert(DstRegs.size() == SrcRegs.size() && 2085 "Freeze with different source and destination type?"); 2086 2087 for (unsigned I = 0; I < DstRegs.size(); ++I) { 2088 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]); 2089 } 2090 2091 return true; 2092 } 2093 2094 void IRTranslator::finishPendingPhis() { 2095 #ifndef NDEBUG 2096 DILocationVerifier Verifier; 2097 GISelObserverWrapper WrapperObserver(&Verifier); 2098 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2099 #endif // ifndef NDEBUG 2100 for (auto &Phi : PendingPHIs) { 2101 const PHINode *PI = Phi.first; 2102 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 2103 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent(); 2104 EntryBuilder->setDebugLoc(PI->getDebugLoc()); 2105 #ifndef NDEBUG 2106 Verifier.setCurrentInst(PI); 2107 #endif // ifndef NDEBUG 2108 2109 SmallSet<const MachineBasicBlock *, 16> SeenPreds; 2110 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 2111 auto IRPred = PI->getIncomingBlock(i); 2112 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 2113 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 2114 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred)) 2115 continue; 2116 SeenPreds.insert(Pred); 2117 for (unsigned j = 0; j < ValRegs.size(); ++j) { 2118 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 2119 MIB.addUse(ValRegs[j]); 2120 MIB.addMBB(Pred); 2121 } 2122 } 2123 } 2124 } 2125 } 2126 2127 bool IRTranslator::valueIsSplit(const Value &V, 2128 SmallVectorImpl<uint64_t> *Offsets) { 2129 SmallVector<LLT, 4> SplitTys; 2130 if (Offsets && !Offsets->empty()) 2131 Offsets->clear(); 2132 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 2133 return SplitTys.size() > 1; 2134 } 2135 2136 bool IRTranslator::translate(const Instruction &Inst) { 2137 CurBuilder->setDebugLoc(Inst.getDebugLoc()); 2138 // We only emit constants into the entry block from here. To prevent jumpy 2139 // debug behaviour set the line to 0. 2140 if (const DebugLoc &DL = Inst.getDebugLoc()) 2141 EntryBuilder->setDebugLoc( 2142 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt())); 2143 else 2144 EntryBuilder->setDebugLoc(DebugLoc()); 2145 2146 switch (Inst.getOpcode()) { 2147 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2148 case Instruction::OPCODE: \ 2149 return translate##OPCODE(Inst, *CurBuilder.get()); 2150 #include "llvm/IR/Instruction.def" 2151 default: 2152 return false; 2153 } 2154 } 2155 2156 bool IRTranslator::translate(const Constant &C, Register Reg) { 2157 if (auto CI = dyn_cast<ConstantInt>(&C)) 2158 EntryBuilder->buildConstant(Reg, *CI); 2159 else if (auto CF = dyn_cast<ConstantFP>(&C)) 2160 EntryBuilder->buildFConstant(Reg, *CF); 2161 else if (isa<UndefValue>(C)) 2162 EntryBuilder->buildUndef(Reg); 2163 else if (isa<ConstantPointerNull>(C)) 2164 EntryBuilder->buildConstant(Reg, 0); 2165 else if (auto GV = dyn_cast<GlobalValue>(&C)) 2166 EntryBuilder->buildGlobalValue(Reg, GV); 2167 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 2168 if (!CAZ->getType()->isVectorTy()) 2169 return false; 2170 // Return the scalar if it is a <1 x Ty> vector. 2171 if (CAZ->getNumElements() == 1) 2172 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get()); 2173 SmallVector<Register, 4> Ops; 2174 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 2175 Constant &Elt = *CAZ->getElementValue(i); 2176 Ops.push_back(getOrCreateVReg(Elt)); 2177 } 2178 EntryBuilder->buildBuildVector(Reg, Ops); 2179 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 2180 // Return the scalar if it is a <1 x Ty> vector. 2181 if (CV->getNumElements() == 1) 2182 return translateCopy(C, *CV->getElementAsConstant(0), 2183 *EntryBuilder.get()); 2184 SmallVector<Register, 4> Ops; 2185 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 2186 Constant &Elt = *CV->getElementAsConstant(i); 2187 Ops.push_back(getOrCreateVReg(Elt)); 2188 } 2189 EntryBuilder->buildBuildVector(Reg, Ops); 2190 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 2191 switch(CE->getOpcode()) { 2192 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 2193 case Instruction::OPCODE: \ 2194 return translate##OPCODE(*CE, *EntryBuilder.get()); 2195 #include "llvm/IR/Instruction.def" 2196 default: 2197 return false; 2198 } 2199 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 2200 if (CV->getNumOperands() == 1) 2201 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get()); 2202 SmallVector<Register, 4> Ops; 2203 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 2204 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 2205 } 2206 EntryBuilder->buildBuildVector(Reg, Ops); 2207 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 2208 EntryBuilder->buildBlockAddress(Reg, BA); 2209 } else 2210 return false; 2211 2212 return true; 2213 } 2214 2215 void IRTranslator::finalizeBasicBlock() { 2216 for (auto &JTCase : SL->JTCases) { 2217 // Emit header first, if it wasn't already emitted. 2218 if (!JTCase.first.Emitted) 2219 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB); 2220 2221 emitJumpTable(JTCase.second, JTCase.second.MBB); 2222 } 2223 SL->JTCases.clear(); 2224 } 2225 2226 void IRTranslator::finalizeFunction() { 2227 // Release the memory used by the different maps we 2228 // needed during the translation. 2229 PendingPHIs.clear(); 2230 VMap.reset(); 2231 FrameIndices.clear(); 2232 MachinePreds.clear(); 2233 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it 2234 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid 2235 // destroying it twice (in ~IRTranslator() and ~LLVMContext()) 2236 EntryBuilder.reset(); 2237 CurBuilder.reset(); 2238 FuncInfo.clear(); 2239 } 2240 2241 /// Returns true if a BasicBlock \p BB within a variadic function contains a 2242 /// variadic musttail call. 2243 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) { 2244 if (!IsVarArg) 2245 return false; 2246 2247 // Walk the block backwards, because tail calls usually only appear at the end 2248 // of a block. 2249 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) { 2250 const auto *CI = dyn_cast<CallInst>(&I); 2251 return CI && CI->isMustTailCall(); 2252 }); 2253 } 2254 2255 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 2256 MF = &CurMF; 2257 const Function &F = MF->getFunction(); 2258 if (F.empty()) 2259 return false; 2260 GISelCSEAnalysisWrapper &Wrapper = 2261 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper(); 2262 // Set the CSEConfig and run the analysis. 2263 GISelCSEInfo *CSEInfo = nullptr; 2264 TPC = &getAnalysis<TargetPassConfig>(); 2265 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences() 2266 ? EnableCSEInIRTranslator 2267 : TPC->isGISelCSEEnabled(); 2268 2269 if (EnableCSE) { 2270 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2271 CSEInfo = &Wrapper.get(TPC->getCSEConfig()); 2272 EntryBuilder->setCSEInfo(CSEInfo); 2273 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF); 2274 CurBuilder->setCSEInfo(CSEInfo); 2275 } else { 2276 EntryBuilder = std::make_unique<MachineIRBuilder>(); 2277 CurBuilder = std::make_unique<MachineIRBuilder>(); 2278 } 2279 CLI = MF->getSubtarget().getCallLowering(); 2280 CurBuilder->setMF(*MF); 2281 EntryBuilder->setMF(*MF); 2282 MRI = &MF->getRegInfo(); 2283 DL = &F.getParent()->getDataLayout(); 2284 ORE = std::make_unique<OptimizationRemarkEmitter>(&F); 2285 FuncInfo.MF = MF; 2286 FuncInfo.BPI = nullptr; 2287 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 2288 const TargetMachine &TM = MF->getTarget(); 2289 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo); 2290 SL->init(TLI, TM, *DL); 2291 2292 EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F); 2293 2294 assert(PendingPHIs.empty() && "stale PHIs"); 2295 2296 if (!DL->isLittleEndian()) { 2297 // Currently we don't properly handle big endian code. 2298 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 2299 F.getSubprogram(), &F.getEntryBlock()); 2300 R << "unable to translate in big endian mode"; 2301 reportTranslationError(*MF, *TPC, *ORE, R); 2302 } 2303 2304 // Release the per-function state when we return, whether we succeeded or not. 2305 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); 2306 2307 // Setup a separate basic-block for the arguments and constants 2308 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); 2309 MF->push_back(EntryBB); 2310 EntryBuilder->setMBB(*EntryBB); 2311 2312 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc(); 2313 SwiftError.setFunction(CurMF); 2314 SwiftError.createEntriesInEntryBlock(DbgLoc); 2315 2316 bool IsVarArg = F.isVarArg(); 2317 bool HasMustTailInVarArgFn = false; 2318 2319 // Create all blocks, in IR order, to preserve the layout. 2320 for (const BasicBlock &BB: F) { 2321 auto *&MBB = BBToMBB[&BB]; 2322 2323 MBB = MF->CreateMachineBasicBlock(&BB); 2324 MF->push_back(MBB); 2325 2326 if (BB.hasAddressTaken()) 2327 MBB->setHasAddressTaken(); 2328 2329 if (!HasMustTailInVarArgFn) 2330 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB); 2331 } 2332 2333 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn); 2334 2335 // Make our arguments/constants entry block fallthrough to the IR entry block. 2336 EntryBB->addSuccessor(&getMBB(F.front())); 2337 2338 // Lower the actual args into this basic block. 2339 SmallVector<ArrayRef<Register>, 8> VRegArgs; 2340 for (const Argument &Arg: F.args()) { 2341 if (DL->getTypeStoreSize(Arg.getType()) == 0) 2342 continue; // Don't handle zero sized types. 2343 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg); 2344 VRegArgs.push_back(VRegs); 2345 2346 if (Arg.hasSwiftErrorAttr()) { 2347 assert(VRegs.size() == 1 && "Too many vregs for Swift error"); 2348 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]); 2349 } 2350 } 2351 2352 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) { 2353 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 2354 F.getSubprogram(), &F.getEntryBlock()); 2355 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); 2356 reportTranslationError(*MF, *TPC, *ORE, R); 2357 return false; 2358 } 2359 2360 // Need to visit defs before uses when translating instructions. 2361 GISelObserverWrapper WrapperObserver; 2362 if (EnableCSE && CSEInfo) 2363 WrapperObserver.addObserver(CSEInfo); 2364 { 2365 ReversePostOrderTraversal<const Function *> RPOT(&F); 2366 #ifndef NDEBUG 2367 DILocationVerifier Verifier; 2368 WrapperObserver.addObserver(&Verifier); 2369 #endif // ifndef NDEBUG 2370 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver); 2371 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver); 2372 for (const BasicBlock *BB : RPOT) { 2373 MachineBasicBlock &MBB = getMBB(*BB); 2374 // Set the insertion point of all the following translations to 2375 // the end of this basic block. 2376 CurBuilder->setMBB(MBB); 2377 HasTailCall = false; 2378 for (const Instruction &Inst : *BB) { 2379 // If we translated a tail call in the last step, then we know 2380 // everything after the call is either a return, or something that is 2381 // handled by the call itself. (E.g. a lifetime marker or assume 2382 // intrinsic.) In this case, we should stop translating the block and 2383 // move on. 2384 if (HasTailCall) 2385 break; 2386 #ifndef NDEBUG 2387 Verifier.setCurrentInst(&Inst); 2388 #endif // ifndef NDEBUG 2389 if (translate(Inst)) 2390 continue; 2391 2392 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 2393 Inst.getDebugLoc(), BB); 2394 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst); 2395 2396 if (ORE->allowExtraAnalysis("gisel-irtranslator")) { 2397 std::string InstStrStorage; 2398 raw_string_ostream InstStr(InstStrStorage); 2399 InstStr << Inst; 2400 2401 R << ": '" << InstStr.str() << "'"; 2402 } 2403 2404 reportTranslationError(*MF, *TPC, *ORE, R); 2405 return false; 2406 } 2407 2408 finalizeBasicBlock(); 2409 } 2410 #ifndef NDEBUG 2411 WrapperObserver.removeObserver(&Verifier); 2412 #endif 2413 } 2414 2415 finishPendingPhis(); 2416 2417 SwiftError.propagateVRegs(); 2418 2419 // Merge the argument lowering and constants block with its single 2420 // successor, the LLVM-IR entry block. We want the basic block to 2421 // be maximal. 2422 assert(EntryBB->succ_size() == 1 && 2423 "Custom BB used for lowering should have only one successor"); 2424 // Get the successor of the current entry block. 2425 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin(); 2426 assert(NewEntryBB.pred_size() == 1 && 2427 "LLVM-IR entry block has a predecessor!?"); 2428 // Move all the instruction from the current entry block to the 2429 // new entry block. 2430 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(), 2431 EntryBB->end()); 2432 2433 // Update the live-in information for the new entry block. 2434 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins()) 2435 NewEntryBB.addLiveIn(LiveIn); 2436 NewEntryBB.sortUniqueLiveIns(); 2437 2438 // Get rid of the now empty basic block. 2439 EntryBB->removeSuccessor(&NewEntryBB); 2440 MF->remove(EntryBB); 2441 MF->DeleteMachineBasicBlock(EntryBB); 2442 2443 assert(&MF->front() == &NewEntryBB && 2444 "New entry wasn't next in the list of basic block!"); 2445 2446 // Initialize stack protector information. 2447 StackProtector &SP = getAnalysis<StackProtector>(); 2448 SP.copyToMachineFrameInfo(MF->getFrameInfo()); 2449 2450 return false; 2451 } 2452