//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

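/// Recursively decompose \p Ty into its leaf (non-aggregate) types, appending
/// one LLT per leaf to \p ValueTys. If \p Offsets is non-null, the bit offset
/// of each leaf within the overall value is recorded as well.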
static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
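  // For illustration (hypothetical values): translating `%d = add i32 %a, %b`
  // yields roughly `%d:_(s32) = G_ADD %a, %b`, with the IR instruction's
  // flags copied onto the generic instruction below.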
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  if (isa<Instruction>(U)) {
    MachineInstr *FBinOpMI = FBinOp.getInstr();
    const Instruction &I = cast<Instruction>(U);
    FBinOpMI->copyIRFlags(I);
  }
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  // The unary fneg instruction has a single operand: operand 0.
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
    // U may be a constant expression, in which case there is no CmpInst
    // whose flags could be copied.
    if (CI)
      FCmp->copyIRFlags(*CI);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may mess up the insertion point, but that is not important
  // as a return is the last instruction of the block anyway.

  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

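// A load of a value with several LLT components (e.g. a first-class struct)
// is emitted as one G_LOAD per component, each addressed at the pointer
// operand plus that component's byte offset; stores are handled symmetrically.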
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
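  // For example (assuming a typical 64-bit DataLayout), for
  //   %x = extractvalue {i32, i64} %agg, 1
  // the indices below are [0, 1] and the result is 64, i.e. the bit offset of
  // the i64 field, which starts at byte 8 of the struct.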
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    auto Select =
        MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
    if (Cmp && isa<FPMathOperator>(Cmp)) {
      Select->copyIRFlags(*Cmp);
    }
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow: {
    auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    Pow->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp: {
    auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp2: {
    auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log: {
    auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log2: {
    auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log10: {
    auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
                     .addDef(getOrCreateVReg(CI))
                     .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log10->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fabs: {
    auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Fabs->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::round:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma: {
    auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    FMA->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      auto FMA =
          MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
      FMA->copyIRFlags(CI);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
      FMul->copyIRFlags(CI);
      auto FAdd =
          MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
      FAdd->copyIRFlags(CI);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::ctpop: {
    MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::ceil:
    MIRBuilder.buildInstr(TargetOpcode::G_FCEIL)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

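/// Pack the virtual registers that make up \p V into a single vreg of \p V's
/// overall LLT, by inserting each component at its recorded offset into an
/// initially undef value. Values that already live in one vreg are returned
/// as-is.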
unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
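  // Roughly: multiply the (negated) element size by the element count, bump
  // the stack pointer by that amount (the stack grows down, hence the negated
  // size), realign the result if needed, then copy it back both to the stack
  // pointer and to the alloca's vreg.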
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Align the pointer down to the requested alignment. Since the allocation
    // size was applied as a negative offset from SP, masking the low bits
    // effectively rounds the allocation itself up to the alignment.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

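// Fill in the operands of the G_PHIs recorded during translation: by now
// every MachineBasicBlock exists, so each pending PHI gets one
// (incoming vreg, predecessor MBB) pair per machine-level predecessor.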
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // We are building a constant 0 value of pointer type, so materialize the
    // zero as an integer of the pointer's size and cast it to the pointer
    // type.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder->buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
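    // For example, <1 x float> <float 1.0> is translated by recursing on its
    // only element, so Reg ends up defined by a plain G_FCONSTANT rather than
    // a single-element G_BUILD_VECTOR.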
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool IsO0 = TPC->getOptLevel() == CodeGenOpt::Level::None;
  // Disable CSE for O0.
  bool EnableCSE = !IsO0 && EnableCSEInIRTranslator;
  if (EnableCSE) {
    EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
    std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
    CSEInfo = &Wrapper.get(std::move(Config));
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = make_unique<MachineIRBuilder>();
    CurBuilder = make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
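  // (make_scope_exit runs finalizeFunction() when FinalizeOnReturn goes out
  // of scope, so the per-function maps are also cleared on the early
  // "return false" error paths below.)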
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
    }
    ArgIt++;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
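      // (setMBB places the builder's insertion point at the end of MBB, so
      // each translated instruction is appended in IR order.)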
      CurBuilder->setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}