//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
94 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 95 R << (" (in function: " + MF.getName() + ")").str(); 96 97 if (TPC.isGlobalISelAbortEnabled()) 98 report_fatal_error(R.getMsg()); 99 else 100 ORE.emit(R); 101 } 102 103 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { 104 initializeIRTranslatorPass(*PassRegistry::getPassRegistry()); 105 } 106 107 #ifndef NDEBUG 108 /// Verify that every instruction created has the same DILocation as the 109 /// instruction being translated. 110 class DILocationVerifier : MachineFunction::Delegate { 111 MachineFunction &MF; 112 const Instruction *CurrInst = nullptr; 113 114 public: 115 DILocationVerifier(MachineFunction &MF) : MF(MF) { MF.setDelegate(this); } 116 ~DILocationVerifier() { MF.resetDelegate(this); } 117 118 const Instruction *getCurrentInst() const { return CurrInst; } 119 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; } 120 121 void MF_HandleInsertion(const MachineInstr &MI) override { 122 assert(getCurrentInst() && "Inserted instruction without a current MI"); 123 124 // Only print the check message if we're actually checking it. 125 #ifndef NDEBUG 126 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst 127 << " was copied to " << MI); 128 #endif 129 assert(CurrInst->getDebugLoc() == MI.getDebugLoc() && 130 "Line info was not transferred to all instructions"); 131 } 132 void MF_HandleRemoval(const MachineInstr &MI) override {} 133 }; 134 #endif // ifndef NDEBUG 135 136 137 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 138 AU.addRequired<StackProtector>(); 139 AU.addRequired<TargetPassConfig>(); 140 getSelectionDAGFallbackAnalysisUsage(AU); 141 MachineFunctionPass::getAnalysisUsage(AU); 142 } 143 144 static void computeValueLLTs(const DataLayout &DL, Type &Ty, 145 SmallVectorImpl<LLT> &ValueTys, 146 SmallVectorImpl<uint64_t> *Offsets = nullptr, 147 uint64_t StartingOffset = 0) { 148 // Given a struct type, recursively traverse the elements. 149 if (StructType *STy = dyn_cast<StructType>(&Ty)) { 150 const StructLayout *SL = DL.getStructLayout(STy); 151 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) 152 computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets, 153 StartingOffset + SL->getElementOffset(I)); 154 return; 155 } 156 // Given an array type, recursively traverse the elements. 157 if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) { 158 Type *EltTy = ATy->getElementType(); 159 uint64_t EltSize = DL.getTypeAllocSize(EltTy); 160 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) 161 computeValueLLTs(DL, *EltTy, ValueTys, Offsets, 162 StartingOffset + i * EltSize); 163 return; 164 } 165 // Interpret void as zero return values. 166 if (Ty.isVoidTy()) 167 return; 168 // Base case: we can get an LLT for this LLVM IR type. 169 ValueTys.push_back(getLLTForType(Ty, DL)); 170 if (Offsets != nullptr) 171 Offsets->push_back(StartingOffset * 8); 172 } 173 174 IRTranslator::ValueToVRegInfo::VRegListT & 175 IRTranslator::allocateVRegs(const Value &Val) { 176 assert(!VMap.contains(Val) && "Value already allocated in VMap"); 177 auto *Regs = VMap.getVRegs(Val); 178 auto *Offsets = VMap.getOffsets(Val); 179 SmallVector<LLT, 4> SplitTys; 180 computeValueLLTs(*DL, *Val.getType(), SplitTys, 181 Offsets->empty() ? 
Offsets : nullptr); 182 for (unsigned i = 0; i < SplitTys.size(); ++i) 183 Regs->push_back(0); 184 return *Regs; 185 } 186 187 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) { 188 auto VRegsIt = VMap.findVRegs(Val); 189 if (VRegsIt != VMap.vregs_end()) 190 return *VRegsIt->second; 191 192 if (Val.getType()->isVoidTy()) 193 return *VMap.getVRegs(Val); 194 195 // Create entry for this type. 196 auto *VRegs = VMap.getVRegs(Val); 197 auto *Offsets = VMap.getOffsets(Val); 198 199 assert(Val.getType()->isSized() && 200 "Don't know how to create an empty vreg"); 201 202 SmallVector<LLT, 4> SplitTys; 203 computeValueLLTs(*DL, *Val.getType(), SplitTys, 204 Offsets->empty() ? Offsets : nullptr); 205 206 if (!isa<Constant>(Val)) { 207 for (auto Ty : SplitTys) 208 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 209 return *VRegs; 210 } 211 212 if (Val.getType()->isAggregateType()) { 213 // UndefValue, ConstantAggregateZero 214 auto &C = cast<Constant>(Val); 215 unsigned Idx = 0; 216 while (auto Elt = C.getAggregateElement(Idx++)) { 217 auto EltRegs = getOrCreateVRegs(*Elt); 218 std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs)); 219 } 220 } else { 221 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 222 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 223 bool Success = translate(cast<Constant>(Val), VRegs->front()); 224 if (!Success) { 225 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 226 MF->getFunction().getSubprogram(), 227 &MF->getFunction().getEntryBlock()); 228 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 229 reportTranslationError(*MF, *TPC, *ORE, R); 230 return *VRegs; 231 } 232 } 233 234 return *VRegs; 235 } 236 237 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 238 if (FrameIndices.find(&AI) != FrameIndices.end()) 239 return FrameIndices[&AI]; 240 241 unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType()); 242 unsigned Size = 243 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 244 245 // Always allocate at least one byte. 246 Size = std::max(Size, 1u); 247 248 unsigned Alignment = AI.getAlignment(); 249 if (!Alignment) 250 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 251 252 int &FI = FrameIndices[&AI]; 253 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 254 return FI; 255 } 256 257 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) { 258 unsigned Alignment = 0; 259 Type *ValTy = nullptr; 260 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { 261 Alignment = SI->getAlignment(); 262 ValTy = SI->getValueOperand()->getType(); 263 } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 264 Alignment = LI->getAlignment(); 265 ValTy = LI->getType(); 266 } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 267 // TODO(PR27168): This instruction has no alignment attribute, but unlike 268 // the default alignment for load/store, the default here is to assume 269 // it has NATURAL alignment, not DataLayout-specified alignment. 
270 const DataLayout &DL = AI->getModule()->getDataLayout(); 271 Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType()); 272 ValTy = AI->getCompareOperand()->getType(); 273 } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 274 // TODO(PR27168): This instruction has no alignment attribute, but unlike 275 // the default alignment for load/store, the default here is to assume 276 // it has NATURAL alignment, not DataLayout-specified alignment. 277 const DataLayout &DL = AI->getModule()->getDataLayout(); 278 Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType()); 279 ValTy = AI->getType(); 280 } else { 281 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 282 R << "unable to translate memop: " << ore::NV("Opcode", &I); 283 reportTranslationError(*MF, *TPC, *ORE, R); 284 return 1; 285 } 286 287 return Alignment ? Alignment : DL->getABITypeAlignment(ValTy); 288 } 289 290 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 291 MachineBasicBlock *&MBB = BBToMBB[&BB]; 292 assert(MBB && "BasicBlock was not encountered before"); 293 return *MBB; 294 } 295 296 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 297 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 298 MachinePreds[Edge].push_back(NewPred); 299 } 300 301 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 302 MachineIRBuilder &MIRBuilder) { 303 // FIXME: handle signed/unsigned wrapping flags. 304 305 // Get or create a virtual register for each value. 306 // Unless the value is a Constant => loadimm cst? 307 // or inline constant each time? 308 // Creation of a virtual register needs to have a size. 309 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 310 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 311 unsigned Res = getOrCreateVReg(U); 312 auto FBinOp = MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1); 313 if (isa<Instruction>(U)) { 314 MachineInstr *FBinOpMI = FBinOp.getInstr(); 315 const Instruction &I = cast<Instruction>(U); 316 FBinOpMI->copyIRFlags(I); 317 } 318 return true; 319 } 320 321 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 322 // -0.0 - X --> G_FNEG 323 if (isa<Constant>(U.getOperand(0)) && 324 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 325 MIRBuilder.buildInstr(TargetOpcode::G_FNEG) 326 .addDef(getOrCreateVReg(U)) 327 .addUse(getOrCreateVReg(*U.getOperand(1))); 328 return true; 329 } 330 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 331 } 332 333 bool IRTranslator::translateCompare(const User &U, 334 MachineIRBuilder &MIRBuilder) { 335 const CmpInst *CI = dyn_cast<CmpInst>(&U); 336 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 337 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 338 unsigned Res = getOrCreateVReg(U); 339 CmpInst::Predicate Pred = 340 CI ? 
CI->getPredicate() : static_cast<CmpInst::Predicate>( 341 cast<ConstantExpr>(U).getPredicate()); 342 if (CmpInst::isIntPredicate(Pred)) 343 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 344 else if (Pred == CmpInst::FCMP_FALSE) 345 MIRBuilder.buildCopy( 346 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType()))); 347 else if (Pred == CmpInst::FCMP_TRUE) 348 MIRBuilder.buildCopy( 349 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType()))); 350 else 351 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1); 352 353 return true; 354 } 355 356 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 357 const ReturnInst &RI = cast<ReturnInst>(U); 358 const Value *Ret = RI.getReturnValue(); 359 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 360 Ret = nullptr; 361 362 ArrayRef<unsigned> VRegs; 363 if (Ret) 364 VRegs = getOrCreateVRegs(*Ret); 365 366 // The target may mess up with the insertion point, but 367 // this is not important as a return is the last instruction 368 // of the block anyway. 369 370 return CLI->lowerReturn(MIRBuilder, Ret, VRegs); 371 } 372 373 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 374 const BranchInst &BrInst = cast<BranchInst>(U); 375 unsigned Succ = 0; 376 if (!BrInst.isUnconditional()) { 377 // We want a G_BRCOND to the true BB followed by an unconditional branch. 378 unsigned Tst = getOrCreateVReg(*BrInst.getCondition()); 379 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); 380 MachineBasicBlock &TrueBB = getMBB(TrueTgt); 381 MIRBuilder.buildBrCond(Tst, TrueBB); 382 } 383 384 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); 385 MachineBasicBlock &TgtBB = getMBB(BrTgt); 386 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 387 388 // If the unconditional target is the layout successor, fallthrough. 389 if (!CurBB.isLayoutSuccessor(&TgtBB)) 390 MIRBuilder.buildBr(TgtBB); 391 392 // Link successors. 393 for (const BasicBlock *Succ : successors(&BrInst)) 394 CurBB.addSuccessor(&getMBB(*Succ)); 395 return true; 396 } 397 398 bool IRTranslator::translateSwitch(const User &U, 399 MachineIRBuilder &MIRBuilder) { 400 // For now, just translate as a chain of conditional branches. 401 // FIXME: could we share most of the logic/code in 402 // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel? 403 // At first sight, it seems most of the logic in there is independent of 404 // SelectionDAG-specifics and a lot of work went in to optimize switch 405 // lowering in there. 406 407 const SwitchInst &SwInst = cast<SwitchInst>(U); 408 const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition()); 409 const BasicBlock *OrigBB = SwInst.getParent(); 410 411 LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL); 412 for (auto &CaseIt : SwInst.cases()) { 413 const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue()); 414 const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1); 415 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue); 416 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 417 const BasicBlock *TrueBB = CaseIt.getCaseSuccessor(); 418 MachineBasicBlock &TrueMBB = getMBB(*TrueBB); 419 420 MIRBuilder.buildBrCond(Tst, TrueMBB); 421 CurMBB.addSuccessor(&TrueMBB); 422 addMachineCFGPred({OrigBB, TrueBB}, &CurMBB); 423 424 MachineBasicBlock *FalseMBB = 425 MF->CreateMachineBasicBlock(SwInst.getParent()); 426 // Insert the comparison blocks one after the other. 
427 MF->insert(std::next(CurMBB.getIterator()), FalseMBB); 428 MIRBuilder.buildBr(*FalseMBB); 429 CurMBB.addSuccessor(FalseMBB); 430 431 MIRBuilder.setMBB(*FalseMBB); 432 } 433 // handle default case 434 const BasicBlock *DefaultBB = SwInst.getDefaultDest(); 435 MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB); 436 MIRBuilder.buildBr(DefaultMBB); 437 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 438 CurMBB.addSuccessor(&DefaultMBB); 439 addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB); 440 441 return true; 442 } 443 444 bool IRTranslator::translateIndirectBr(const User &U, 445 MachineIRBuilder &MIRBuilder) { 446 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 447 448 const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress()); 449 MIRBuilder.buildBrIndirect(Tgt); 450 451 // Link successors. 452 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 453 for (const BasicBlock *Succ : successors(&BrInst)) 454 CurBB.addSuccessor(&getMBB(*Succ)); 455 456 return true; 457 } 458 459 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 460 const LoadInst &LI = cast<LoadInst>(U); 461 462 auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile 463 : MachineMemOperand::MONone; 464 Flags |= MachineMemOperand::MOLoad; 465 466 if (DL->getTypeStoreSize(LI.getType()) == 0) 467 return true; 468 469 ArrayRef<unsigned> Regs = getOrCreateVRegs(LI); 470 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 471 unsigned Base = getOrCreateVReg(*LI.getPointerOperand()); 472 473 for (unsigned i = 0; i < Regs.size(); ++i) { 474 unsigned Addr = 0; 475 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 476 477 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 478 unsigned BaseAlign = getMemOpAlignment(LI); 479 auto MMO = MF->getMachineMemOperand( 480 Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8, 481 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 482 LI.getSyncScopeID(), LI.getOrdering()); 483 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 484 } 485 486 return true; 487 } 488 489 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 490 const StoreInst &SI = cast<StoreInst>(U); 491 auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile 492 : MachineMemOperand::MONone; 493 Flags |= MachineMemOperand::MOStore; 494 495 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 496 return true; 497 498 ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand()); 499 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 500 unsigned Base = getOrCreateVReg(*SI.getPointerOperand()); 501 502 for (unsigned i = 0; i < Vals.size(); ++i) { 503 unsigned Addr = 0; 504 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 505 506 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 507 unsigned BaseAlign = getMemOpAlignment(SI); 508 auto MMO = MF->getMachineMemOperand( 509 Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8, 510 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 511 SI.getSyncScopeID(), SI.getOrdering()); 512 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 513 } 514 return true; 515 } 516 517 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 518 const Value *Src = U.getOperand(0); 519 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 520 521 // getIndexedOffsetInType is designed for GEPs, so the first index is the 522 // usual array element rather than looking into the actual aggregate. 
523 SmallVector<Value *, 1> Indices; 524 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 525 526 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 527 for (auto Idx : EVI->indices()) 528 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 529 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 530 for (auto Idx : IVI->indices()) 531 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 532 } else { 533 for (unsigned i = 1; i < U.getNumOperands(); ++i) 534 Indices.push_back(U.getOperand(i)); 535 } 536 537 return 8 * static_cast<uint64_t>( 538 DL.getIndexedOffsetInType(Src->getType(), Indices)); 539 } 540 541 bool IRTranslator::translateExtractValue(const User &U, 542 MachineIRBuilder &MIRBuilder) { 543 const Value *Src = U.getOperand(0); 544 uint64_t Offset = getOffsetFromIndices(U, *DL); 545 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 546 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 547 unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) - 548 Offsets.begin(); 549 auto &DstRegs = allocateVRegs(U); 550 551 for (unsigned i = 0; i < DstRegs.size(); ++i) 552 DstRegs[i] = SrcRegs[Idx++]; 553 554 return true; 555 } 556 557 bool IRTranslator::translateInsertValue(const User &U, 558 MachineIRBuilder &MIRBuilder) { 559 const Value *Src = U.getOperand(0); 560 uint64_t Offset = getOffsetFromIndices(U, *DL); 561 auto &DstRegs = allocateVRegs(U); 562 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 563 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 564 ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 565 auto InsertedIt = InsertedRegs.begin(); 566 567 for (unsigned i = 0; i < DstRegs.size(); ++i) { 568 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 569 DstRegs[i] = *InsertedIt++; 570 else 571 DstRegs[i] = SrcRegs[i]; 572 } 573 574 return true; 575 } 576 577 bool IRTranslator::translateSelect(const User &U, 578 MachineIRBuilder &MIRBuilder) { 579 unsigned Tst = getOrCreateVReg(*U.getOperand(0)); 580 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U); 581 ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 582 ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 583 584 for (unsigned i = 0; i < ResRegs.size(); ++i) 585 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]); 586 587 return true; 588 } 589 590 bool IRTranslator::translateBitCast(const User &U, 591 MachineIRBuilder &MIRBuilder) { 592 // If we're bitcasting to the source type, we can reuse the source vreg. 593 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 594 getLLTForType(*U.getType(), *DL)) { 595 unsigned SrcReg = getOrCreateVReg(*U.getOperand(0)); 596 auto &Regs = *VMap.getVRegs(U); 597 // If we already assigned a vreg for this bitcast, we can't change that. 598 // Emit a copy to satisfy the users we already emitted. 599 if (!Regs.empty()) 600 MIRBuilder.buildCopy(Regs[0], SrcReg); 601 else { 602 Regs.push_back(SrcReg); 603 VMap.getOffsets(U)->push_back(0); 604 } 605 return true; 606 } 607 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 608 } 609 610 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 611 MachineIRBuilder &MIRBuilder) { 612 unsigned Op = getOrCreateVReg(*U.getOperand(0)); 613 unsigned Res = getOrCreateVReg(U); 614 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op); 615 return true; 616 } 617 618 bool IRTranslator::translateGetElementPtr(const User &U, 619 MachineIRBuilder &MIRBuilder) { 620 // FIXME: support vector GEPs. 
621 if (U.getType()->isVectorTy()) 622 return false; 623 624 Value &Op0 = *U.getOperand(0); 625 unsigned BaseReg = getOrCreateVReg(Op0); 626 Type *PtrIRTy = Op0.getType(); 627 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 628 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 629 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 630 631 int64_t Offset = 0; 632 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 633 GTI != E; ++GTI) { 634 const Value *Idx = GTI.getOperand(); 635 if (StructType *StTy = GTI.getStructTypeOrNull()) { 636 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 637 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 638 continue; 639 } else { 640 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 641 642 // If this is a scalar constant or a splat vector of constants, 643 // handle it quickly. 644 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 645 Offset += ElementSize * CI->getSExtValue(); 646 continue; 647 } 648 649 if (Offset != 0) { 650 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 651 unsigned OffsetReg = 652 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 653 MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg); 654 655 BaseReg = NewBaseReg; 656 Offset = 0; 657 } 658 659 unsigned IdxReg = getOrCreateVReg(*Idx); 660 if (MRI->getType(IdxReg) != OffsetTy) { 661 unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy); 662 MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg); 663 IdxReg = NewIdxReg; 664 } 665 666 // N = N + Idx * ElementSize; 667 // Avoid doing it for ElementSize of 1. 668 unsigned GepOffsetReg; 669 if (ElementSize != 1) { 670 unsigned ElementSizeReg = 671 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize)); 672 673 GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy); 674 MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg); 675 } else 676 GepOffsetReg = IdxReg; 677 678 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 679 MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg); 680 BaseReg = NewBaseReg; 681 } 682 } 683 684 if (Offset != 0) { 685 unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 686 MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg); 687 return true; 688 } 689 690 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 691 return true; 692 } 693 694 bool IRTranslator::translateMemfunc(const CallInst &CI, 695 MachineIRBuilder &MIRBuilder, 696 unsigned ID) { 697 LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL); 698 Type *DstTy = CI.getArgOperand(0)->getType(); 699 if (cast<PointerType>(DstTy)->getAddressSpace() != 0 || 700 SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0)) 701 return false; 702 703 SmallVector<CallLowering::ArgInfo, 8> Args; 704 for (int i = 0; i < 3; ++i) { 705 const auto &Arg = CI.getArgOperand(i); 706 Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType()); 707 } 708 709 const char *Callee; 710 switch (ID) { 711 case Intrinsic::memmove: 712 case Intrinsic::memcpy: { 713 Type *SrcTy = CI.getArgOperand(1)->getType(); 714 if(cast<PointerType>(SrcTy)->getAddressSpace() != 0) 715 return false; 716 Callee = ID == Intrinsic::memcpy ? 
"memcpy" : "memmove"; 717 break; 718 } 719 case Intrinsic::memset: 720 Callee = "memset"; 721 break; 722 default: 723 return false; 724 } 725 726 return CLI->lowerCall(MIRBuilder, CI.getCallingConv(), 727 MachineOperand::CreateES(Callee), 728 CallLowering::ArgInfo(0, CI.getType()), Args); 729 } 730 731 void IRTranslator::getStackGuard(unsigned DstReg, 732 MachineIRBuilder &MIRBuilder) { 733 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 734 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 735 auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD); 736 MIB.addDef(DstReg); 737 738 auto &TLI = *MF->getSubtarget().getTargetLowering(); 739 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 740 if (!Global) 741 return; 742 743 MachinePointerInfo MPInfo(Global); 744 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 745 MachineMemOperand::MODereferenceable; 746 MachineMemOperand *MemRef = 747 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 748 DL->getPointerABIAlignment(0)); 749 MIB.setMemRefs({MemRef}); 750 } 751 752 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 753 MachineIRBuilder &MIRBuilder) { 754 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI); 755 MIRBuilder.buildInstr(Op) 756 .addDef(ResRegs[0]) 757 .addDef(ResRegs[1]) 758 .addUse(getOrCreateVReg(*CI.getOperand(0))) 759 .addUse(getOrCreateVReg(*CI.getOperand(1))); 760 761 return true; 762 } 763 764 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 765 MachineIRBuilder &MIRBuilder) { 766 switch (ID) { 767 default: 768 break; 769 case Intrinsic::lifetime_start: 770 case Intrinsic::lifetime_end: 771 // Stack coloring is not enabled in O0 (which we care about now) so we can 772 // drop these. Make sure someone notices when we start compiling at higher 773 // opts though. 774 if (MF->getTarget().getOptLevel() != CodeGenOpt::None) 775 return false; 776 return true; 777 case Intrinsic::dbg_declare: { 778 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 779 assert(DI.getVariable() && "Missing variable"); 780 781 const Value *Address = DI.getAddress(); 782 if (!Address || isa<UndefValue>(Address)) { 783 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 784 return true; 785 } 786 787 assert(DI.getVariable()->isValidLocationForIntrinsic( 788 MIRBuilder.getDebugLoc()) && 789 "Expected inlined-at fields to agree"); 790 auto AI = dyn_cast<AllocaInst>(Address); 791 if (AI && AI->isStaticAlloca()) { 792 // Static allocas are tracked at the MF level, no need for DBG_VALUE 793 // instructions (in fact, they get ignored if they *do* exist). 794 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 795 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 796 } else { 797 // A dbg.declare describes the address of a source variable, so lower it 798 // into an indirect DBG_VALUE. 799 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), 800 DI.getVariable(), DI.getExpression()); 801 } 802 return true; 803 } 804 case Intrinsic::dbg_label: { 805 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 806 assert(DI.getLabel() && "Missing label"); 807 808 assert(DI.getLabel()->isValidLocationForIntrinsic( 809 MIRBuilder.getDebugLoc()) && 810 "Expected inlined-at fields to agree"); 811 812 MIRBuilder.buildDbgLabel(DI.getLabel()); 813 return true; 814 } 815 case Intrinsic::vaend: 816 // No target I know of cares about va_end. Certainly no in-tree target 817 // does. 
  // Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fabs:
    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
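  // Descriptive note (added comment, derived from the cases that follow): the
  // round and fma intrinsics below map directly onto their generic opcodes.
  // fmuladd is the exception: it is only emitted as a single G_FMA when
  // FP-operation fusion is permitted and the target reports FMA as faster than
  // a separate multiply and add; otherwise it is expanded to G_FMUL + G_FADD.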
case Intrinsic::round: 902 MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND) 903 .addDef(getOrCreateVReg(CI)) 904 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 905 return true; 906 case Intrinsic::fma: 907 MIRBuilder.buildInstr(TargetOpcode::G_FMA) 908 .addDef(getOrCreateVReg(CI)) 909 .addUse(getOrCreateVReg(*CI.getArgOperand(0))) 910 .addUse(getOrCreateVReg(*CI.getArgOperand(1))) 911 .addUse(getOrCreateVReg(*CI.getArgOperand(2))); 912 return true; 913 case Intrinsic::fmuladd: { 914 const TargetMachine &TM = MF->getTarget(); 915 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 916 unsigned Dst = getOrCreateVReg(CI); 917 unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 918 unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 919 unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 920 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 921 TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) { 922 // TODO: Revisit this to see if we should move this part of the 923 // lowering to the combiner. 924 MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2); 925 } else { 926 LLT Ty = getLLTForType(*CI.getType(), *DL); 927 auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1); 928 MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2); 929 } 930 return true; 931 } 932 case Intrinsic::memcpy: 933 case Intrinsic::memmove: 934 case Intrinsic::memset: 935 return translateMemfunc(CI, MIRBuilder, ID); 936 case Intrinsic::eh_typeid_for: { 937 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 938 unsigned Reg = getOrCreateVReg(CI); 939 unsigned TypeID = MF->getTypeIDFor(GV); 940 MIRBuilder.buildConstant(Reg, TypeID); 941 return true; 942 } 943 case Intrinsic::objectsize: { 944 // If we don't know by now, we're never going to know. 945 const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1)); 946 947 MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0); 948 return true; 949 } 950 case Intrinsic::stackguard: 951 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 952 return true; 953 case Intrinsic::stackprotector: { 954 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 955 unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy); 956 getStackGuard(GuardVal, MIRBuilder); 957 958 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 959 MIRBuilder.buildStore( 960 GuardVal, getOrCreateVReg(*Slot), 961 *MF->getMachineMemOperand( 962 MachinePointerInfo::getFixedStack(*MF, 963 getOrCreateFrameIndex(*Slot)), 964 MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 965 PtrTy.getSizeInBits() / 8, 8)); 966 return true; 967 } 968 case Intrinsic::cttz: 969 case Intrinsic::ctlz: { 970 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 971 bool isTrailing = ID == Intrinsic::cttz; 972 unsigned Opcode = isTrailing 973 ? Cst->isZero() ? TargetOpcode::G_CTTZ 974 : TargetOpcode::G_CTTZ_ZERO_UNDEF 975 : Cst->isZero() ? 
TargetOpcode::G_CTLZ 976 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 977 MIRBuilder.buildInstr(Opcode) 978 .addDef(getOrCreateVReg(CI)) 979 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 980 return true; 981 } 982 case Intrinsic::ctpop: { 983 MIRBuilder.buildInstr(TargetOpcode::G_CTPOP) 984 .addDef(getOrCreateVReg(CI)) 985 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 986 return true; 987 } 988 case Intrinsic::invariant_start: { 989 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 990 unsigned Undef = MRI->createGenericVirtualRegister(PtrTy); 991 MIRBuilder.buildUndef(Undef); 992 return true; 993 } 994 case Intrinsic::invariant_end: 995 return true; 996 } 997 return false; 998 } 999 1000 bool IRTranslator::translateInlineAsm(const CallInst &CI, 1001 MachineIRBuilder &MIRBuilder) { 1002 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue()); 1003 if (!IA.getConstraintString().empty()) 1004 return false; 1005 1006 unsigned ExtraInfo = 0; 1007 if (IA.hasSideEffects()) 1008 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 1009 if (IA.getDialect() == InlineAsm::AD_Intel) 1010 ExtraInfo |= InlineAsm::Extra_AsmDialect; 1011 1012 MIRBuilder.buildInstr(TargetOpcode::INLINEASM) 1013 .addExternalSymbol(IA.getAsmString().c_str()) 1014 .addImm(ExtraInfo); 1015 1016 return true; 1017 } 1018 1019 unsigned IRTranslator::packRegs(const Value &V, 1020 MachineIRBuilder &MIRBuilder) { 1021 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 1022 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 1023 LLT BigTy = getLLTForType(*V.getType(), *DL); 1024 1025 if (Regs.size() == 1) 1026 return Regs[0]; 1027 1028 unsigned Dst = MRI->createGenericVirtualRegister(BigTy); 1029 MIRBuilder.buildUndef(Dst); 1030 for (unsigned i = 0; i < Regs.size(); ++i) { 1031 unsigned NewDst = MRI->createGenericVirtualRegister(BigTy); 1032 MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]); 1033 Dst = NewDst; 1034 } 1035 return Dst; 1036 } 1037 1038 void IRTranslator::unpackRegs(const Value &V, unsigned Src, 1039 MachineIRBuilder &MIRBuilder) { 1040 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 1041 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 1042 1043 for (unsigned i = 0; i < Regs.size(); ++i) 1044 MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]); 1045 } 1046 1047 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 1048 const CallInst &CI = cast<CallInst>(U); 1049 auto TII = MF->getTarget().getIntrinsicInfo(); 1050 const Function *F = CI.getCalledFunction(); 1051 1052 // FIXME: support Windows dllimport function calls. 1053 if (F && F->hasDLLImportStorageClass()) 1054 return false; 1055 1056 if (CI.isInlineAsm()) 1057 return translateInlineAsm(CI, MIRBuilder); 1058 1059 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1060 if (F && F->isIntrinsic()) { 1061 ID = F->getIntrinsicID(); 1062 if (TII && ID == Intrinsic::not_intrinsic) 1063 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 1064 } 1065 1066 bool IsSplitType = valueIsSplit(CI); 1067 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) { 1068 unsigned Res = IsSplitType ? 
MRI->createGenericVirtualRegister( 1069 getLLTForType(*CI.getType(), *DL)) 1070 : getOrCreateVReg(CI); 1071 1072 SmallVector<unsigned, 8> Args; 1073 for (auto &Arg: CI.arg_operands()) 1074 Args.push_back(packRegs(*Arg, MIRBuilder)); 1075 1076 MF->getFrameInfo().setHasCalls(true); 1077 bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() { 1078 return getOrCreateVReg(*CI.getCalledValue()); 1079 }); 1080 1081 if (IsSplitType) 1082 unpackRegs(CI, Res, MIRBuilder); 1083 return Success; 1084 } 1085 1086 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 1087 1088 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 1089 return true; 1090 1091 unsigned Res = 0; 1092 if (!CI.getType()->isVoidTy()) { 1093 if (IsSplitType) 1094 Res = 1095 MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL)); 1096 else 1097 Res = getOrCreateVReg(CI); 1098 } 1099 MachineInstrBuilder MIB = 1100 MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory()); 1101 1102 for (auto &Arg : CI.arg_operands()) { 1103 // Some intrinsics take metadata parameters. Reject them. 1104 if (isa<MetadataAsValue>(Arg)) 1105 return false; 1106 MIB.addUse(packRegs(*Arg, MIRBuilder)); 1107 } 1108 1109 if (IsSplitType) 1110 unpackRegs(CI, Res, MIRBuilder); 1111 1112 // Add a MachineMemOperand if it is a target mem intrinsic. 1113 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1114 TargetLowering::IntrinsicInfo Info; 1115 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 1116 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 1117 uint64_t Size = Info.memVT.getStoreSize(); 1118 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 1119 Info.flags, Size, Info.align)); 1120 } 1121 1122 return true; 1123 } 1124 1125 bool IRTranslator::translateInvoke(const User &U, 1126 MachineIRBuilder &MIRBuilder) { 1127 const InvokeInst &I = cast<InvokeInst>(U); 1128 MCContext &Context = MF->getContext(); 1129 1130 const BasicBlock *ReturnBB = I.getSuccessor(0); 1131 const BasicBlock *EHPadBB = I.getSuccessor(1); 1132 1133 const Value *Callee = I.getCalledValue(); 1134 const Function *Fn = dyn_cast<Function>(Callee); 1135 if (isa<InlineAsm>(Callee)) 1136 return false; 1137 1138 // FIXME: support invoking patchpoint and statepoint intrinsics. 1139 if (Fn && Fn->isIntrinsic()) 1140 return false; 1141 1142 // FIXME: support whatever these are. 1143 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 1144 return false; 1145 1146 // FIXME: support Windows exception handling. 1147 if (!isa<LandingPadInst>(EHPadBB->front())) 1148 return false; 1149 1150 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 1151 // the region covered by the try. 1152 MCSymbol *BeginSymbol = Context.createTempSymbol(); 1153 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 1154 1155 unsigned Res = 1156 MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL)); 1157 SmallVector<unsigned, 8> Args; 1158 for (auto &Arg: I.arg_operands()) 1159 Args.push_back(packRegs(*Arg, MIRBuilder)); 1160 1161 if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, 1162 [&]() { return getOrCreateVReg(*I.getCalledValue()); })) 1163 return false; 1164 1165 unpackRegs(I, Res, MIRBuilder); 1166 1167 MCSymbol *EndSymbol = Context.createTempSymbol(); 1168 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 1169 1170 // FIXME: track probabilities. 
1171 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 1172 &ReturnMBB = getMBB(*ReturnBB); 1173 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 1174 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 1175 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 1176 MIRBuilder.buildBr(ReturnMBB); 1177 1178 return true; 1179 } 1180 1181 bool IRTranslator::translateLandingPad(const User &U, 1182 MachineIRBuilder &MIRBuilder) { 1183 const LandingPadInst &LP = cast<LandingPadInst>(U); 1184 1185 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 1186 1187 MBB.setIsEHPad(); 1188 1189 // If there aren't registers to copy the values into (e.g., during SjLj 1190 // exceptions), then don't bother. 1191 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1192 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 1193 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 1194 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 1195 return true; 1196 1197 // If landingpad's return type is token type, we don't create DAG nodes 1198 // for its exception pointer and selector value. The extraction of exception 1199 // pointer or selector value from token type landingpads is not currently 1200 // supported. 1201 if (LP.getType()->isTokenTy()) 1202 return true; 1203 1204 // Add a label to mark the beginning of the landing pad. Deletion of the 1205 // landing pad can thus be detected via the MachineModuleInfo. 1206 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 1207 .addSym(MF->addLandingPad(&MBB)); 1208 1209 LLT Ty = getLLTForType(*LP.getType(), *DL); 1210 unsigned Undef = MRI->createGenericVirtualRegister(Ty); 1211 MIRBuilder.buildUndef(Undef); 1212 1213 SmallVector<LLT, 2> Tys; 1214 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 1215 Tys.push_back(getLLTForType(*Ty, *DL)); 1216 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 1217 1218 // Mark exception register as live in. 1219 unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); 1220 if (!ExceptionReg) 1221 return false; 1222 1223 MBB.addLiveIn(ExceptionReg); 1224 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP); 1225 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); 1226 1227 unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); 1228 if (!SelectorReg) 1229 return false; 1230 1231 MBB.addLiveIn(SelectorReg); 1232 unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); 1233 MIRBuilder.buildCopy(PtrVReg, SelectorReg); 1234 MIRBuilder.buildCast(ResRegs[1], PtrVReg); 1235 1236 return true; 1237 } 1238 1239 bool IRTranslator::translateAlloca(const User &U, 1240 MachineIRBuilder &MIRBuilder) { 1241 auto &AI = cast<AllocaInst>(U); 1242 1243 if (AI.isSwiftError()) 1244 return false; 1245 1246 if (AI.isStaticAlloca()) { 1247 unsigned Res = getOrCreateVReg(AI); 1248 int FI = getOrCreateFrameIndex(AI); 1249 MIRBuilder.buildFrameIndex(Res, FI); 1250 return true; 1251 } 1252 1253 // FIXME: support stack probing for Windows. 1254 if (MF->getTarget().getTargetTriple().isOSWindows()) 1255 return false; 1256 1257 // Now we're in the harder dynamic case. 
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
1326 if (U.getType()->getVectorNumElements() == 1) { 1327 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1328 auto &Regs = *VMap.getVRegs(U); 1329 if (Regs.empty()) { 1330 Regs.push_back(Elt); 1331 VMap.getOffsets(U)->push_back(0); 1332 } else { 1333 MIRBuilder.buildCopy(Regs[0], Elt); 1334 } 1335 return true; 1336 } 1337 1338 unsigned Res = getOrCreateVReg(U); 1339 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1340 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1341 unsigned Idx = getOrCreateVReg(*U.getOperand(2)); 1342 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 1343 return true; 1344 } 1345 1346 bool IRTranslator::translateExtractElement(const User &U, 1347 MachineIRBuilder &MIRBuilder) { 1348 // If it is a <1 x Ty> vector, use the scalar as it is 1349 // not a legal vector type in LLT. 1350 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { 1351 unsigned Elt = getOrCreateVReg(*U.getOperand(0)); 1352 auto &Regs = *VMap.getVRegs(U); 1353 if (Regs.empty()) { 1354 Regs.push_back(Elt); 1355 VMap.getOffsets(U)->push_back(0); 1356 } else { 1357 MIRBuilder.buildCopy(Regs[0], Elt); 1358 } 1359 return true; 1360 } 1361 unsigned Res = getOrCreateVReg(U); 1362 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1363 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 1364 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits(); 1365 unsigned Idx = 0; 1366 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) { 1367 if (CI->getBitWidth() != PreferredVecIdxWidth) { 1368 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth); 1369 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx); 1370 Idx = getOrCreateVReg(*NewIdxCI); 1371 } 1372 } 1373 if (!Idx) 1374 Idx = getOrCreateVReg(*U.getOperand(1)); 1375 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) { 1376 const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth); 1377 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg(); 1378 } 1379 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1380 return true; 1381 } 1382 1383 bool IRTranslator::translateShuffleVector(const User &U, 1384 MachineIRBuilder &MIRBuilder) { 1385 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR) 1386 .addDef(getOrCreateVReg(U)) 1387 .addUse(getOrCreateVReg(*U.getOperand(0))) 1388 .addUse(getOrCreateVReg(*U.getOperand(1))) 1389 .addUse(getOrCreateVReg(*U.getOperand(2))); 1390 return true; 1391 } 1392 1393 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1394 const PHINode &PI = cast<PHINode>(U); 1395 1396 SmallVector<MachineInstr *, 4> Insts; 1397 for (auto Reg : getOrCreateVRegs(PI)) { 1398 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg); 1399 Insts.push_back(MIB.getInstr()); 1400 } 1401 1402 PendingPHIs.emplace_back(&PI, std::move(Insts)); 1403 return true; 1404 } 1405 1406 bool IRTranslator::translateAtomicCmpXchg(const User &U, 1407 MachineIRBuilder &MIRBuilder) { 1408 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 1409 1410 if (I.isWeak()) 1411 return false; 1412 1413 auto Flags = I.isVolatile() ? 
MachineMemOperand::MOVolatile 1414 : MachineMemOperand::MONone; 1415 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1416 1417 Type *ResType = I.getType(); 1418 Type *ValType = ResType->Type::getStructElementType(0); 1419 1420 auto Res = getOrCreateVRegs(I); 1421 unsigned OldValRes = Res[0]; 1422 unsigned SuccessRes = Res[1]; 1423 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1424 unsigned Cmp = getOrCreateVReg(*I.getCompareOperand()); 1425 unsigned NewVal = getOrCreateVReg(*I.getNewValOperand()); 1426 1427 MIRBuilder.buildAtomicCmpXchgWithSuccess( 1428 OldValRes, SuccessRes, Addr, Cmp, NewVal, 1429 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1430 Flags, DL->getTypeStoreSize(ValType), 1431 getMemOpAlignment(I), AAMDNodes(), nullptr, 1432 I.getSyncScopeID(), I.getSuccessOrdering(), 1433 I.getFailureOrdering())); 1434 return true; 1435 } 1436 1437 bool IRTranslator::translateAtomicRMW(const User &U, 1438 MachineIRBuilder &MIRBuilder) { 1439 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 1440 1441 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile 1442 : MachineMemOperand::MONone; 1443 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1444 1445 Type *ResType = I.getType(); 1446 1447 unsigned Res = getOrCreateVReg(I); 1448 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1449 unsigned Val = getOrCreateVReg(*I.getValOperand()); 1450 1451 unsigned Opcode = 0; 1452 switch (I.getOperation()) { 1453 default: 1454 llvm_unreachable("Unknown atomicrmw op"); 1455 return false; 1456 case AtomicRMWInst::Xchg: 1457 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 1458 break; 1459 case AtomicRMWInst::Add: 1460 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 1461 break; 1462 case AtomicRMWInst::Sub: 1463 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 1464 break; 1465 case AtomicRMWInst::And: 1466 Opcode = TargetOpcode::G_ATOMICRMW_AND; 1467 break; 1468 case AtomicRMWInst::Nand: 1469 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 1470 break; 1471 case AtomicRMWInst::Or: 1472 Opcode = TargetOpcode::G_ATOMICRMW_OR; 1473 break; 1474 case AtomicRMWInst::Xor: 1475 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 1476 break; 1477 case AtomicRMWInst::Max: 1478 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 1479 break; 1480 case AtomicRMWInst::Min: 1481 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 1482 break; 1483 case AtomicRMWInst::UMax: 1484 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 1485 break; 1486 case AtomicRMWInst::UMin: 1487 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 1488 break; 1489 } 1490 1491 MIRBuilder.buildAtomicRMW( 1492 Opcode, Res, Addr, Val, 1493 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1494 Flags, DL->getTypeStoreSize(ResType), 1495 getMemOpAlignment(I), AAMDNodes(), nullptr, 1496 I.getSyncScopeID(), I.getOrdering())); 1497 return true; 1498 } 1499 1500 void IRTranslator::finishPendingPhis() { 1501 #ifndef NDEBUG 1502 DILocationVerifier Verifier(*MF); 1503 #endif // ifndef NDEBUG 1504 for (auto &Phi : PendingPHIs) { 1505 const PHINode *PI = Phi.first; 1506 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 1507 EntryBuilder.setDebugLoc(PI->getDebugLoc()); 1508 #ifndef NDEBUG 1509 Verifier.setCurrentInst(PI); 1510 #endif // ifndef NDEBUG 1511 1512 // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator 1513 // won't create extra control flow here, otherwise we need to find the 1514 // dominating predecessor here (or perhaps force the weirder IRTranslators 1515 // to provide a simple boundary). 
1516 SmallSet<const BasicBlock *, 4> HandledPreds; 1517 1518 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 1519 auto IRPred = PI->getIncomingBlock(i); 1520 if (HandledPreds.count(IRPred)) 1521 continue; 1522 1523 HandledPreds.insert(IRPred); 1524 ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 1525 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 1526 assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) && 1527 "incorrect CFG at MachineBasicBlock level"); 1528 for (unsigned j = 0; j < ValRegs.size(); ++j) { 1529 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 1530 MIB.addUse(ValRegs[j]); 1531 MIB.addMBB(Pred); 1532 } 1533 } 1534 } 1535 } 1536 } 1537 1538 bool IRTranslator::valueIsSplit(const Value &V, 1539 SmallVectorImpl<uint64_t> *Offsets) { 1540 SmallVector<LLT, 4> SplitTys; 1541 if (Offsets && !Offsets->empty()) 1542 Offsets->clear(); 1543 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 1544 return SplitTys.size() > 1; 1545 } 1546 1547 bool IRTranslator::translate(const Instruction &Inst) { 1548 CurBuilder.setDebugLoc(Inst.getDebugLoc()); 1549 EntryBuilder.setDebugLoc(Inst.getDebugLoc()); 1550 switch(Inst.getOpcode()) { 1551 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1552 case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder); 1553 #include "llvm/IR/Instruction.def" 1554 default: 1555 return false; 1556 } 1557 } 1558 1559 bool IRTranslator::translate(const Constant &C, unsigned Reg) { 1560 if (auto CI = dyn_cast<ConstantInt>(&C)) 1561 EntryBuilder.buildConstant(Reg, *CI); 1562 else if (auto CF = dyn_cast<ConstantFP>(&C)) 1563 EntryBuilder.buildFConstant(Reg, *CF); 1564 else if (isa<UndefValue>(C)) 1565 EntryBuilder.buildUndef(Reg); 1566 else if (isa<ConstantPointerNull>(C)) { 1567 // As we are trying to build a constant val of 0 into a pointer, 1568 // insert a cast to make them correct with respect to types. 1569 unsigned NullSize = DL->getTypeSizeInBits(C.getType()); 1570 auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize); 1571 auto *ZeroVal = ConstantInt::get(ZeroTy, 0); 1572 unsigned ZeroReg = getOrCreateVReg(*ZeroVal); 1573 EntryBuilder.buildCast(Reg, ZeroReg); 1574 } else if (auto GV = dyn_cast<GlobalValue>(&C)) 1575 EntryBuilder.buildGlobalValue(Reg, GV); 1576 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 1577 if (!CAZ->getType()->isVectorTy()) 1578 return false; 1579 // Return the scalar if it is a <1 x Ty> vector. 1580 if (CAZ->getNumElements() == 1) 1581 return translate(*CAZ->getElementValue(0u), Reg); 1582 std::vector<unsigned> Ops; 1583 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 1584 Constant &Elt = *CAZ->getElementValue(i); 1585 Ops.push_back(getOrCreateVReg(Elt)); 1586 } 1587 EntryBuilder.buildMerge(Reg, Ops); 1588 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 1589 // Return the scalar if it is a <1 x Ty> vector. 
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder.buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
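  // One generic virtual register is created per IR argument below;
  // lowerFormalArguments then populates them, and arguments whose value splits
  // into several parts are unpacked into their component registers afterwards.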
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero-sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, EntryBuilder);
    }
    ArgIt++;
  }

  // Need to visit defs before uses when translating instructions.
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder.setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
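  // The argument lowering recorded its physical-register live-ins on EntryBB;
  // carry them over so the merged entry block keeps an accurate live-in list.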
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now-empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}