1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// \file 10 /// This file implements the IRTranslator class. 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 14 #include "llvm/ADT/PostOrderIterator.h" 15 #include "llvm/ADT/STLExtras.h" 16 #include "llvm/ADT/ScopeExit.h" 17 #include "llvm/ADT/SmallSet.h" 18 #include "llvm/ADT/SmallVector.h" 19 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 20 #include "llvm/CodeGen/Analysis.h" 21 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 22 #include "llvm/CodeGen/LowLevelType.h" 23 #include "llvm/CodeGen/MachineBasicBlock.h" 24 #include "llvm/CodeGen/MachineFrameInfo.h" 25 #include "llvm/CodeGen/MachineFunction.h" 26 #include "llvm/CodeGen/MachineInstrBuilder.h" 27 #include "llvm/CodeGen/MachineMemOperand.h" 28 #include "llvm/CodeGen/MachineOperand.h" 29 #include "llvm/CodeGen/MachineRegisterInfo.h" 30 #include "llvm/CodeGen/StackProtector.h" 31 #include "llvm/CodeGen/TargetFrameLowering.h" 32 #include "llvm/CodeGen/TargetLowering.h" 33 #include "llvm/CodeGen/TargetPassConfig.h" 34 #include "llvm/CodeGen/TargetRegisterInfo.h" 35 #include "llvm/CodeGen/TargetSubtargetInfo.h" 36 #include "llvm/IR/BasicBlock.h" 37 #include "llvm/IR/CFG.h" 38 #include "llvm/IR/Constant.h" 39 #include "llvm/IR/Constants.h" 40 #include "llvm/IR/DataLayout.h" 41 #include "llvm/IR/DebugInfo.h" 42 #include "llvm/IR/DerivedTypes.h" 43 #include "llvm/IR/Function.h" 44 #include "llvm/IR/GetElementPtrTypeIterator.h" 45 #include "llvm/IR/InlineAsm.h" 46 #include "llvm/IR/InstrTypes.h" 47 #include "llvm/IR/Instructions.h" 48 #include "llvm/IR/IntrinsicInst.h" 49 #include "llvm/IR/Intrinsics.h" 50 #include "llvm/IR/LLVMContext.h" 51 #include "llvm/IR/Metadata.h" 52 #include "llvm/IR/Type.h" 53 #include "llvm/IR/User.h" 54 #include "llvm/IR/Value.h" 55 #include "llvm/MC/MCContext.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CodeGen.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/ErrorHandling.h" 61 #include "llvm/Support/LowLevelTypeImpl.h" 62 #include "llvm/Support/MathExtras.h" 63 #include "llvm/Support/raw_ostream.h" 64 #include "llvm/Target/TargetIntrinsicInfo.h" 65 #include "llvm/Target/TargetMachine.h" 66 #include <algorithm> 67 #include <cassert> 68 #include <cstdint> 69 #include <iterator> 70 #include <string> 71 #include <utility> 72 #include <vector> 73 74 #define DEBUG_TYPE "irtranslator" 75 76 using namespace llvm; 77 78 char IRTranslator::ID = 0; 79 80 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 81 false, false) 82 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 83 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 84 false, false) 85 86 static void reportTranslationError(MachineFunction &MF, 87 const TargetPassConfig &TPC, 88 OptimizationRemarkEmitter &ORE, 89 OptimizationRemarkMissed &R) { 90 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 91 92 // Print the function name explicitly if we don't have a debug location (which 93 // makes the diagnostic less useful) or if we're going to emit a raw error. 94 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 95 R << (" (in function: " + MF.getName() + ")").str(); 96 97 if (TPC.isGlobalISelAbortEnabled()) 98 report_fatal_error(R.getMsg()); 99 else 100 ORE.emit(R); 101 } 102 103 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { 104 initializeIRTranslatorPass(*PassRegistry::getPassRegistry()); 105 } 106 107 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 108 AU.addRequired<StackProtector>(); 109 AU.addRequired<TargetPassConfig>(); 110 getSelectionDAGFallbackAnalysisUsage(AU); 111 MachineFunctionPass::getAnalysisUsage(AU); 112 } 113 114 static void computeValueLLTs(const DataLayout &DL, Type &Ty, 115 SmallVectorImpl<LLT> &ValueTys, 116 SmallVectorImpl<uint64_t> *Offsets = nullptr, 117 uint64_t StartingOffset = 0) { 118 // Given a struct type, recursively traverse the elements. 119 if (StructType *STy = dyn_cast<StructType>(&Ty)) { 120 const StructLayout *SL = DL.getStructLayout(STy); 121 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) 122 computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets, 123 StartingOffset + SL->getElementOffset(I)); 124 return; 125 } 126 // Given an array type, recursively traverse the elements. 127 if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) { 128 Type *EltTy = ATy->getElementType(); 129 uint64_t EltSize = DL.getTypeAllocSize(EltTy); 130 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) 131 computeValueLLTs(DL, *EltTy, ValueTys, Offsets, 132 StartingOffset + i * EltSize); 133 return; 134 } 135 // Interpret void as zero return values. 136 if (Ty.isVoidTy()) 137 return; 138 // Base case: we can get an LLT for this LLVM IR type. 139 ValueTys.push_back(getLLTForType(Ty, DL)); 140 if (Offsets != nullptr) 141 Offsets->push_back(StartingOffset * 8); 142 } 143 144 IRTranslator::ValueToVRegInfo::VRegListT & 145 IRTranslator::allocateVRegs(const Value &Val) { 146 assert(!VMap.contains(Val) && "Value already allocated in VMap"); 147 auto *Regs = VMap.getVRegs(Val); 148 auto *Offsets = VMap.getOffsets(Val); 149 SmallVector<LLT, 4> SplitTys; 150 computeValueLLTs(*DL, *Val.getType(), SplitTys, 151 Offsets->empty() ? Offsets : nullptr); 152 for (unsigned i = 0; i < SplitTys.size(); ++i) 153 Regs->push_back(0); 154 return *Regs; 155 } 156 157 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) { 158 auto VRegsIt = VMap.findVRegs(Val); 159 if (VRegsIt != VMap.vregs_end()) 160 return *VRegsIt->second; 161 162 if (Val.getType()->isVoidTy()) 163 return *VMap.getVRegs(Val); 164 165 // Create entry for this type. 166 auto *VRegs = VMap.getVRegs(Val); 167 auto *Offsets = VMap.getOffsets(Val); 168 169 assert(Val.getType()->isSized() && 170 "Don't know how to create an empty vreg"); 171 172 SmallVector<LLT, 4> SplitTys; 173 computeValueLLTs(*DL, *Val.getType(), SplitTys, 174 Offsets->empty() ? Offsets : nullptr); 175 176 if (!isa<Constant>(Val)) { 177 for (auto Ty : SplitTys) 178 VRegs->push_back(MRI->createGenericVirtualRegister(Ty)); 179 return *VRegs; 180 } 181 182 if (Val.getType()->isAggregateType()) { 183 // UndefValue, ConstantAggregateZero 184 auto &C = cast<Constant>(Val); 185 unsigned Idx = 0; 186 while (auto Elt = C.getAggregateElement(Idx++)) { 187 auto EltRegs = getOrCreateVRegs(*Elt); 188 std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs)); 189 } 190 } else { 191 assert(SplitTys.size() == 1 && "unexpectedly split LLT"); 192 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0])); 193 bool Success = translate(cast<Constant>(Val), VRegs->front()); 194 if (!Success) { 195 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 196 MF->getFunction().getSubprogram(), 197 &MF->getFunction().getEntryBlock()); 198 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 199 reportTranslationError(*MF, *TPC, *ORE, R); 200 return *VRegs; 201 } 202 } 203 204 return *VRegs; 205 } 206 207 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 208 if (FrameIndices.find(&AI) != FrameIndices.end()) 209 return FrameIndices[&AI]; 210 211 unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType()); 212 unsigned Size = 213 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 214 215 // Always allocate at least one byte. 216 Size = std::max(Size, 1u); 217 218 unsigned Alignment = AI.getAlignment(); 219 if (!Alignment) 220 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 221 222 int &FI = FrameIndices[&AI]; 223 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 224 return FI; 225 } 226 227 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) { 228 unsigned Alignment = 0; 229 Type *ValTy = nullptr; 230 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { 231 Alignment = SI->getAlignment(); 232 ValTy = SI->getValueOperand()->getType(); 233 } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 234 Alignment = LI->getAlignment(); 235 ValTy = LI->getType(); 236 } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { 237 // TODO(PR27168): This instruction has no alignment attribute, but unlike 238 // the default alignment for load/store, the default here is to assume 239 // it has NATURAL alignment, not DataLayout-specified alignment. 240 const DataLayout &DL = AI->getModule()->getDataLayout(); 241 Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType()); 242 ValTy = AI->getCompareOperand()->getType(); 243 } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) { 244 // TODO(PR27168): This instruction has no alignment attribute, but unlike 245 // the default alignment for load/store, the default here is to assume 246 // it has NATURAL alignment, not DataLayout-specified alignment. 247 const DataLayout &DL = AI->getModule()->getDataLayout(); 248 Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType()); 249 ValTy = AI->getType(); 250 } else { 251 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 252 R << "unable to translate memop: " << ore::NV("Opcode", &I); 253 reportTranslationError(*MF, *TPC, *ORE, R); 254 return 1; 255 } 256 257 return Alignment ? Alignment : DL->getABITypeAlignment(ValTy); 258 } 259 260 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 261 MachineBasicBlock *&MBB = BBToMBB[&BB]; 262 assert(MBB && "BasicBlock was not encountered before"); 263 return *MBB; 264 } 265 266 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 267 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 268 MachinePreds[Edge].push_back(NewPred); 269 } 270 271 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 272 MachineIRBuilder &MIRBuilder) { 273 // FIXME: handle signed/unsigned wrapping flags. 274 275 // Get or create a virtual register for each value. 276 // Unless the value is a Constant => loadimm cst? 277 // or inline constant each time? 278 // Creation of a virtual register needs to have a size. 279 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 280 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 281 unsigned Res = getOrCreateVReg(U); 282 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1); 283 return true; 284 } 285 286 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 287 // -0.0 - X --> G_FNEG 288 if (isa<Constant>(U.getOperand(0)) && 289 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 290 MIRBuilder.buildInstr(TargetOpcode::G_FNEG) 291 .addDef(getOrCreateVReg(U)) 292 .addUse(getOrCreateVReg(*U.getOperand(1))); 293 return true; 294 } 295 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 296 } 297 298 bool IRTranslator::translateCompare(const User &U, 299 MachineIRBuilder &MIRBuilder) { 300 const CmpInst *CI = dyn_cast<CmpInst>(&U); 301 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 302 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 303 unsigned Res = getOrCreateVReg(U); 304 CmpInst::Predicate Pred = 305 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>( 306 cast<ConstantExpr>(U).getPredicate()); 307 if (CmpInst::isIntPredicate(Pred)) 308 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 309 else if (Pred == CmpInst::FCMP_FALSE) 310 MIRBuilder.buildCopy( 311 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType()))); 312 else if (Pred == CmpInst::FCMP_TRUE) 313 MIRBuilder.buildCopy( 314 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType()))); 315 else 316 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1); 317 318 return true; 319 } 320 321 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 322 const ReturnInst &RI = cast<ReturnInst>(U); 323 const Value *Ret = RI.getReturnValue(); 324 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) 325 Ret = nullptr; 326 327 ArrayRef<unsigned> VRegs; 328 if (Ret) 329 VRegs = getOrCreateVRegs(*Ret); 330 331 // The target may mess up with the insertion point, but 332 // this is not important as a return is the last instruction 333 // of the block anyway. 334 335 return CLI->lowerReturn(MIRBuilder, Ret, VRegs); 336 } 337 338 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) { 339 const BranchInst &BrInst = cast<BranchInst>(U); 340 unsigned Succ = 0; 341 if (!BrInst.isUnconditional()) { 342 // We want a G_BRCOND to the true BB followed by an unconditional branch. 343 unsigned Tst = getOrCreateVReg(*BrInst.getCondition()); 344 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++)); 345 MachineBasicBlock &TrueBB = getMBB(TrueTgt); 346 MIRBuilder.buildBrCond(Tst, TrueBB); 347 } 348 349 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ)); 350 MachineBasicBlock &TgtBB = getMBB(BrTgt); 351 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 352 353 // If the unconditional target is the layout successor, fallthrough. 354 if (!CurBB.isLayoutSuccessor(&TgtBB)) 355 MIRBuilder.buildBr(TgtBB); 356 357 // Link successors. 358 for (const BasicBlock *Succ : BrInst.successors()) 359 CurBB.addSuccessor(&getMBB(*Succ)); 360 return true; 361 } 362 363 bool IRTranslator::translateSwitch(const User &U, 364 MachineIRBuilder &MIRBuilder) { 365 // For now, just translate as a chain of conditional branches. 366 // FIXME: could we share most of the logic/code in 367 // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel? 368 // At first sight, it seems most of the logic in there is independent of 369 // SelectionDAG-specifics and a lot of work went in to optimize switch 370 // lowering in there. 371 372 const SwitchInst &SwInst = cast<SwitchInst>(U); 373 const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition()); 374 const BasicBlock *OrigBB = SwInst.getParent(); 375 376 LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL); 377 for (auto &CaseIt : SwInst.cases()) { 378 const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue()); 379 const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1); 380 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue); 381 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 382 const BasicBlock *TrueBB = CaseIt.getCaseSuccessor(); 383 MachineBasicBlock &TrueMBB = getMBB(*TrueBB); 384 385 MIRBuilder.buildBrCond(Tst, TrueMBB); 386 CurMBB.addSuccessor(&TrueMBB); 387 addMachineCFGPred({OrigBB, TrueBB}, &CurMBB); 388 389 MachineBasicBlock *FalseMBB = 390 MF->CreateMachineBasicBlock(SwInst.getParent()); 391 // Insert the comparison blocks one after the other. 392 MF->insert(std::next(CurMBB.getIterator()), FalseMBB); 393 MIRBuilder.buildBr(*FalseMBB); 394 CurMBB.addSuccessor(FalseMBB); 395 396 MIRBuilder.setMBB(*FalseMBB); 397 } 398 // handle default case 399 const BasicBlock *DefaultBB = SwInst.getDefaultDest(); 400 MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB); 401 MIRBuilder.buildBr(DefaultMBB); 402 MachineBasicBlock &CurMBB = MIRBuilder.getMBB(); 403 CurMBB.addSuccessor(&DefaultMBB); 404 addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB); 405 406 return true; 407 } 408 409 bool IRTranslator::translateIndirectBr(const User &U, 410 MachineIRBuilder &MIRBuilder) { 411 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U); 412 413 const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress()); 414 MIRBuilder.buildBrIndirect(Tgt); 415 416 // Link successors. 417 MachineBasicBlock &CurBB = MIRBuilder.getMBB(); 418 for (const BasicBlock *Succ : BrInst.successors()) 419 CurBB.addSuccessor(&getMBB(*Succ)); 420 421 return true; 422 } 423 424 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) { 425 const LoadInst &LI = cast<LoadInst>(U); 426 427 auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile 428 : MachineMemOperand::MONone; 429 Flags |= MachineMemOperand::MOLoad; 430 431 if (DL->getTypeStoreSize(LI.getType()) == 0) 432 return true; 433 434 ArrayRef<unsigned> Regs = getOrCreateVRegs(LI); 435 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI); 436 unsigned Base = getOrCreateVReg(*LI.getPointerOperand()); 437 438 for (unsigned i = 0; i < Regs.size(); ++i) { 439 unsigned Addr = 0; 440 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 441 442 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); 443 unsigned BaseAlign = getMemOpAlignment(LI); 444 auto MMO = MF->getMachineMemOperand( 445 Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8, 446 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 447 LI.getSyncScopeID(), LI.getOrdering()); 448 MIRBuilder.buildLoad(Regs[i], Addr, *MMO); 449 } 450 451 return true; 452 } 453 454 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) { 455 const StoreInst &SI = cast<StoreInst>(U); 456 auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile 457 : MachineMemOperand::MONone; 458 Flags |= MachineMemOperand::MOStore; 459 460 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) 461 return true; 462 463 ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand()); 464 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand()); 465 unsigned Base = getOrCreateVReg(*SI.getPointerOperand()); 466 467 for (unsigned i = 0; i < Vals.size(); ++i) { 468 unsigned Addr = 0; 469 MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8); 470 471 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); 472 unsigned BaseAlign = getMemOpAlignment(SI); 473 auto MMO = MF->getMachineMemOperand( 474 Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8, 475 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr, 476 SI.getSyncScopeID(), SI.getOrdering()); 477 MIRBuilder.buildStore(Vals[i], Addr, *MMO); 478 } 479 return true; 480 } 481 482 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) { 483 const Value *Src = U.getOperand(0); 484 Type *Int32Ty = Type::getInt32Ty(U.getContext()); 485 486 // getIndexedOffsetInType is designed for GEPs, so the first index is the 487 // usual array element rather than looking into the actual aggregate. 488 SmallVector<Value *, 1> Indices; 489 Indices.push_back(ConstantInt::get(Int32Ty, 0)); 490 491 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) { 492 for (auto Idx : EVI->indices()) 493 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 494 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) { 495 for (auto Idx : IVI->indices()) 496 Indices.push_back(ConstantInt::get(Int32Ty, Idx)); 497 } else { 498 for (unsigned i = 1; i < U.getNumOperands(); ++i) 499 Indices.push_back(U.getOperand(i)); 500 } 501 502 return 8 * static_cast<uint64_t>( 503 DL.getIndexedOffsetInType(Src->getType(), Indices)); 504 } 505 506 bool IRTranslator::translateExtractValue(const User &U, 507 MachineIRBuilder &MIRBuilder) { 508 const Value *Src = U.getOperand(0); 509 uint64_t Offset = getOffsetFromIndices(U, *DL); 510 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 511 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src); 512 unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) - 513 Offsets.begin(); 514 auto &DstRegs = allocateVRegs(U); 515 516 for (unsigned i = 0; i < DstRegs.size(); ++i) 517 DstRegs[i] = SrcRegs[Idx++]; 518 519 return true; 520 } 521 522 bool IRTranslator::translateInsertValue(const User &U, 523 MachineIRBuilder &MIRBuilder) { 524 const Value *Src = U.getOperand(0); 525 uint64_t Offset = getOffsetFromIndices(U, *DL); 526 auto &DstRegs = allocateVRegs(U); 527 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U); 528 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src); 529 ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); 530 auto InsertedIt = InsertedRegs.begin(); 531 532 for (unsigned i = 0; i < DstRegs.size(); ++i) { 533 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end()) 534 DstRegs[i] = *InsertedIt++; 535 else 536 DstRegs[i] = SrcRegs[i]; 537 } 538 539 return true; 540 } 541 542 bool IRTranslator::translateSelect(const User &U, 543 MachineIRBuilder &MIRBuilder) { 544 unsigned Tst = getOrCreateVReg(*U.getOperand(0)); 545 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U); 546 ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1)); 547 ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2)); 548 549 for (unsigned i = 0; i < ResRegs.size(); ++i) 550 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]); 551 552 return true; 553 } 554 555 bool IRTranslator::translateBitCast(const User &U, 556 MachineIRBuilder &MIRBuilder) { 557 // If we're bitcasting to the source type, we can reuse the source vreg. 558 if (getLLTForType(*U.getOperand(0)->getType(), *DL) == 559 getLLTForType(*U.getType(), *DL)) { 560 unsigned SrcReg = getOrCreateVReg(*U.getOperand(0)); 561 auto &Regs = *VMap.getVRegs(U); 562 // If we already assigned a vreg for this bitcast, we can't change that. 563 // Emit a copy to satisfy the users we already emitted. 564 if (!Regs.empty()) 565 MIRBuilder.buildCopy(Regs[0], SrcReg); 566 else { 567 Regs.push_back(SrcReg); 568 VMap.getOffsets(U)->push_back(0); 569 } 570 return true; 571 } 572 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder); 573 } 574 575 bool IRTranslator::translateCast(unsigned Opcode, const User &U, 576 MachineIRBuilder &MIRBuilder) { 577 unsigned Op = getOrCreateVReg(*U.getOperand(0)); 578 unsigned Res = getOrCreateVReg(U); 579 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op); 580 return true; 581 } 582 583 bool IRTranslator::translateGetElementPtr(const User &U, 584 MachineIRBuilder &MIRBuilder) { 585 // FIXME: support vector GEPs. 586 if (U.getType()->isVectorTy()) 587 return false; 588 589 Value &Op0 = *U.getOperand(0); 590 unsigned BaseReg = getOrCreateVReg(Op0); 591 Type *PtrIRTy = Op0.getType(); 592 LLT PtrTy = getLLTForType(*PtrIRTy, *DL); 593 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy); 594 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL); 595 596 int64_t Offset = 0; 597 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U); 598 GTI != E; ++GTI) { 599 const Value *Idx = GTI.getOperand(); 600 if (StructType *StTy = GTI.getStructTypeOrNull()) { 601 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 602 Offset += DL->getStructLayout(StTy)->getElementOffset(Field); 603 continue; 604 } else { 605 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); 606 607 // If this is a scalar constant or a splat vector of constants, 608 // handle it quickly. 609 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) { 610 Offset += ElementSize * CI->getSExtValue(); 611 continue; 612 } 613 614 if (Offset != 0) { 615 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 616 unsigned OffsetReg = 617 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 618 MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg); 619 620 BaseReg = NewBaseReg; 621 Offset = 0; 622 } 623 624 unsigned IdxReg = getOrCreateVReg(*Idx); 625 if (MRI->getType(IdxReg) != OffsetTy) { 626 unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy); 627 MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg); 628 IdxReg = NewIdxReg; 629 } 630 631 // N = N + Idx * ElementSize; 632 // Avoid doing it for ElementSize of 1. 633 unsigned GepOffsetReg; 634 if (ElementSize != 1) { 635 unsigned ElementSizeReg = 636 getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize)); 637 638 GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy); 639 MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg); 640 } else 641 GepOffsetReg = IdxReg; 642 643 unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy); 644 MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg); 645 BaseReg = NewBaseReg; 646 } 647 } 648 649 if (Offset != 0) { 650 unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset)); 651 MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg); 652 return true; 653 } 654 655 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg); 656 return true; 657 } 658 659 bool IRTranslator::translateMemfunc(const CallInst &CI, 660 MachineIRBuilder &MIRBuilder, 661 unsigned ID) { 662 LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL); 663 Type *DstTy = CI.getArgOperand(0)->getType(); 664 if (cast<PointerType>(DstTy)->getAddressSpace() != 0 || 665 SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0)) 666 return false; 667 668 SmallVector<CallLowering::ArgInfo, 8> Args; 669 for (int i = 0; i < 3; ++i) { 670 const auto &Arg = CI.getArgOperand(i); 671 Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType()); 672 } 673 674 const char *Callee; 675 switch (ID) { 676 case Intrinsic::memmove: 677 case Intrinsic::memcpy: { 678 Type *SrcTy = CI.getArgOperand(1)->getType(); 679 if(cast<PointerType>(SrcTy)->getAddressSpace() != 0) 680 return false; 681 Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove"; 682 break; 683 } 684 case Intrinsic::memset: 685 Callee = "memset"; 686 break; 687 default: 688 return false; 689 } 690 691 return CLI->lowerCall(MIRBuilder, CI.getCallingConv(), 692 MachineOperand::CreateES(Callee), 693 CallLowering::ArgInfo(0, CI.getType()), Args); 694 } 695 696 void IRTranslator::getStackGuard(unsigned DstReg, 697 MachineIRBuilder &MIRBuilder) { 698 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); 699 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF)); 700 auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD); 701 MIB.addDef(DstReg); 702 703 auto &TLI = *MF->getSubtarget().getTargetLowering(); 704 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent()); 705 if (!Global) 706 return; 707 708 MachinePointerInfo MPInfo(Global); 709 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 710 MachineMemOperand::MODereferenceable; 711 MachineMemOperand *MemRef = 712 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, 713 DL->getPointerABIAlignment(0)); 714 MIB.setMemRefs({MemRef}); 715 } 716 717 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, 718 MachineIRBuilder &MIRBuilder) { 719 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI); 720 auto MIB = MIRBuilder.buildInstr(Op) 721 .addDef(ResRegs[0]) 722 .addDef(ResRegs[1]) 723 .addUse(getOrCreateVReg(*CI.getOperand(0))) 724 .addUse(getOrCreateVReg(*CI.getOperand(1))); 725 726 if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) { 727 unsigned Zero = getOrCreateVReg( 728 *Constant::getNullValue(Type::getInt1Ty(CI.getContext()))); 729 MIB.addUse(Zero); 730 } 731 732 return true; 733 } 734 735 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, 736 MachineIRBuilder &MIRBuilder) { 737 switch (ID) { 738 default: 739 break; 740 case Intrinsic::lifetime_start: 741 case Intrinsic::lifetime_end: 742 // Stack coloring is not enabled in O0 (which we care about now) so we can 743 // drop these. Make sure someone notices when we start compiling at higher 744 // opts though. 745 if (MF->getTarget().getOptLevel() != CodeGenOpt::None) 746 return false; 747 return true; 748 case Intrinsic::dbg_declare: { 749 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI); 750 assert(DI.getVariable() && "Missing variable"); 751 752 const Value *Address = DI.getAddress(); 753 if (!Address || isa<UndefValue>(Address)) { 754 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 755 return true; 756 } 757 758 assert(DI.getVariable()->isValidLocationForIntrinsic( 759 MIRBuilder.getDebugLoc()) && 760 "Expected inlined-at fields to agree"); 761 auto AI = dyn_cast<AllocaInst>(Address); 762 if (AI && AI->isStaticAlloca()) { 763 // Static allocas are tracked at the MF level, no need for DBG_VALUE 764 // instructions (in fact, they get ignored if they *do* exist). 765 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(), 766 getOrCreateFrameIndex(*AI), DI.getDebugLoc()); 767 } else 768 MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address), 769 DI.getVariable(), DI.getExpression()); 770 return true; 771 } 772 case Intrinsic::dbg_label: { 773 const DbgLabelInst &DI = cast<DbgLabelInst>(CI); 774 assert(DI.getLabel() && "Missing label"); 775 776 assert(DI.getLabel()->isValidLocationForIntrinsic( 777 MIRBuilder.getDebugLoc()) && 778 "Expected inlined-at fields to agree"); 779 780 MIRBuilder.buildDbgLabel(DI.getLabel()); 781 return true; 782 } 783 case Intrinsic::vaend: 784 // No target I know of cares about va_end. Certainly no in-tree target 785 // does. Simplest intrinsic ever! 786 return true; 787 case Intrinsic::vastart: { 788 auto &TLI = *MF->getSubtarget().getTargetLowering(); 789 Value *Ptr = CI.getArgOperand(0); 790 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8; 791 792 MIRBuilder.buildInstr(TargetOpcode::G_VASTART) 793 .addUse(getOrCreateVReg(*Ptr)) 794 .addMemOperand(MF->getMachineMemOperand( 795 MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0)); 796 return true; 797 } 798 case Intrinsic::dbg_value: { 799 // This form of DBG_VALUE is target-independent. 800 const DbgValueInst &DI = cast<DbgValueInst>(CI); 801 const Value *V = DI.getValue(); 802 assert(DI.getVariable()->isValidLocationForIntrinsic( 803 MIRBuilder.getDebugLoc()) && 804 "Expected inlined-at fields to agree"); 805 if (!V) { 806 // Currently the optimizer can produce this; insert an undef to 807 // help debugging. Probably the optimizer should not do this. 808 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression()); 809 } else if (const auto *CI = dyn_cast<Constant>(V)) { 810 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression()); 811 } else { 812 unsigned Reg = getOrCreateVReg(*V); 813 // FIXME: This does not handle register-indirect values at offset 0. The 814 // direct/indirect thing shouldn't really be handled by something as 815 // implicit as reg+noreg vs reg+imm in the first palce, but it seems 816 // pretty baked in right now. 817 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression()); 818 } 819 return true; 820 } 821 case Intrinsic::uadd_with_overflow: 822 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder); 823 case Intrinsic::sadd_with_overflow: 824 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder); 825 case Intrinsic::usub_with_overflow: 826 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder); 827 case Intrinsic::ssub_with_overflow: 828 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder); 829 case Intrinsic::umul_with_overflow: 830 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder); 831 case Intrinsic::smul_with_overflow: 832 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder); 833 case Intrinsic::pow: 834 MIRBuilder.buildInstr(TargetOpcode::G_FPOW) 835 .addDef(getOrCreateVReg(CI)) 836 .addUse(getOrCreateVReg(*CI.getArgOperand(0))) 837 .addUse(getOrCreateVReg(*CI.getArgOperand(1))); 838 return true; 839 case Intrinsic::exp: 840 MIRBuilder.buildInstr(TargetOpcode::G_FEXP) 841 .addDef(getOrCreateVReg(CI)) 842 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 843 return true; 844 case Intrinsic::exp2: 845 MIRBuilder.buildInstr(TargetOpcode::G_FEXP2) 846 .addDef(getOrCreateVReg(CI)) 847 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 848 return true; 849 case Intrinsic::log: 850 MIRBuilder.buildInstr(TargetOpcode::G_FLOG) 851 .addDef(getOrCreateVReg(CI)) 852 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 853 return true; 854 case Intrinsic::log2: 855 MIRBuilder.buildInstr(TargetOpcode::G_FLOG2) 856 .addDef(getOrCreateVReg(CI)) 857 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 858 return true; 859 case Intrinsic::fabs: 860 MIRBuilder.buildInstr(TargetOpcode::G_FABS) 861 .addDef(getOrCreateVReg(CI)) 862 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 863 return true; 864 case Intrinsic::trunc: 865 MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC) 866 .addDef(getOrCreateVReg(CI)) 867 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 868 return true; 869 case Intrinsic::round: 870 MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND) 871 .addDef(getOrCreateVReg(CI)) 872 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 873 return true; 874 case Intrinsic::fma: 875 MIRBuilder.buildInstr(TargetOpcode::G_FMA) 876 .addDef(getOrCreateVReg(CI)) 877 .addUse(getOrCreateVReg(*CI.getArgOperand(0))) 878 .addUse(getOrCreateVReg(*CI.getArgOperand(1))) 879 .addUse(getOrCreateVReg(*CI.getArgOperand(2))); 880 return true; 881 case Intrinsic::fmuladd: { 882 const TargetMachine &TM = MF->getTarget(); 883 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 884 unsigned Dst = getOrCreateVReg(CI); 885 unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0)); 886 unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1)); 887 unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2)); 888 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 889 TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) { 890 // TODO: Revisit this to see if we should move this part of the 891 // lowering to the combiner. 892 MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2); 893 } else { 894 LLT Ty = getLLTForType(*CI.getType(), *DL); 895 auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1); 896 MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2); 897 } 898 return true; 899 } 900 case Intrinsic::memcpy: 901 case Intrinsic::memmove: 902 case Intrinsic::memset: 903 return translateMemfunc(CI, MIRBuilder, ID); 904 case Intrinsic::eh_typeid_for: { 905 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0)); 906 unsigned Reg = getOrCreateVReg(CI); 907 unsigned TypeID = MF->getTypeIDFor(GV); 908 MIRBuilder.buildConstant(Reg, TypeID); 909 return true; 910 } 911 case Intrinsic::objectsize: { 912 // If we don't know by now, we're never going to know. 913 const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1)); 914 915 MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0); 916 return true; 917 } 918 case Intrinsic::stackguard: 919 getStackGuard(getOrCreateVReg(CI), MIRBuilder); 920 return true; 921 case Intrinsic::stackprotector: { 922 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL); 923 unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy); 924 getStackGuard(GuardVal, MIRBuilder); 925 926 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1)); 927 MIRBuilder.buildStore( 928 GuardVal, getOrCreateVReg(*Slot), 929 *MF->getMachineMemOperand( 930 MachinePointerInfo::getFixedStack(*MF, 931 getOrCreateFrameIndex(*Slot)), 932 MachineMemOperand::MOStore | MachineMemOperand::MOVolatile, 933 PtrTy.getSizeInBits() / 8, 8)); 934 return true; 935 } 936 case Intrinsic::cttz: 937 case Intrinsic::ctlz: { 938 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1)); 939 bool isTrailing = ID == Intrinsic::cttz; 940 unsigned Opcode = isTrailing 941 ? Cst->isZero() ? TargetOpcode::G_CTTZ 942 : TargetOpcode::G_CTTZ_ZERO_UNDEF 943 : Cst->isZero() ? TargetOpcode::G_CTLZ 944 : TargetOpcode::G_CTLZ_ZERO_UNDEF; 945 MIRBuilder.buildInstr(Opcode) 946 .addDef(getOrCreateVReg(CI)) 947 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 948 return true; 949 } 950 case Intrinsic::ctpop: { 951 MIRBuilder.buildInstr(TargetOpcode::G_CTPOP) 952 .addDef(getOrCreateVReg(CI)) 953 .addUse(getOrCreateVReg(*CI.getArgOperand(0))); 954 return true; 955 } 956 } 957 return false; 958 } 959 960 bool IRTranslator::translateInlineAsm(const CallInst &CI, 961 MachineIRBuilder &MIRBuilder) { 962 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue()); 963 if (!IA.getConstraintString().empty()) 964 return false; 965 966 unsigned ExtraInfo = 0; 967 if (IA.hasSideEffects()) 968 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 969 if (IA.getDialect() == InlineAsm::AD_Intel) 970 ExtraInfo |= InlineAsm::Extra_AsmDialect; 971 972 MIRBuilder.buildInstr(TargetOpcode::INLINEASM) 973 .addExternalSymbol(IA.getAsmString().c_str()) 974 .addImm(ExtraInfo); 975 976 return true; 977 } 978 979 unsigned IRTranslator::packRegs(const Value &V, 980 MachineIRBuilder &MIRBuilder) { 981 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 982 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 983 LLT BigTy = getLLTForType(*V.getType(), *DL); 984 985 if (Regs.size() == 1) 986 return Regs[0]; 987 988 unsigned Dst = MRI->createGenericVirtualRegister(BigTy); 989 MIRBuilder.buildUndef(Dst); 990 for (unsigned i = 0; i < Regs.size(); ++i) { 991 unsigned NewDst = MRI->createGenericVirtualRegister(BigTy); 992 MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]); 993 Dst = NewDst; 994 } 995 return Dst; 996 } 997 998 void IRTranslator::unpackRegs(const Value &V, unsigned Src, 999 MachineIRBuilder &MIRBuilder) { 1000 ArrayRef<unsigned> Regs = getOrCreateVRegs(V); 1001 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V); 1002 1003 for (unsigned i = 0; i < Regs.size(); ++i) 1004 MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]); 1005 } 1006 1007 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) { 1008 const CallInst &CI = cast<CallInst>(U); 1009 auto TII = MF->getTarget().getIntrinsicInfo(); 1010 const Function *F = CI.getCalledFunction(); 1011 1012 // FIXME: support Windows dllimport function calls. 1013 if (F && F->hasDLLImportStorageClass()) 1014 return false; 1015 1016 if (CI.isInlineAsm()) 1017 return translateInlineAsm(CI, MIRBuilder); 1018 1019 Intrinsic::ID ID = Intrinsic::not_intrinsic; 1020 if (F && F->isIntrinsic()) { 1021 ID = F->getIntrinsicID(); 1022 if (TII && ID == Intrinsic::not_intrinsic) 1023 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F)); 1024 } 1025 1026 bool IsSplitType = valueIsSplit(CI); 1027 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) { 1028 unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister( 1029 getLLTForType(*CI.getType(), *DL)) 1030 : getOrCreateVReg(CI); 1031 1032 SmallVector<unsigned, 8> Args; 1033 for (auto &Arg: CI.arg_operands()) 1034 Args.push_back(packRegs(*Arg, MIRBuilder)); 1035 1036 MF->getFrameInfo().setHasCalls(true); 1037 bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() { 1038 return getOrCreateVReg(*CI.getCalledValue()); 1039 }); 1040 1041 if (IsSplitType) 1042 unpackRegs(CI, Res, MIRBuilder); 1043 return Success; 1044 } 1045 1046 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic"); 1047 1048 if (translateKnownIntrinsic(CI, ID, MIRBuilder)) 1049 return true; 1050 1051 unsigned Res = 0; 1052 if (!CI.getType()->isVoidTy()) { 1053 if (IsSplitType) 1054 Res = 1055 MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL)); 1056 else 1057 Res = getOrCreateVReg(CI); 1058 } 1059 MachineInstrBuilder MIB = 1060 MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory()); 1061 1062 for (auto &Arg : CI.arg_operands()) { 1063 // Some intrinsics take metadata parameters. Reject them. 1064 if (isa<MetadataAsValue>(Arg)) 1065 return false; 1066 MIB.addUse(packRegs(*Arg, MIRBuilder)); 1067 } 1068 1069 if (IsSplitType) 1070 unpackRegs(CI, Res, MIRBuilder); 1071 1072 // Add a MachineMemOperand if it is a target mem intrinsic. 1073 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering(); 1074 TargetLowering::IntrinsicInfo Info; 1075 // TODO: Add a GlobalISel version of getTgtMemIntrinsic. 1076 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) { 1077 uint64_t Size = Info.memVT.getStoreSize(); 1078 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal), 1079 Info.flags, Size, Info.align)); 1080 } 1081 1082 return true; 1083 } 1084 1085 bool IRTranslator::translateInvoke(const User &U, 1086 MachineIRBuilder &MIRBuilder) { 1087 const InvokeInst &I = cast<InvokeInst>(U); 1088 MCContext &Context = MF->getContext(); 1089 1090 const BasicBlock *ReturnBB = I.getSuccessor(0); 1091 const BasicBlock *EHPadBB = I.getSuccessor(1); 1092 1093 const Value *Callee = I.getCalledValue(); 1094 const Function *Fn = dyn_cast<Function>(Callee); 1095 if (isa<InlineAsm>(Callee)) 1096 return false; 1097 1098 // FIXME: support invoking patchpoint and statepoint intrinsics. 1099 if (Fn && Fn->isIntrinsic()) 1100 return false; 1101 1102 // FIXME: support whatever these are. 1103 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) 1104 return false; 1105 1106 // FIXME: support Windows exception handling. 1107 if (!isa<LandingPadInst>(EHPadBB->front())) 1108 return false; 1109 1110 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about 1111 // the region covered by the try. 1112 MCSymbol *BeginSymbol = Context.createTempSymbol(); 1113 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); 1114 1115 unsigned Res = 1116 MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL)); 1117 SmallVector<unsigned, 8> Args; 1118 for (auto &Arg: I.arg_operands()) 1119 Args.push_back(packRegs(*Arg, MIRBuilder)); 1120 1121 if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, 1122 [&]() { return getOrCreateVReg(*I.getCalledValue()); })) 1123 return false; 1124 1125 unpackRegs(I, Res, MIRBuilder); 1126 1127 MCSymbol *EndSymbol = Context.createTempSymbol(); 1128 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol); 1129 1130 // FIXME: track probabilities. 1131 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB), 1132 &ReturnMBB = getMBB(*ReturnBB); 1133 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol); 1134 MIRBuilder.getMBB().addSuccessor(&ReturnMBB); 1135 MIRBuilder.getMBB().addSuccessor(&EHPadMBB); 1136 MIRBuilder.buildBr(ReturnMBB); 1137 1138 return true; 1139 } 1140 1141 bool IRTranslator::translateLandingPad(const User &U, 1142 MachineIRBuilder &MIRBuilder) { 1143 const LandingPadInst &LP = cast<LandingPadInst>(U); 1144 1145 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 1146 addLandingPadInfo(LP, MBB); 1147 1148 MBB.setIsEHPad(); 1149 1150 // If there aren't registers to copy the values into (e.g., during SjLj 1151 // exceptions), then don't bother. 1152 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1153 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn(); 1154 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 1155 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 1156 return true; 1157 1158 // If landingpad's return type is token type, we don't create DAG nodes 1159 // for its exception pointer and selector value. The extraction of exception 1160 // pointer or selector value from token type landingpads is not currently 1161 // supported. 1162 if (LP.getType()->isTokenTy()) 1163 return true; 1164 1165 // Add a label to mark the beginning of the landing pad. Deletion of the 1166 // landing pad can thus be detected via the MachineModuleInfo. 1167 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL) 1168 .addSym(MF->addLandingPad(&MBB)); 1169 1170 LLT Ty = getLLTForType(*LP.getType(), *DL); 1171 unsigned Undef = MRI->createGenericVirtualRegister(Ty); 1172 MIRBuilder.buildUndef(Undef); 1173 1174 SmallVector<LLT, 2> Tys; 1175 for (Type *Ty : cast<StructType>(LP.getType())->elements()) 1176 Tys.push_back(getLLTForType(*Ty, *DL)); 1177 assert(Tys.size() == 2 && "Only two-valued landingpads are supported"); 1178 1179 // Mark exception register as live in. 1180 unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn); 1181 if (!ExceptionReg) 1182 return false; 1183 1184 MBB.addLiveIn(ExceptionReg); 1185 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP); 1186 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); 1187 1188 unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); 1189 if (!SelectorReg) 1190 return false; 1191 1192 MBB.addLiveIn(SelectorReg); 1193 unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]); 1194 MIRBuilder.buildCopy(PtrVReg, SelectorReg); 1195 MIRBuilder.buildCast(ResRegs[1], PtrVReg); 1196 1197 return true; 1198 } 1199 1200 bool IRTranslator::translateAlloca(const User &U, 1201 MachineIRBuilder &MIRBuilder) { 1202 auto &AI = cast<AllocaInst>(U); 1203 1204 if (AI.isSwiftError()) 1205 return false; 1206 1207 if (AI.isStaticAlloca()) { 1208 unsigned Res = getOrCreateVReg(AI); 1209 int FI = getOrCreateFrameIndex(AI); 1210 MIRBuilder.buildFrameIndex(Res, FI); 1211 return true; 1212 } 1213 1214 // FIXME: support stack probing for Windows. 1215 if (MF->getTarget().getTargetTriple().isOSWindows()) 1216 return false; 1217 1218 // Now we're in the harder dynamic case. 1219 Type *Ty = AI.getAllocatedType(); 1220 unsigned Align = 1221 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment()); 1222 1223 unsigned NumElts = getOrCreateVReg(*AI.getArraySize()); 1224 1225 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType()); 1226 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL); 1227 if (MRI->getType(NumElts) != IntPtrTy) { 1228 unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy); 1229 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts); 1230 NumElts = ExtElts; 1231 } 1232 1233 unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy); 1234 unsigned TySize = 1235 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty))); 1236 MIRBuilder.buildMul(AllocSize, NumElts, TySize); 1237 1238 LLT PtrTy = getLLTForType(*AI.getType(), *DL); 1239 auto &TLI = *MF->getSubtarget().getTargetLowering(); 1240 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); 1241 1242 unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy); 1243 MIRBuilder.buildCopy(SPTmp, SPReg); 1244 1245 unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy); 1246 MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize); 1247 1248 // Handle alignment. We have to realign if the allocation granule was smaller 1249 // than stack alignment, or the specific alloca requires more than stack 1250 // alignment. 1251 unsigned StackAlign = 1252 MF->getSubtarget().getFrameLowering()->getStackAlignment(); 1253 Align = std::max(Align, StackAlign); 1254 if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) { 1255 // Round the size of the allocation up to the stack alignment size 1256 // by add SA-1 to the size. This doesn't overflow because we're computing 1257 // an address inside an alloca. 1258 unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy); 1259 MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align)); 1260 AllocTmp = AlignedAlloc; 1261 } 1262 1263 MIRBuilder.buildCopy(SPReg, AllocTmp); 1264 MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp); 1265 1266 MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI); 1267 assert(MF->getFrameInfo().hasVarSizedObjects()); 1268 return true; 1269 } 1270 1271 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) { 1272 // FIXME: We may need more info about the type. Because of how LLT works, 1273 // we're completely discarding the i64/double distinction here (amongst 1274 // others). Fortunately the ABIs I know of where that matters don't use va_arg 1275 // anyway but that's not guaranteed. 1276 MIRBuilder.buildInstr(TargetOpcode::G_VAARG) 1277 .addDef(getOrCreateVReg(U)) 1278 .addUse(getOrCreateVReg(*U.getOperand(0))) 1279 .addImm(DL->getABITypeAlignment(U.getType())); 1280 return true; 1281 } 1282 1283 bool IRTranslator::translateInsertElement(const User &U, 1284 MachineIRBuilder &MIRBuilder) { 1285 // If it is a <1 x Ty> vector, use the scalar as it is 1286 // not a legal vector type in LLT. 1287 if (U.getType()->getVectorNumElements() == 1) { 1288 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1289 auto &Regs = *VMap.getVRegs(U); 1290 if (Regs.empty()) { 1291 Regs.push_back(Elt); 1292 VMap.getOffsets(U)->push_back(0); 1293 } else { 1294 MIRBuilder.buildCopy(Regs[0], Elt); 1295 } 1296 return true; 1297 } 1298 1299 unsigned Res = getOrCreateVReg(U); 1300 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1301 unsigned Elt = getOrCreateVReg(*U.getOperand(1)); 1302 unsigned Idx = getOrCreateVReg(*U.getOperand(2)); 1303 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx); 1304 return true; 1305 } 1306 1307 bool IRTranslator::translateExtractElement(const User &U, 1308 MachineIRBuilder &MIRBuilder) { 1309 // If it is a <1 x Ty> vector, use the scalar as it is 1310 // not a legal vector type in LLT. 1311 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { 1312 unsigned Elt = getOrCreateVReg(*U.getOperand(0)); 1313 auto &Regs = *VMap.getVRegs(U); 1314 if (Regs.empty()) { 1315 Regs.push_back(Elt); 1316 VMap.getOffsets(U)->push_back(0); 1317 } else { 1318 MIRBuilder.buildCopy(Regs[0], Elt); 1319 } 1320 return true; 1321 } 1322 unsigned Res = getOrCreateVReg(U); 1323 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1324 unsigned Idx = getOrCreateVReg(*U.getOperand(1)); 1325 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1326 return true; 1327 } 1328 1329 bool IRTranslator::translateShuffleVector(const User &U, 1330 MachineIRBuilder &MIRBuilder) { 1331 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR) 1332 .addDef(getOrCreateVReg(U)) 1333 .addUse(getOrCreateVReg(*U.getOperand(0))) 1334 .addUse(getOrCreateVReg(*U.getOperand(1))) 1335 .addUse(getOrCreateVReg(*U.getOperand(2))); 1336 return true; 1337 } 1338 1339 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1340 const PHINode &PI = cast<PHINode>(U); 1341 1342 SmallVector<MachineInstr *, 4> Insts; 1343 for (auto Reg : getOrCreateVRegs(PI)) { 1344 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg); 1345 Insts.push_back(MIB.getInstr()); 1346 } 1347 1348 PendingPHIs.emplace_back(&PI, std::move(Insts)); 1349 return true; 1350 } 1351 1352 bool IRTranslator::translateAtomicCmpXchg(const User &U, 1353 MachineIRBuilder &MIRBuilder) { 1354 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U); 1355 1356 if (I.isWeak()) 1357 return false; 1358 1359 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile 1360 : MachineMemOperand::MONone; 1361 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1362 1363 Type *ResType = I.getType(); 1364 Type *ValType = ResType->Type::getStructElementType(0); 1365 1366 auto Res = getOrCreateVRegs(I); 1367 unsigned OldValRes = Res[0]; 1368 unsigned SuccessRes = Res[1]; 1369 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1370 unsigned Cmp = getOrCreateVReg(*I.getCompareOperand()); 1371 unsigned NewVal = getOrCreateVReg(*I.getNewValOperand()); 1372 1373 MIRBuilder.buildAtomicCmpXchgWithSuccess( 1374 OldValRes, SuccessRes, Addr, Cmp, NewVal, 1375 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1376 Flags, DL->getTypeStoreSize(ValType), 1377 getMemOpAlignment(I), AAMDNodes(), nullptr, 1378 I.getSyncScopeID(), I.getSuccessOrdering(), 1379 I.getFailureOrdering())); 1380 return true; 1381 } 1382 1383 bool IRTranslator::translateAtomicRMW(const User &U, 1384 MachineIRBuilder &MIRBuilder) { 1385 const AtomicRMWInst &I = cast<AtomicRMWInst>(U); 1386 1387 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile 1388 : MachineMemOperand::MONone; 1389 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 1390 1391 Type *ResType = I.getType(); 1392 1393 unsigned Res = getOrCreateVReg(I); 1394 unsigned Addr = getOrCreateVReg(*I.getPointerOperand()); 1395 unsigned Val = getOrCreateVReg(*I.getValOperand()); 1396 1397 unsigned Opcode = 0; 1398 switch (I.getOperation()) { 1399 default: 1400 llvm_unreachable("Unknown atomicrmw op"); 1401 return false; 1402 case AtomicRMWInst::Xchg: 1403 Opcode = TargetOpcode::G_ATOMICRMW_XCHG; 1404 break; 1405 case AtomicRMWInst::Add: 1406 Opcode = TargetOpcode::G_ATOMICRMW_ADD; 1407 break; 1408 case AtomicRMWInst::Sub: 1409 Opcode = TargetOpcode::G_ATOMICRMW_SUB; 1410 break; 1411 case AtomicRMWInst::And: 1412 Opcode = TargetOpcode::G_ATOMICRMW_AND; 1413 break; 1414 case AtomicRMWInst::Nand: 1415 Opcode = TargetOpcode::G_ATOMICRMW_NAND; 1416 break; 1417 case AtomicRMWInst::Or: 1418 Opcode = TargetOpcode::G_ATOMICRMW_OR; 1419 break; 1420 case AtomicRMWInst::Xor: 1421 Opcode = TargetOpcode::G_ATOMICRMW_XOR; 1422 break; 1423 case AtomicRMWInst::Max: 1424 Opcode = TargetOpcode::G_ATOMICRMW_MAX; 1425 break; 1426 case AtomicRMWInst::Min: 1427 Opcode = TargetOpcode::G_ATOMICRMW_MIN; 1428 break; 1429 case AtomicRMWInst::UMax: 1430 Opcode = TargetOpcode::G_ATOMICRMW_UMAX; 1431 break; 1432 case AtomicRMWInst::UMin: 1433 Opcode = TargetOpcode::G_ATOMICRMW_UMIN; 1434 break; 1435 } 1436 1437 MIRBuilder.buildAtomicRMW( 1438 Opcode, Res, Addr, Val, 1439 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 1440 Flags, DL->getTypeStoreSize(ResType), 1441 getMemOpAlignment(I), AAMDNodes(), nullptr, 1442 I.getSyncScopeID(), I.getOrdering())); 1443 return true; 1444 } 1445 1446 void IRTranslator::finishPendingPhis() { 1447 for (auto &Phi : PendingPHIs) { 1448 const PHINode *PI = Phi.first; 1449 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second; 1450 1451 // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator 1452 // won't create extra control flow here, otherwise we need to find the 1453 // dominating predecessor here (or perhaps force the weirder IRTranslators 1454 // to provide a simple boundary). 1455 SmallSet<const BasicBlock *, 4> HandledPreds; 1456 1457 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 1458 auto IRPred = PI->getIncomingBlock(i); 1459 if (HandledPreds.count(IRPred)) 1460 continue; 1461 1462 HandledPreds.insert(IRPred); 1463 ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); 1464 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 1465 assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) && 1466 "incorrect CFG at MachineBasicBlock level"); 1467 for (unsigned j = 0; j < ValRegs.size(); ++j) { 1468 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]); 1469 MIB.addUse(ValRegs[j]); 1470 MIB.addMBB(Pred); 1471 } 1472 } 1473 } 1474 } 1475 } 1476 1477 bool IRTranslator::valueIsSplit(const Value &V, 1478 SmallVectorImpl<uint64_t> *Offsets) { 1479 SmallVector<LLT, 4> SplitTys; 1480 if (Offsets && !Offsets->empty()) 1481 Offsets->clear(); 1482 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets); 1483 return SplitTys.size() > 1; 1484 } 1485 1486 bool IRTranslator::translate(const Instruction &Inst) { 1487 CurBuilder.setDebugLoc(Inst.getDebugLoc()); 1488 switch(Inst.getOpcode()) { 1489 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1490 case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder); 1491 #include "llvm/IR/Instruction.def" 1492 default: 1493 return false; 1494 } 1495 } 1496 1497 bool IRTranslator::translate(const Constant &C, unsigned Reg) { 1498 if (auto CI = dyn_cast<ConstantInt>(&C)) 1499 EntryBuilder.buildConstant(Reg, *CI); 1500 else if (auto CF = dyn_cast<ConstantFP>(&C)) 1501 EntryBuilder.buildFConstant(Reg, *CF); 1502 else if (isa<UndefValue>(C)) 1503 EntryBuilder.buildUndef(Reg); 1504 else if (isa<ConstantPointerNull>(C)) { 1505 // As we are trying to build a constant val of 0 into a pointer, 1506 // insert a cast to make them correct with respect to types. 1507 unsigned NullSize = DL->getTypeSizeInBits(C.getType()); 1508 auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize); 1509 auto *ZeroVal = ConstantInt::get(ZeroTy, 0); 1510 unsigned ZeroReg = getOrCreateVReg(*ZeroVal); 1511 EntryBuilder.buildCast(Reg, ZeroReg); 1512 } else if (auto GV = dyn_cast<GlobalValue>(&C)) 1513 EntryBuilder.buildGlobalValue(Reg, GV); 1514 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 1515 if (!CAZ->getType()->isVectorTy()) 1516 return false; 1517 // Return the scalar if it is a <1 x Ty> vector. 1518 if (CAZ->getNumElements() == 1) 1519 return translate(*CAZ->getElementValue(0u), Reg); 1520 std::vector<unsigned> Ops; 1521 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 1522 Constant &Elt = *CAZ->getElementValue(i); 1523 Ops.push_back(getOrCreateVReg(Elt)); 1524 } 1525 EntryBuilder.buildMerge(Reg, Ops); 1526 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 1527 // Return the scalar if it is a <1 x Ty> vector. 1528 if (CV->getNumElements() == 1) 1529 return translate(*CV->getElementAsConstant(0), Reg); 1530 std::vector<unsigned> Ops; 1531 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 1532 Constant &Elt = *CV->getElementAsConstant(i); 1533 Ops.push_back(getOrCreateVReg(Elt)); 1534 } 1535 EntryBuilder.buildMerge(Reg, Ops); 1536 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 1537 switch(CE->getOpcode()) { 1538 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1539 case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder); 1540 #include "llvm/IR/Instruction.def" 1541 default: 1542 return false; 1543 } 1544 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 1545 if (CV->getNumOperands() == 1) 1546 return translate(*CV->getOperand(0), Reg); 1547 SmallVector<unsigned, 4> Ops; 1548 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 1549 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 1550 } 1551 EntryBuilder.buildMerge(Reg, Ops); 1552 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { 1553 EntryBuilder.buildBlockAddress(Reg, BA); 1554 } else 1555 return false; 1556 1557 return true; 1558 } 1559 1560 void IRTranslator::finalizeFunction() { 1561 // Release the memory used by the different maps we 1562 // needed during the translation. 1563 PendingPHIs.clear(); 1564 VMap.reset(); 1565 FrameIndices.clear(); 1566 MachinePreds.clear(); 1567 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it 1568 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid 1569 // destroying it twice (in ~IRTranslator() and ~LLVMContext()) 1570 EntryBuilder = MachineIRBuilder(); 1571 CurBuilder = MachineIRBuilder(); 1572 } 1573 1574 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 1575 MF = &CurMF; 1576 const Function &F = MF->getFunction(); 1577 if (F.empty()) 1578 return false; 1579 CLI = MF->getSubtarget().getCallLowering(); 1580 CurBuilder.setMF(*MF); 1581 EntryBuilder.setMF(*MF); 1582 MRI = &MF->getRegInfo(); 1583 DL = &F.getParent()->getDataLayout(); 1584 TPC = &getAnalysis<TargetPassConfig>(); 1585 ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F); 1586 1587 assert(PendingPHIs.empty() && "stale PHIs"); 1588 1589 if (!DL->isLittleEndian()) { 1590 // Currently we don't properly handle big endian code. 1591 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1592 F.getSubprogram(), &F.getEntryBlock()); 1593 R << "unable to translate in big endian mode"; 1594 reportTranslationError(*MF, *TPC, *ORE, R); 1595 } 1596 1597 // Release the per-function state when we return, whether we succeeded or not. 1598 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); 1599 1600 // Setup a separate basic-block for the arguments and constants 1601 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); 1602 MF->push_back(EntryBB); 1603 EntryBuilder.setMBB(*EntryBB); 1604 1605 // Create all blocks, in IR order, to preserve the layout. 1606 for (const BasicBlock &BB: F) { 1607 auto *&MBB = BBToMBB[&BB]; 1608 1609 MBB = MF->CreateMachineBasicBlock(&BB); 1610 MF->push_back(MBB); 1611 1612 if (BB.hasAddressTaken()) 1613 MBB->setHasAddressTaken(); 1614 } 1615 1616 // Make our arguments/constants entry block fallthrough to the IR entry block. 1617 EntryBB->addSuccessor(&getMBB(F.front())); 1618 1619 // Lower the actual args into this basic block. 1620 SmallVector<unsigned, 8> VRegArgs; 1621 for (const Argument &Arg: F.args()) { 1622 if (DL->getTypeStoreSize(Arg.getType()) == 0) 1623 continue; // Don't handle zero sized types. 1624 VRegArgs.push_back( 1625 MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL))); 1626 } 1627 1628 // We don't currently support translating swifterror or swiftself functions. 1629 for (auto &Arg : F.args()) { 1630 if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) { 1631 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1632 F.getSubprogram(), &F.getEntryBlock()); 1633 R << "unable to lower arguments due to swifterror/swiftself: " 1634 << ore::NV("Prototype", F.getType()); 1635 reportTranslationError(*MF, *TPC, *ORE, R); 1636 return false; 1637 } 1638 } 1639 1640 if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) { 1641 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1642 F.getSubprogram(), &F.getEntryBlock()); 1643 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); 1644 reportTranslationError(*MF, *TPC, *ORE, R); 1645 return false; 1646 } 1647 1648 auto ArgIt = F.arg_begin(); 1649 for (auto &VArg : VRegArgs) { 1650 // If the argument is an unsplit scalar then don't use unpackRegs to avoid 1651 // creating redundant copies. 1652 if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) { 1653 auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt)); 1654 assert(VRegs.empty() && "VRegs already populated?"); 1655 VRegs.push_back(VArg); 1656 } else { 1657 unpackRegs(*ArgIt, VArg, EntryBuilder); 1658 } 1659 ArgIt++; 1660 } 1661 1662 // Need to visit defs before uses when translating instructions. 1663 ReversePostOrderTraversal<const Function *> RPOT(&F); 1664 for (const BasicBlock *BB : RPOT) { 1665 MachineBasicBlock &MBB = getMBB(*BB); 1666 // Set the insertion point of all the following translations to 1667 // the end of this basic block. 1668 CurBuilder.setMBB(MBB); 1669 1670 for (const Instruction &Inst : *BB) { 1671 if (translate(Inst)) 1672 continue; 1673 1674 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1675 Inst.getDebugLoc(), BB); 1676 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst); 1677 1678 if (ORE->allowExtraAnalysis("gisel-irtranslator")) { 1679 std::string InstStrStorage; 1680 raw_string_ostream InstStr(InstStrStorage); 1681 InstStr << Inst; 1682 1683 R << ": '" << InstStr.str() << "'"; 1684 } 1685 1686 reportTranslationError(*MF, *TPC, *ORE, R); 1687 return false; 1688 } 1689 } 1690 1691 finishPendingPhis(); 1692 1693 // Merge the argument lowering and constants block with its single 1694 // successor, the LLVM-IR entry block. We want the basic block to 1695 // be maximal. 1696 assert(EntryBB->succ_size() == 1 && 1697 "Custom BB used for lowering should have only one successor"); 1698 // Get the successor of the current entry block. 1699 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin(); 1700 assert(NewEntryBB.pred_size() == 1 && 1701 "LLVM-IR entry block has a predecessor!?"); 1702 // Move all the instruction from the current entry block to the 1703 // new entry block. 1704 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(), 1705 EntryBB->end()); 1706 1707 // Update the live-in information for the new entry block. 1708 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins()) 1709 NewEntryBB.addLiveIn(LiveIn); 1710 NewEntryBB.sortUniqueLiveIns(); 1711 1712 // Get rid of the now empty basic block. 1713 EntryBB->removeSuccessor(&NewEntryBB); 1714 MF->remove(EntryBB); 1715 MF->DeleteMachineBasicBlock(EntryBB); 1716 1717 assert(&MF->front() == &NewEntryBB && 1718 "New entry wasn't next in the list of basic block!"); 1719 1720 // Initialize stack protector information. 1721 StackProtector &SP = getAnalysis<StackProtector>(); 1722 SP.copyToMachineFrameInfo(MF->getFrameInfo()); 1723 1724 return false; 1725 } 1726