1 //===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// \file 10 /// This file implements the IRTranslator class. 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 14 15 #include "llvm/ADT/ScopeExit.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/Analysis/OptimizationDiagnosticInfo.h" 19 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 20 #include "llvm/CodeGen/Analysis.h" 21 #include "llvm/CodeGen/MachineFunction.h" 22 #include "llvm/CodeGen/MachineFrameInfo.h" 23 #include "llvm/CodeGen/MachineModuleInfo.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/CodeGen/TargetPassConfig.h" 26 #include "llvm/IR/Constant.h" 27 #include "llvm/IR/DebugInfo.h" 28 #include "llvm/IR/Function.h" 29 #include "llvm/IR/GetElementPtrTypeIterator.h" 30 #include "llvm/IR/IntrinsicInst.h" 31 #include "llvm/IR/Type.h" 32 #include "llvm/IR/Value.h" 33 #include "llvm/Target/TargetFrameLowering.h" 34 #include "llvm/Target/TargetIntrinsicInfo.h" 35 #include "llvm/Target/TargetLowering.h" 36 37 #define DEBUG_TYPE "irtranslator" 38 39 using namespace llvm; 40 41 char IRTranslator::ID = 0; 42 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 43 false, false) 44 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) 45 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI", 46 false, false) 47 48 static void reportTranslationError(MachineFunction &MF, 49 const TargetPassConfig &TPC, 50 OptimizationRemarkEmitter &ORE, 51 OptimizationRemarkMissed &R) { 52 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel); 
53 54 // Print the function name explicitly if we don't have a debug location (which 55 // makes the diagnostic less useful) or if we're going to emit a raw error. 56 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled()) 57 R << (" (in function: " + MF.getName() + ")").str(); 58 59 if (TPC.isGlobalISelAbortEnabled()) 60 report_fatal_error(R.getMsg()); 61 else 62 ORE.emit(R); 63 } 64 65 IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) { 66 initializeIRTranslatorPass(*PassRegistry::getPassRegistry()); 67 } 68 69 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const { 70 AU.addRequired<TargetPassConfig>(); 71 MachineFunctionPass::getAnalysisUsage(AU); 72 } 73 74 75 unsigned IRTranslator::getOrCreateVReg(const Value &Val) { 76 unsigned &ValReg = ValToVReg[&Val]; 77 78 if (ValReg) 79 return ValReg; 80 81 // Fill ValRegsSequence with the sequence of registers 82 // we need to concat together to produce the value. 83 assert(Val.getType()->isSized() && 84 "Don't know how to create an empty vreg"); 85 unsigned VReg = 86 MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL)); 87 ValReg = VReg; 88 89 if (auto CV = dyn_cast<Constant>(&Val)) { 90 bool Success = translate(*CV, VReg); 91 if (!Success) { 92 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 93 MF->getFunction()->getSubprogram(), 94 &MF->getFunction()->getEntryBlock()); 95 R << "unable to translate constant: " << ore::NV("Type", Val.getType()); 96 reportTranslationError(*MF, *TPC, *ORE, R); 97 return VReg; 98 } 99 } 100 101 return VReg; 102 } 103 104 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) { 105 if (FrameIndices.find(&AI) != FrameIndices.end()) 106 return FrameIndices[&AI]; 107 108 unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType()); 109 unsigned Size = 110 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue(); 111 112 // Always allocate at least one byte. 
113 Size = std::max(Size, 1u); 114 115 unsigned Alignment = AI.getAlignment(); 116 if (!Alignment) 117 Alignment = DL->getABITypeAlignment(AI.getAllocatedType()); 118 119 int &FI = FrameIndices[&AI]; 120 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI); 121 return FI; 122 } 123 124 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) { 125 unsigned Alignment = 0; 126 Type *ValTy = nullptr; 127 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) { 128 Alignment = SI->getAlignment(); 129 ValTy = SI->getValueOperand()->getType(); 130 } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) { 131 Alignment = LI->getAlignment(); 132 ValTy = LI->getType(); 133 } else { 134 OptimizationRemarkMissed R("gisel-irtranslator", "", &I); 135 R << "unable to translate memop: " << ore::NV("Opcode", &I); 136 reportTranslationError(*MF, *TPC, *ORE, R); 137 return 1; 138 } 139 140 return Alignment ? Alignment : DL->getABITypeAlignment(ValTy); 141 } 142 143 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { 144 MachineBasicBlock *&MBB = BBToMBB[&BB]; 145 assert(MBB && "BasicBlock was not encountered before"); 146 return *MBB; 147 } 148 149 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) { 150 assert(NewPred && "new predecessor must be a real MachineBasicBlock"); 151 MachinePreds[Edge].push_back(NewPred); 152 } 153 154 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U, 155 MachineIRBuilder &MIRBuilder) { 156 // FIXME: handle signed/unsigned wrapping flags. 157 158 // Get or create a virtual register for each value. 159 // Unless the value is a Constant => loadimm cst? 160 // or inline constant each time? 161 // Creation of a virtual register needs to have a size. 
162 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 163 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 164 unsigned Res = getOrCreateVReg(U); 165 MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1); 166 return true; 167 } 168 169 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) { 170 // -0.0 - X --> G_FNEG 171 if (isa<Constant>(U.getOperand(0)) && 172 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) { 173 MIRBuilder.buildInstr(TargetOpcode::G_FNEG) 174 .addDef(getOrCreateVReg(U)) 175 .addUse(getOrCreateVReg(*U.getOperand(1))); 176 return true; 177 } 178 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder); 179 } 180 181 bool IRTranslator::translateCompare(const User &U, 182 MachineIRBuilder &MIRBuilder) { 183 const CmpInst *CI = dyn_cast<CmpInst>(&U); 184 unsigned Op0 = getOrCreateVReg(*U.getOperand(0)); 185 unsigned Op1 = getOrCreateVReg(*U.getOperand(1)); 186 unsigned Res = getOrCreateVReg(U); 187 CmpInst::Predicate Pred = 188 CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>( 189 cast<ConstantExpr>(U).getPredicate()); 190 if (CmpInst::isIntPredicate(Pred)) 191 MIRBuilder.buildICmp(Pred, Res, Op0, Op1); 192 else if (Pred == CmpInst::FCMP_FALSE) 193 MIRBuilder.buildCopy( 194 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType()))); 195 else if (Pred == CmpInst::FCMP_TRUE) 196 MIRBuilder.buildCopy( 197 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType()))); 198 else 199 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1); 200 201 return true; 202 } 203 204 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) { 205 const ReturnInst &RI = cast<ReturnInst>(U); 206 const Value *Ret = RI.getReturnValue(); 207 // The target may mess up with the insertion point, but 208 // this is not important as a return is the last instruction 209 // of the block anyway. 210 return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 
                                               0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  // Succ is 0 for an unconditional branch, 1 (the false edge) otherwise.
  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors. (NB: the loop variable shadows the Succ counter above;
  // the counter is dead by this point.)
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  // Each case becomes "icmp eq; brcond TrueMBB; br FalseMBB" with FalseMBB
  // holding the comparison for the next case.
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    // Record that this machine block feeds the IR-level edge OrigBB->TrueBB
    // (needed later, e.g. for PHI lowering).
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // handle default case
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());

  // Atomic ordering / sync scope are carried on the MMO.
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                LI.getSynchScope(), LI.getOrdering()));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
          SI.getOrdering()));
  return true;
}

/// Lower extractvalue (instruction or constant-expression form) to G_EXTRACT
/// at the bit offset of the indexed member.
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // Constant-expression form: operands 1.. are the indices.
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // Byte offset from the layout, converted to a bit offset for G_EXTRACT.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);

  return true;
}

/// Lower insertvalue (instruction or constant-expression form) to G_INSERT
/// at the bit offset of the indexed member.
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // Constant-expression form: operand 1 is the inserted value, operands
    // 2.. are the indices.
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Res = getOrCreateVReg(U);
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // Get the source vreg now, to avoid invalidating ValToVReg.
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    unsigned &Reg = ValToVReg[&U];
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (Reg)
      MIRBuilder.buildCopy(Reg, SrcReg);
    else
      Reg = SrcReg;
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

/// Lower a GEP: constant parts are folded into an accumulated byte Offset,
/// variable indices become (sext/trunc, mul, G_GEP) chains.
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      // Struct indices are always constant; fold into the running offset.
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      // Variable index: first flush any pending constant offset into the
      // base pointer so the multiply below starts from a clean base.
      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg =
          getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        // GEP indices are sign-extended to pointer width.
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  // Apply any leftover constant offset, defining the result vreg directly.
  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

/// Lower memcpy/memmove/memset to a libcall. Bails out (returns false) for
/// non-zero address spaces or a size argument narrower than a pointer.
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  // These libcalls return void at the IR level (vreg 0).
  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

/// Emit a LOAD_STACK_GUARD into \p DstReg, attaching an invariant,
/// dereferenceable memory operand when the target exposes the guard global.
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

/// Lower an *.with.overflow intrinsic to the generic opcode \p Op, packing
/// the (result, overflow-bit) pair into the call's aggregate vreg.
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  // The carry-using opcodes take an extra carry-in operand; feed a zero.
  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = getOrCreateVReg(
        *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getOffset(), DI.getVariable(),
                                       DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      // NB: this CI intentionally shadows the CallInst parameter.
      MIRBuilder.buildConstDbgValue(*CI, DI.getOffset(), DI.getVariable(),
                                    DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      if (DI.getOffset() != 0)
        MIRBuilder.buildIndirectDbgValue(Reg, DI.getOffset(), DI.getVariable(),
                                         DI.getExpression());
      else
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    // Store the guard value into the protector slot as a volatile store.
    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

/// Lower an inline-asm call. Only constraint-free asm is supported so far.
bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  // Plain (possibly indirect) call: hand off to the target's call lowering.
  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ?
                                              0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    MF->getFrameInfo().setHasCalls(true);
    // The callee vreg is produced lazily: only indirect calls need it.
    return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  // Fall back to a generic G_INTRINSIC[_W_SIDE_EFFECTS] instruction.
  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(getOrCreateVReg(*Arg));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad.
  // Deletion of the landing pad can thus be detected via the
  // MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
           Tmp = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildCopy(VReg, ExceptionReg);
  MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);

  // N.b. the exception selector register always has pointer type and may not
  // match the actual IR-level type in the landingpad so an extra cast is
  // needed.
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);

  VReg = MRI->createGenericVirtualRegister(Tys[1]);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
  MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
                         Tys[0].getSizeInBits());
  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  // Static allocas become plain frame-index references.
  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  // TySize is deliberately NEGATED so NumElts * TySize is a negative offset:
  // the G_GEP below then moves the stack pointer down (stack grows down).
  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by add SA-1 to the size. This doesn't overflow because we're computing
    // an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    ValToVReg[&U] = Elt;
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
1000 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { 1001 unsigned Elt = getOrCreateVReg(*U.getOperand(0)); 1002 ValToVReg[&U] = Elt; 1003 return true; 1004 } 1005 unsigned Res = getOrCreateVReg(U); 1006 unsigned Val = getOrCreateVReg(*U.getOperand(0)); 1007 unsigned Idx = getOrCreateVReg(*U.getOperand(1)); 1008 MIRBuilder.buildExtractVectorElement(Res, Val, Idx); 1009 return true; 1010 } 1011 1012 bool IRTranslator::translateShuffleVector(const User &U, 1013 MachineIRBuilder &MIRBuilder) { 1014 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR) 1015 .addDef(getOrCreateVReg(U)) 1016 .addUse(getOrCreateVReg(*U.getOperand(0))) 1017 .addUse(getOrCreateVReg(*U.getOperand(1))) 1018 .addUse(getOrCreateVReg(*U.getOperand(2))); 1019 return true; 1020 } 1021 1022 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) { 1023 const PHINode &PI = cast<PHINode>(U); 1024 auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI); 1025 MIB.addDef(getOrCreateVReg(PI)); 1026 1027 PendingPHIs.emplace_back(&PI, MIB.getInstr()); 1028 return true; 1029 } 1030 1031 void IRTranslator::finishPendingPhis() { 1032 for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) { 1033 const PHINode *PI = Phi.first; 1034 MachineInstrBuilder MIB(*MF, Phi.second); 1035 1036 // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator 1037 // won't create extra control flow here, otherwise we need to find the 1038 // dominating predecessor here (or perhaps force the weirder IRTranslators 1039 // to provide a simple boundary). 
1040 SmallSet<const BasicBlock *, 4> HandledPreds; 1041 1042 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) { 1043 auto IRPred = PI->getIncomingBlock(i); 1044 if (HandledPreds.count(IRPred)) 1045 continue; 1046 1047 HandledPreds.insert(IRPred); 1048 unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i)); 1049 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { 1050 assert(Pred->isSuccessor(MIB->getParent()) && 1051 "incorrect CFG at MachineBasicBlock level"); 1052 MIB.addUse(ValReg); 1053 MIB.addMBB(Pred); 1054 } 1055 } 1056 } 1057 } 1058 1059 bool IRTranslator::translate(const Instruction &Inst) { 1060 CurBuilder.setDebugLoc(Inst.getDebugLoc()); 1061 switch(Inst.getOpcode()) { 1062 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1063 case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder); 1064 #include "llvm/IR/Instruction.def" 1065 default: 1066 return false; 1067 } 1068 } 1069 1070 bool IRTranslator::translate(const Constant &C, unsigned Reg) { 1071 if (auto CI = dyn_cast<ConstantInt>(&C)) 1072 EntryBuilder.buildConstant(Reg, *CI); 1073 else if (auto CF = dyn_cast<ConstantFP>(&C)) 1074 EntryBuilder.buildFConstant(Reg, *CF); 1075 else if (isa<UndefValue>(C)) 1076 EntryBuilder.buildUndef(Reg); 1077 else if (isa<ConstantPointerNull>(C)) 1078 EntryBuilder.buildConstant(Reg, 0); 1079 else if (auto GV = dyn_cast<GlobalValue>(&C)) 1080 EntryBuilder.buildGlobalValue(Reg, GV); 1081 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) { 1082 if (!CAZ->getType()->isVectorTy()) 1083 return false; 1084 // Return the scalar if it is a <1 x Ty> vector. 
1085 if (CAZ->getNumElements() == 1) 1086 return translate(*CAZ->getElementValue(0u), Reg); 1087 std::vector<unsigned> Ops; 1088 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { 1089 Constant &Elt = *CAZ->getElementValue(i); 1090 Ops.push_back(getOrCreateVReg(Elt)); 1091 } 1092 EntryBuilder.buildMerge(Reg, Ops); 1093 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { 1094 // Return the scalar if it is a <1 x Ty> vector. 1095 if (CV->getNumElements() == 1) 1096 return translate(*CV->getElementAsConstant(0), Reg); 1097 std::vector<unsigned> Ops; 1098 for (unsigned i = 0; i < CV->getNumElements(); ++i) { 1099 Constant &Elt = *CV->getElementAsConstant(i); 1100 Ops.push_back(getOrCreateVReg(Elt)); 1101 } 1102 EntryBuilder.buildMerge(Reg, Ops); 1103 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { 1104 switch(CE->getOpcode()) { 1105 #define HANDLE_INST(NUM, OPCODE, CLASS) \ 1106 case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder); 1107 #include "llvm/IR/Instruction.def" 1108 default: 1109 return false; 1110 } 1111 } else if (auto CV = dyn_cast<ConstantVector>(&C)) { 1112 if (CV->getNumOperands() == 1) 1113 return translate(*CV->getOperand(0), Reg); 1114 SmallVector<unsigned, 4> Ops; 1115 for (unsigned i = 0; i < CV->getNumOperands(); ++i) { 1116 Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); 1117 } 1118 EntryBuilder.buildMerge(Reg, Ops); 1119 } else 1120 return false; 1121 1122 return true; 1123 } 1124 1125 void IRTranslator::finalizeFunction() { 1126 // Release the memory used by the different maps we 1127 // needed during the translation. 
1128 PendingPHIs.clear(); 1129 ValToVReg.clear(); 1130 FrameIndices.clear(); 1131 MachinePreds.clear(); 1132 } 1133 1134 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { 1135 MF = &CurMF; 1136 const Function &F = *MF->getFunction(); 1137 if (F.empty()) 1138 return false; 1139 CLI = MF->getSubtarget().getCallLowering(); 1140 CurBuilder.setMF(*MF); 1141 EntryBuilder.setMF(*MF); 1142 MRI = &MF->getRegInfo(); 1143 DL = &F.getParent()->getDataLayout(); 1144 TPC = &getAnalysis<TargetPassConfig>(); 1145 ORE = make_unique<OptimizationRemarkEmitter>(&F); 1146 1147 assert(PendingPHIs.empty() && "stale PHIs"); 1148 1149 // Release the per-function state when we return, whether we succeeded or not. 1150 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); }); 1151 1152 // Setup a separate basic-block for the arguments and constants 1153 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock(); 1154 MF->push_back(EntryBB); 1155 EntryBuilder.setMBB(*EntryBB); 1156 1157 // Create all blocks, in IR order, to preserve the layout. 1158 for (const BasicBlock &BB: F) { 1159 auto *&MBB = BBToMBB[&BB]; 1160 1161 MBB = MF->CreateMachineBasicBlock(&BB); 1162 MF->push_back(MBB); 1163 1164 if (BB.hasAddressTaken()) 1165 MBB->setHasAddressTaken(); 1166 } 1167 1168 // Make our arguments/constants entry block fallthrough to the IR entry block. 1169 EntryBB->addSuccessor(&getMBB(F.front())); 1170 1171 // Lower the actual args into this basic block. 
1172 SmallVector<unsigned, 8> VRegArgs; 1173 for (const Argument &Arg: F.args()) 1174 VRegArgs.push_back(getOrCreateVReg(Arg)); 1175 if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) { 1176 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1177 MF->getFunction()->getSubprogram(), 1178 &MF->getFunction()->getEntryBlock()); 1179 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType()); 1180 reportTranslationError(*MF, *TPC, *ORE, R); 1181 return false; 1182 } 1183 1184 // And translate the function! 1185 for (const BasicBlock &BB: F) { 1186 MachineBasicBlock &MBB = getMBB(BB); 1187 // Set the insertion point of all the following translations to 1188 // the end of this basic block. 1189 CurBuilder.setMBB(MBB); 1190 1191 for (const Instruction &Inst: BB) { 1192 if (translate(Inst)) 1193 continue; 1194 1195 std::string InstStrStorage; 1196 raw_string_ostream InstStr(InstStrStorage); 1197 InstStr << Inst; 1198 1199 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure", 1200 Inst.getDebugLoc(), &BB); 1201 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst) 1202 << ": '" << InstStr.str() << "'"; 1203 reportTranslationError(*MF, *TPC, *ORE, R); 1204 return false; 1205 } 1206 } 1207 1208 finishPendingPhis(); 1209 1210 // Merge the argument lowering and constants block with its single 1211 // successor, the LLVM-IR entry block. We want the basic block to 1212 // be maximal. 1213 assert(EntryBB->succ_size() == 1 && 1214 "Custom BB used for lowering should have only one successor"); 1215 // Get the successor of the current entry block. 1216 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin(); 1217 assert(NewEntryBB.pred_size() == 1 && 1218 "LLVM-IR entry block has a predecessor!?"); 1219 // Move all the instruction from the current entry block to the 1220 // new entry block. 
1221 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(), 1222 EntryBB->end()); 1223 1224 // Update the live-in information for the new entry block. 1225 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins()) 1226 NewEntryBB.addLiveIn(LiveIn); 1227 NewEntryBB.sortUniqueLiveIns(); 1228 1229 // Get rid of the now empty basic block. 1230 EntryBB->removeSuccessor(&NewEntryBB); 1231 MF->remove(EntryBB); 1232 MF->DeleteMachineBasicBlock(EntryBB); 1233 1234 assert(&MF->front() == &NewEntryBB && 1235 "New entry wasn't next in the list of basic block!"); 1236 1237 return false; 1238 } 1239