//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Fill ValRegsSequence with the sequence of registers
    // we need to concat together to produce the value.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MF->getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return VReg;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }
  return ValReg;
}
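
// Illustration (not part of the pass; register numbers arbitrary): the first
// query for an i32 IR value in getOrCreateVReg above creates a single generic
// vreg of LLT type s32, and later queries for the same Value return the same
// register. A minimal sketch, assuming an instruction I:
//   unsigned R0 = getOrCreateVReg(*I.getOperand(0)); // creates %0(s32)
//   unsigned R1 = getOrCreateVReg(*I.getOperand(0)); // R1 == R0, no new vreg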

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}
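
// Illustration (a sketch of the resulting generic MIR; register numbers
// arbitrary): translateBinaryOp above turns the IR
//   %sum = add i32 %a, %b
// into
//   %2(s32) = G_ADD %0, %1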

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess up the insertion point, but this is not important:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, most of the logic in there seems independent of
  // SelectionDAG specifics, and a lot of work went into optimizing switch
  // lowering there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getOrCreateBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    MF->push_back(FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getOrCreateBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
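
// Illustration (a sketch of the emitted MIR; block and register names
// arbitrary): translateSwitch above turns a switch with one case,
//   switch i32 %v, label %def [ i32 1, label %one ]
// into a compare-and-branch per case, each followed by a fresh fall-through
// block:
//   %t(s1) = G_ICMP intpred(eq), %c1, %v
//   G_BRCOND %t, %bb.one
//   G_BR %bb.fallthrough
// with the chain terminated by an unconditional G_BR to the default block.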

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType(), *DL},
      PTy{*SI.getPointerOperand()->getType(), *DL};

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}
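
// Illustration (a sketch; register numbers arbitrary): translateLoad above
// turns the IR
//   %val = load i32, i32* %ptr
// into a generic load carrying a MachineMemOperand for size and alignment,
// printed roughly as
//   %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.ptr)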

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}
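
// Worked example (a sketch; the bit offset follows from the DataLayout): in
// translateExtractValue above, for
//   %x = extractvalue { i32, i64 } %agg, 1
// the i64 field sits at byte offset 8 under a typical 64-bit layout (i64 is
// 8-byte aligned), so Offset becomes 64 bits and we emit roughly
//   %1(s64) = G_EXTRACT %0, 64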

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildConstant(OffsetReg, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
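
// Illustration (a sketch; register numbers arbitrary): in
// translateGetElementPtr above, a GEP with one variable index over i32
// elements,
//   %p = getelementptr i32, i32* %base, i64 %idx
// decomposes into scale-and-add on the offset type:
//   %2(s64) = G_CONSTANT i64 4       ; alloc size of i32
//   %3(s64) = G_MUL %2, %1           ; %1 = vreg for %idx
//   %4(p0)  = G_GEP %0, %3           ; %0 = vreg for %base
// with a final COPY (or one more G_GEP for a trailing constant offset)
// defining the vreg for %p.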

bool IRTranslator::translateMemcpy(const CallInst &CI,
                                   MachineIRBuilder &MIRBuilder) {
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}
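
// Worked example (a sketch; register numbers arbitrary):
//   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes roughly
//   %2(s32), %3(s1) = G_SADDO %0, %1
//   %4 = G_SEQUENCE %2, 0, %3, 32    ; result at bit 0, overflow bit at 32
// For the unsigned add/sub cases the carry-in operand of G_UADDE/G_USUBE is
// tied off with a constant 0, as done above.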

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::dbg_declare:
  case Intrinsic::dbg_value:
    // FIXME: these obviously need to be supported properly.
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return true;
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::memcpy:
    return translateMemcpy(CI, MIRBuilder);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}
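
// Illustration (a sketch; register numbers arbitrary): an intrinsic call with
// no special handling in translateKnownIntrinsic, e.g.
//   %r = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
// falls through to buildIntrinsic in translateCall above and becomes a
// generic intrinsic instruction whose constant arguments are encoded as
// immediates, roughly
//   %1(s32) = G_INTRINSIC intrinsic(@llvm.ctlz), %0, 0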

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;


  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create values
  // for its exception pointer and selector. Extracting the exception
  // pointer or selector value from a token-typed landingpad is not
  // currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // Mark the exception registers as live in.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}
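
// Illustration (a sketch; symbols, blocks, and registers arbitrary): an
// invoke of @may_throw produces a call bracketed by EH_LABELs (see
// translateInvoke above), and its landing pad starts with its own label
// followed by copies out of the target's exception registers:
//   EH_LABEL <mcsymbol .Ltmp0>
//   ... lowered call to @may_throw ...
//   EH_LABEL <mcsymbol .Ltmp1>
// bb.N (landing-pad):
//   EH_LABEL <mcsymbol .Ltmp2>
//   %0(p0) = COPY %exn_ptr_reg       ; physical regs reported by the
//   %1(p0) = COPY %exn_sel_reg       ; TargetLowering for the personality
//   %2 = G_SEQUENCE %0, 0, %1, <pointer width>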

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI,
                                         MachineIRBuilder &MIRBuilder) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}
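
// Illustration (a sketch; names arbitrary): for
//   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
// translatePHI first emits a def-only placeholder
//   %2(s32) = PHI
// and finishPendingPhis later completes it, once every predecessor
// MachineBasicBlock is known, to
//   %2(s32) = PHI %0, %bb.1, %1, %bb.2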

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
  MachinePreds.clear();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up a separate basic block for the arguments and constants, falling
  // through to the IR-level Function's entry block.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBB->addSuccessor(&getOrCreateBB(F.front()));
  EntryBuilder.setMBB(*EntryBB);

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MF->getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      finalizeFunction();
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      Succeeded &= translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  if (Succeeded) {
    finishPendingPhis();

    // Now that the MachineFrameInfo has been configured, no further changes to
    // the reserved registers are possible.
    MRI->freezeReservedRegs(*MF);

    // Merge the argument lowering and constants block with its single
    // successor, the LLVM-IR entry block. We want the basic block to
    // be maximal.
    assert(EntryBB->succ_size() == 1 &&
           "Custom BB used for lowering should have only one successor");
    // Get the successor of the current entry block.
    MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
    assert(NewEntryBB.pred_size() == 1 &&
           "LLVM-IR entry block has a predecessor!?");
    // Move all the instructions from the current entry block to the
    // new entry block.
    NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                      EntryBB->end());

    // Update the live-in information for the new entry block.
    for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
      NewEntryBB.addLiveIn(LiveIn);
    NewEntryBB.sortUniqueLiveIns();

    // Get rid of the now empty basic block.
    EntryBB->removeSuccessor(&NewEntryBB);
    MF->remove(EntryBB);

    assert(&MF->front() == &NewEntryBB &&
           "New entry wasn't next in the list of basic blocks!");
  }

  finalizeFunction();

  return false;
}