//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Create a generic vreg sized after Val's type; constants are
    // additionally materialized in the entry block so that they dominate
    // all of their uses.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MF->getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return VReg;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }
  return ValReg;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);
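
  // Use the alignment specified on the alloca if there is one; otherwise
  // fall back to the ABI type alignment of the allocated type below.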
  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }
  return *MBB;
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // FIXME: could constant operands be inlined as immediates instead of
  // going through a vreg every time?
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess up the insertion point, but that does not matter:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
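    // An explicit G_BR to the false successor is emitted below even when it
    // would simply fall through; cleaning up redundant branches is left to
    // later passes.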
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *SuccBB : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*SuccBB));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, most of the logic in there is independent of
  // SelectionDAG-specifics, and a lot of work went into optimizing switch
  // lowering there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());

  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurBB = MIRBuilder.getMBB();
    MachineBasicBlock &TrueBB = getOrCreateBB(*CaseIt.getCaseSuccessor());

    MIRBuilder.buildBrCond(Tst, TrueBB);
    CurBB.addSuccessor(&TrueBB);

    // Chain to a fresh block that tests the next case.
    MachineBasicBlock *FalseBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    MF->push_back(FalseBB);
    MIRBuilder.buildBr(*FalseBB);
    CurBB.addSuccessor(FalseBB);

    MIRBuilder.setMBB(*FalseBB);
  }
  // Handle the default case.
  MachineBasicBlock &DefaultBB = getOrCreateBB(*SwInst.getDefaultDest());
  MIRBuilder.buildBr(DefaultBB);
  MIRBuilder.getMBB().addSuccessor(&DefaultBB);

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() &&
         "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() &&
         "only non-atomic stores are supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
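  // The MachineMemOperand built below records the size, alignment, and
  // volatility of the access, so later passes can reason about this store
  // without consulting the IR.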
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // A constant expression with extractvalue opcode: the indices are the
    // trailing operands.
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
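  // The leading zero index makes getIndexedOffsetInType treat Src itself as
  // the indexed aggregate; the byte offset it returns is scaled by 8 below,
  // since G_INSERT takes a bit offset.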
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    // A constant expression with insertvalue opcode: operand 1 is the
    // inserted value, so the indices start at operand 2.
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    // The source and destination have the same low-level type: reuse the
    // source vreg if the bitcast does not have one yet, otherwise emit a
    // copy into the existing one.
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // If the index is a constant, fold it into the running offset.
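    // Only a variable index forces the accumulated constant part to be
    // materialized as a G_CONSTANT feeding a G_GEP; an all-constant GEP
    // therefore lowers to the single G_GEP emitted after the loop.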
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemcpy(const CallInst &CI,
                                   MachineIRBuilder &MIRBuilder) {
  // Bail out unless both pointers are in address space 0 and the size
  // operand is pointer-sized; that is all the libcall below can express.
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  // Lower the intrinsic as a plain call to the external "memcpy" symbol.
  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));
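
  // G_UADDE and G_USUBE also take a carry-in operand, which the plain
  // *.with.overflow intrinsics do not have, so a constant-false carry is
  // appended below.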
  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::dbg_declare:
  case Intrinsic::dbg_value:
    // FIXME: these obviously need to be supported properly.
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return true;
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::memcpy:
    return translateMemcpy(CI, MIRBuilder);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
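    // Hand the call off to the target's CallLowering. The callback gives it
    // a way to materialize the callee in a vreg if it needs one, e.g. for an
    // indirect call.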
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Fold constant-int arguments into the intrinsic as immediates;
    // everything else goes through a vreg.
    if (ConstantInt *CstArg = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CstArg->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support indirect invokes; MachineOperand::CreateGA below needs a
  // statically known callee.
  if (!Fn)
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
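  // (The exception pointer and selector arrive in physical registers
  // reported by TargetLowering; the copies below move them into generic
  // vregs before anything else runs in the landing pad.)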
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is a token, we don't create vregs for
  // its exception pointer and selector values. Extracting the exception
  // pointer or selector from a token-typed landingpad is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // Copy the exception pointer and selector values into virtual registers.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI,
                                         MachineIRBuilder &MIRBuilder) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  // The incoming operands are filled in later, by finishPendingPhis, once
  // every basic block has been translated and all the vregs exist.
  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist by now, so add them to the PHI. We assume
    // the IRTranslator won't create extra control flow here; otherwise we
    // would need to find the dominating predecessor (or force the weirder
    // translations to provide a simple boundary).
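    // Each IR-level (incoming value, incoming block) pair becomes a
    // (vreg, MBB) operand pair on the machine PHI.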
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up a separate basic-block for the arguments and constants, falling
  // through to the IR-level Function's entry block.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBB->addSuccessor(&getOrCreateBB(F.front()));
  EntryBuilder.setMBB(*EntryBB);

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MF->getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      finalizeFunction();
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
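    // (Constants are the exception: they are emitted via EntryBuilder into
    // the entry block, so they dominate all of their uses.)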
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      Succeeded &= translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  if (Succeeded) {
    finishPendingPhis();

    // Now that the MachineFrameInfo has been configured, no further changes to
    // the reserved registers are possible.
    MRI->freezeReservedRegs(*MF);

    // Merge the argument lowering and constants block with its single
    // successor, the LLVM-IR entry block. We want that basic block to
    // be maximal.
    assert(EntryBB->succ_size() == 1 &&
           "Custom BB used for lowering should have only one successor");
    // Get the successor of the current entry block.
    MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
    assert(NewEntryBB.pred_size() == 1 &&
           "LLVM-IR entry block has a predecessor!?");
    // Move all the instructions from the current entry block to the
    // new entry block.
    NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                      EntryBB->end());

    // Update the live-in information for the new entry block.
    for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
      NewEntryBB.addLiveIn(LiveIn);
    NewEntryBB.sortUniqueLiveIns();

    // Get rid of the now empty basic block.
    EntryBB->removeSuccessor(&NewEntryBB);
    MF->remove(EntryBB);

    assert(&MF->front() == &NewEntryBB &&
           "New entry wasn't next in the list of basic blocks!");
  }

  finalizeFunction();

  return false;
}