//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

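// Look up the virtual register corresponding to \p Val, creating it the first
// time \p Val is seen. Constants are translated at that point so the new vreg
// has a definition.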
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Create the vreg lazily: it needs a sized type to get an LLT.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MIRBuilder.getMF().getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return 0;
        }
        report_fatal_error("unable to translate constant");
      }
    }
  }
  return ValReg;
}

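// Return the alignment (in bytes) to use for the memory access performed by
// \p I, falling back to the ABI alignment of the accessed type when the
// instruction doesn't specify one.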
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MIRBuilder.getMF().getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

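// Return the MachineBasicBlock for \p BB, lazily creating it and appending it
// to the current MachineFunction the first time the block is seen.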
MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MachineFunction &MF = MIRBuilder.getMF();
    MBB = MF.CreateMachineBasicBlock();
    MF.push_back(MBB);
  }
  return *MBB;
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // FIXME: should constants be materialized once as a load-immediate, or
  // inlined at each use? Either way, creating a vreg requires a sized type.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

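// Returns are ABI-specific, so lowering is delegated to the target's
// CallLowering implementation. A missing return value is passed as vreg 0.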
bool IRTranslator::translateRet(const User &U) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may move the insertion point, but that doesn't matter: a
  // return is always the last instruction of its block.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

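// An unconditional branch becomes a single G_BR. A conditional branch becomes
// a G_BRCOND to the true block followed by a G_BR to the false block. In both
// cases every IR successor is recorded as a CFG successor of the current MBB.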
bool IRTranslator::translateBr(const User &U) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*Succ));
  return true;
}

bool IRTranslator::translateLoad(const User &U) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && !LI.isSimple())
    return false;

  assert(LI.isSimple() && "only simple loads are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  LLT VTy{*LI.getType(), *DL}, PTy{*LI.getPointerOperand()->getType(), *DL};

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF.getMachineMemOperand(
          MachinePointerInfo(LI.getPointerOperand()), MachineMemOperand::MOLoad,
          DL->getTypeStoreSize(LI.getType()), getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && !SI.isSimple())
    return false;

  assert(SI.isSimple() && "only simple stores are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType(), *DL},
      PTy{*SI.getPointerOperand()->getType(), *DL};

  MIRBuilder.buildStore(
      Val, Addr,
      *MF.getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()),
          MachineMemOperand::MOStore,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}

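// extractvalue is lowered to a G_EXTRACT: the bit offset of the selected
// member is computed from the aggregate's layout in the DataLayout.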
bool IRTranslator::translateExtractValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs: its first index steps over
  // the whole aggregate (the usual array-element index) rather than into it,
  // so prepend a zero index.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

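// insertvalue is the mirror image: a G_INSERT places the new value into the
// aggregate at the computed bit offset.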
bool IRTranslator::translateInsertValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs: its first index steps over
  // the whole aggregate (the usual array-element index) rather than into it,
  // so prepend a zero index.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

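// A bitcast between types with the same low-level representation is a no-op:
// reuse the source vreg if the result doesn't have one yet, otherwise emit a
// COPY. Only "real" bitcasts become G_BITCAST.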
bool IRTranslator::translateBitCast(const User &U) {
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

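// Lower a GEP to pointer arithmetic: constant indices are folded into a
// running byte offset, while each variable index becomes a G_MUL by the
// element size followed by a G_GEP off the current base pointer.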
bool IRTranslator::translateGetElementPtr(const User &U) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If the index is a constant, just fold it into the running offset.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

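      // Before combining with a variable index, flush any constant offset
      // accumulated so far into a new base pointer.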
      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildConstant(OffsetReg, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

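// Expand the arithmetic-with-overflow intrinsics into their generic opcodes.
// The scalar result and the overflow flag are then packed back into the
// struct value the IR-level intrinsic returns.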
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID) {
  unsigned Op = 0;
  switch (ID) {
  default: return false;
  case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
  case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
  case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
  case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
  case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
  case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
  }

  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

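  // G_UADDE and G_USUBE take an extra carry-in operand that the IR intrinsics
  // don't have, so feed them a constant false.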
  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

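// Calls to ordinary functions (or indirect calls) go through the target's
// CallLowering. Intrinsic calls are first offered to translateKnownIntrinsic
// and otherwise emitted as a generic intrinsic instruction, with ConstantInt
// arguments encoded as immediates.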
bool IRTranslator::translateCall(const User &U) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

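// A static alloca becomes a fixed stack object; the result vreg is bound to
// its address with G_FRAME_INDEX.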
bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  unsigned Res = getOrCreateVReg(AI);
  int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

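// PHIs are created with only their def operand while the blocks are being
// translated; the incoming (value, block) pairs are filled in later by
// finishPendingPhis, once every block and vreg exists.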
bool IRTranslator::translatePHI(const User &U) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(MIRBuilder.getMF(), Phi.second);

    // All the MachineBasicBlocks exist by now, so add the incoming values and
    // blocks to the PHI. We assume the IRTranslator doesn't introduce extra
    // control flow; otherwise we would need to find the dominating predecessor
    // here (or perhaps force the weirder translators to provide a simple
    // boundary).
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }

  PendingPHIs.clear();
}

bool IRTranslator::translate(const Instruction &Inst) {
  MIRBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

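// Simple constants are materialized through EntryBuilder, i.e. in the entry
// block; constant expressions are expanded like their corresponding
// instructions.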
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, CI->getZExtValue());
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
        .addDef(Reg)
        .addImm(0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  finishPendingPhis();

  // Release the memory used by the different maps we
  // needed during the translation.
  ValToVReg.clear();
  Constants.clear();
}

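// Main entry point: lower the formal arguments through CallLowering, translate
// every instruction of every basic block in program order, then resolve the
// pending PHIs and clear the per-function state.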
bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
  const Function &F = *MF.getFunction();
  if (F.empty())
    return false;
  CLI = MF.getSubtarget().getCallLowering();
  MIRBuilder.setMF(MF);
  EntryBuilder.setMF(MF);
  MRI = &MF.getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up the arguments.
  MachineBasicBlock &MBB = getOrCreateBB(F.front());
  MIRBuilder.setMBB(MBB);
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(MIRBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MIRBuilder.getMF().getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // Now that the argument-lowering code has been emitted, it's safe to pick an
  // insertion point for any constants we find in the IR.
  if (MBB.empty())
    EntryBuilder.setMBB(MBB);
  else
    EntryBuilder.setInstr(MBB.back(), /* Before */ false);

  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    MIRBuilder.setMBB(MBB);
    for (const Instruction &Inst : BB) {
      bool Succeeded = translate(Inst);
      if (!Succeeded) {
        DEBUG(dbgs() << "Cannot translate: " << Inst << '\n');
        if (TPC->isGlobalISelAbortEnabled())
          report_fatal_error("Unable to translate instruction");
        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  finalizeFunction();

  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(MF);

  return false;
}