//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

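// Return the virtual register assigned to Val, creating it on first use. The
// register's type/size is derived from Val's IR type; Constants are
// additionally translated right away so the register has a definition.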
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Create a virtual register sized after Val's type; Constants are
    // translated eagerly so that later uses can simply read this register.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MIRBuilder.getMF().getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return 0;
        }
        report_fatal_error("unable to translate constant");
      }
    }
  }
  return ValReg;
}

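// Return the alignment for a memory access: the explicit alignment of the
// load/store if it has one, otherwise the ABI alignment of the accessed type.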
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MIRBuilder.getMF().getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

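// Return the MachineBasicBlock for BB, lazily creating it and appending it to
// the MachineFunction the first time BB is seen.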
MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MachineFunction &MF = MIRBuilder.getMF();
    MBB = MF.CreateMachineBasicBlock();
    MF.push_back(MBB);
  }
  return *MBB;
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // FIXME: should constant operands be materialized as immediates (or inlined
  // at each use) instead of getting their own virtual register?
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

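// Compares reach us either as CmpInst instructions or as compare constant
// expressions; either way the predicate tells us whether to emit an integer
// or a floating-point compare.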
bool IRTranslator::translateCompare(const User &U) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may move the insertion point while lowering the return, but
  // that doesn't matter: a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

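// A conditional branch becomes a G_BRCOND to the true block followed by an
// unconditional branch to the false block; an unconditional branch only needs
// the latter. In both cases the CFG successor edges are recorded as well.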
bool IRTranslator::translateBr(const User &U) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *SuccBB : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*SuccBB));
  return true;
}

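// Loads are currently restricted to the "simple" (non-atomic, non-volatile)
// case; the emitted instruction carries a memory operand describing the
// pointer, the store size of the loaded type and the alignment.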
bool IRTranslator::translateLoad(const User &U) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && !LI.isSimple())
    return false;

  assert(LI.isSimple() && "only simple loads are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  LLT VTy{*LI.getType(), DL}, PTy{*LI.getPointerOperand()->getType()};

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF.getMachineMemOperand(
          MachinePointerInfo(LI.getPointerOperand()), MachineMemOperand::MOLoad,
          DL->getTypeStoreSize(LI.getType()), getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && !SI.isSimple())
    return false;

  assert(SI.isSimple() && "only simple stores are supported at the moment");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  LLT VTy{*SI.getValueOperand()->getType(), DL},
      PTy{*SI.getPointerOperand()->getType()};

  MIRBuilder.buildStore(
      Val, Addr,
      *MF.getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()),
          MachineMemOperand::MOStore,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}

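// extractvalue is lowered to an extract at a bit offset computed from the
// aggregate's layout in the DataLayout.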
bool IRTranslator::translateExtractValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointer operand rather than into the aggregate, so prepend a
  // zero index before the real aggregate indices.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // Convert the byte offset into the bit offset used by the extract.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

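// insertvalue mirrors extractvalue above: the inserted value is placed into
// the aggregate at a bit offset computed from the DataLayout.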
bool IRTranslator::translateInsertValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointer operand rather than into the aggregate, so prepend a
  // zero index before the real aggregate indices.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // Convert the byte offset into the bit offset used by the insert.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

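// A bitcast that doesn't change the low-level representation is translated to
// a plain copy (or simply reuses the source register); anything else becomes
// a G_BITCAST.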
bool IRTranslator::translateBitCast(const User &U) {
  if (LLT{*U.getOperand(0)->getType()} == LLT{*U.getType()}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

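// Lower a GEP by walking its indices: struct fields and constant indices are
// folded into a running byte offset, while each variable index is scaled by
// its element size and added onto the base pointer as it is encountered.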
bool IRTranslator::translateGetElementPtr(const User &U) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy(*Op0.getType());
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // Constant indices can simply be folded into the running offset.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    // Materialize any accumulated constant offset before handling the
    // variable index.
    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  // Emit any leftover constant offset; otherwise just copy the final base.
  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}


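// Intrinsics with a direct generic-instruction equivalent (currently the
// *_with_overflow family) are expanded here: the value and the overflow bit
// are computed separately and then packed back into the intrinsic's aggregate
// result. Anything else returns false and is handled generically in
// translateCall.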
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID) {
  unsigned Op = 0;
  switch (ID) {
  default: return false;
  case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
  case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
  case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
  case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
  case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
  case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
  }

  LLT Ty{*CI.getOperand(0)->getType()};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    // These opcodes also take a carry-in operand; feed in a constant "no
    // carry".
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

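// Plain calls (direct or through a function pointer) are handed off to
// CallLowering; recognized intrinsics are either expanded by
// translateKnownIntrinsic or emitted as a generic intrinsic instruction whose
// constant-int arguments become immediates.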
bool IRTranslator::translateCall(const User &U) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    // FIXME: handle multiple return values.
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Constant-int arguments are added as immediates; everything else goes
    // through a virtual register.
    if (auto *CstArg = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CstArg->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

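// A static alloca becomes a stack object of known size and alignment, and the
// alloca's register is defined from that object's frame index.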
bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  unsigned Res = getOrCreateVReg(AI);
  int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

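// Only the PHI's definition is created here; its incoming (value, block)
// operands are filled in by finishPendingPhis once every block and value has
// been translated.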
bool IRTranslator::translatePHI(const User &U) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(MIRBuilder.getMF(), Phi.second);

    // All MachineBasicBlocks exist by now, so the incoming operands can be
    // added to the PHI. We assume the IRTranslator doesn't create extra
    // control flow; otherwise we would need to find the dominating
    // predecessor here (or perhaps force the weirder translations to provide
    // a simple boundary).
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }

  PendingPHIs.clear();
}

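// Dispatch on the IR opcode via Instruction.def so that each instruction kind
// is routed to the corresponding translate##OPCODE method.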
bool IRTranslator::translate(const Instruction &Inst) {
  MIRBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, CI->getZExtValue());
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
        .addDef(Reg)
        .addImm(0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}


void IRTranslator::finalizeFunction() {
  finishPendingPhis();

  // Release the memory used by the different maps we
  // needed during the translation.
  ValToVReg.clear();
  Constants.clear();
}

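// Pass entry point: lower the formal arguments, translate every instruction
// of every basic block in program order, then resolve the deferred PHIs and
// drop the per-function state.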
bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
  const Function &F = *MF.getFunction();
  if (F.empty())
    return false;
  CLI = MF.getSubtarget().getCallLowering();
  MIRBuilder.setMF(MF);
  EntryBuilder.setMF(MF);
  MRI = &MF.getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Setup the arguments.
  MachineBasicBlock &MBB = getOrCreateBB(F.front());
  MIRBuilder.setMBB(MBB);
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded =
      CLI->lowerFormalArguments(MIRBuilder, F.getArgumentList(), VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MIRBuilder.getMF().getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // Now that the argument lowering code has been emitted, it's safe to pick
  // an insertion point in the entry block for any Constants we encounter in
  // the IR.
  if (MBB.empty())
    EntryBuilder.setMBB(MBB);
  else
    EntryBuilder.setInstr(MBB.back(), /* Before */ false);

  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    MIRBuilder.setMBB(MBB);
    for (const Instruction &Inst: BB) {
      bool Succeeded = translate(Inst);
      if (!Succeeded) {
        DEBUG(dbgs() << "Cannot translate: " << Inst << '\n');
        if (TPC->isGlobalISelAbortEnabled())
          report_fatal_error("Unable to translate instruction");
        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  finalizeFunction();

  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(MF);

  return false;
}