1 //===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the IRTranslator class.
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
14 
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
17 #include "llvm/CodeGen/MachineFunction.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetPassConfig.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Type.h"
26 #include "llvm/IR/Value.h"
27 #include "llvm/Target/TargetIntrinsicInfo.h"
28 #include "llvm/Target/TargetLowering.h"
29 
30 #define DEBUG_TYPE "irtranslator"
31 
32 using namespace llvm;
33 
34 char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)
40 
41 IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
42   initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
43 }
44 
45 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
46   AU.addRequired<TargetPassConfig>();
47   MachineFunctionPass::getAnalysisUsage(AU);
48 }
49 
50 
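/// Get the virtual register holding Val, creating it (and, for Constants,
/// emitting the code that materializes the value) on first use.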
51 unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
52   unsigned &ValReg = ValToVReg[&Val];
53   // Check if this is the first time we see Val.
54   if (!ValReg) {
    // Create a vreg wide enough to hold the whole value.
57     assert(Val.getType()->isSized() &&
58            "Don't know how to create an empty vreg");
59     unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
60     ValReg = VReg;
61 
62     if (auto CV = dyn_cast<Constant>(&Val)) {
63       bool Success = translate(*CV, VReg);
64       if (!Success) {
65         if (!TPC->isGlobalISelAbortEnabled()) {
66           MIRBuilder.getMF().getProperties().set(
67               MachineFunctionProperties::Property::FailedISel);
68           return 0;
69         }
70         report_fatal_error("unable to translate constant");
71       }
72     }
73   }
74   return ValReg;
75 }
76 
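/// Return the alignment of the memory access performed by I (a load or a
/// store), falling back to the ABI alignment of the accessed type when the
/// IR doesn't specify one.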
77 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
78   unsigned Alignment = 0;
79   Type *ValTy = nullptr;
80   if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
81     Alignment = SI->getAlignment();
82     ValTy = SI->getValueOperand()->getType();
83   } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
84     Alignment = LI->getAlignment();
85     ValTy = LI->getType();
86   } else if (!TPC->isGlobalISelAbortEnabled()) {
87     MIRBuilder.getMF().getProperties().set(
88         MachineFunctionProperties::Property::FailedISel);
89     return 1;
90   } else
91     llvm_unreachable("unhandled memory instruction");
92 
93   return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
94 }
95 
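/// Return the MachineBasicBlock corresponding to BB, creating it and
/// appending it to the MachineFunction on first use.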
96 MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
97   MachineBasicBlock *&MBB = BBToMBB[&BB];
98   if (!MBB) {
99     MachineFunction &MF = MIRBuilder.getMF();
100     MBB = MF.CreateMachineBasicBlock();
101     MF.push_back(MBB);
102   }
103   return *MBB;
104 }
105 
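/// Translate a two-operand IR operation (instruction or constant expression)
/// into the generic machine opcode Opcode, e.g. G_ADD for an IR add.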
106 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // Note that vreg creation requires a sized type. FIXME: should a Constant
  // operand be materialized once (as getOrCreateVReg does now) or inlined at
  // each use?
113   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
114   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
115   unsigned Res = getOrCreateVReg(U);
116   MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
117   return true;
118 }
119 
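/// Translate an integer or floating-point comparison. U may be either a
/// CmpInst or a compare ConstantExpr; the predicate is read from whichever
/// form it takes.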
120 bool IRTranslator::translateCompare(const User &U) {
121   const CmpInst *CI = dyn_cast<CmpInst>(&U);
122   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
123   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
124   unsigned Res = getOrCreateVReg(U);
125   CmpInst::Predicate Pred =
126       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
127                                     cast<ConstantExpr>(U).getPredicate());
128 
129   if (CmpInst::isIntPredicate(Pred))
130     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
131   else
132     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
133 
134   return true;
135 }
136 
137 bool IRTranslator::translateRet(const User &U) {
138   const ReturnInst &RI = cast<ReturnInst>(U);
139   const Value *Ret = RI.getReturnValue();
  // The target may change the insertion point, but that doesn't matter: a
  // return is always the last instruction of its block.
143   return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
144 }
145 
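/// Translate a branch. A conditional branch becomes a G_BRCOND to the true
/// successor followed by an unconditional G_BR to the false one; CFG edges
/// are added for every IR successor.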
146 bool IRTranslator::translateBr(const User &U) {
147   const BranchInst &BrInst = cast<BranchInst>(U);
148   unsigned Succ = 0;
149   if (!BrInst.isUnconditional()) {
150     // We want a G_BRCOND to the true BB followed by an unconditional branch.
151     unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
152     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
153     MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
154     MIRBuilder.buildBrCond(Tst, TrueBB);
155   }
156 
157   const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
158   MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
159   MIRBuilder.buildBr(TgtBB);
160 
161   // Link successors.
162   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Successor : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*Successor));
165   return true;
166 }
167 
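/// Translate a non-atomic load into a G_LOAD carrying a MachineMemOperand
/// that records the pointer, volatility, size and alignment of the access.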
168 bool IRTranslator::translateLoad(const User &U) {
169   const LoadInst &LI = cast<LoadInst>(U);
170 
171   if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
172     return false;
173 
174   assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
175   auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
176                                : MachineMemOperand::MONone;
177   Flags |= MachineMemOperand::MOLoad;
178 
179   MachineFunction &MF = MIRBuilder.getMF();
180   unsigned Res = getOrCreateVReg(LI);
181   unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
183   MIRBuilder.buildLoad(
184       Res, Addr,
185       *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
186                                Flags, DL->getTypeStoreSize(LI.getType()),
187                                getMemOpAlignment(LI)));
188   return true;
189 }
190 
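/// Translate a non-atomic store into a G_STORE, with a MachineMemOperand
/// built the same way as in the load case above.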
191 bool IRTranslator::translateStore(const User &U) {
192   const StoreInst &SI = cast<StoreInst>(U);
193 
194   if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
195     return false;
196 
197   assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
198   auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
199                                : MachineMemOperand::MONone;
200   Flags |= MachineMemOperand::MOStore;
201 
202   MachineFunction &MF = MIRBuilder.getMF();
203   unsigned Val = getOrCreateVReg(*SI.getValueOperand());
204   unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
207 
208   MIRBuilder.buildStore(
209       Val, Addr, *MF.getMachineMemOperand(
210                      MachinePointerInfo(SI.getPointerOperand()), Flags,
211                      DL->getTypeStoreSize(SI.getValueOperand()->getType()),
212                      getMemOpAlignment(SI)));
213   return true;
214 }
215 
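/// Translate extractvalue into a G_EXTRACT at the constant bit offset of the
/// requested member within the aggregate.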
216 bool IRTranslator::translateExtractValue(const User &U) {
217   const Value *Src = U.getOperand(0);
218   Type *Int32Ty = Type::getInt32Ty(U.getContext());
219   SmallVector<Value *, 1> Indices;
220 
  // getIndexedOffsetInType is designed for GEPs, so the first index selects
  // the pointed-to object rather than a member of the aggregate; prepend an
  // explicit zero so the remaining indices line up.
223   Indices.push_back(ConstantInt::get(Int32Ty, 0));
224 
225   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
226     for (auto Idx : EVI->indices())
227       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
228   } else {
229     for (unsigned i = 1; i < U.getNumOperands(); ++i)
230       Indices.push_back(U.getOperand(i));
231   }
232 
233   uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
234 
235   unsigned Res = getOrCreateVReg(U);
236   MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));
237 
238   return true;
239 }
240 
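/// Translate insertvalue into a G_INSERT of the new member at its constant
/// bit offset within the aggregate.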
241 bool IRTranslator::translateInsertValue(const User &U) {
242   const Value *Src = U.getOperand(0);
243   Type *Int32Ty = Type::getInt32Ty(U.getContext());
244   SmallVector<Value *, 1> Indices;
245 
  // getIndexedOffsetInType is designed for GEPs, so the first index selects
  // the pointed-to object rather than a member of the aggregate; prepend an
  // explicit zero so the remaining indices line up.
248   Indices.push_back(ConstantInt::get(Int32Ty, 0));
249 
250   if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
251     for (auto Idx : IVI->indices())
252       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
253   } else {
254     for (unsigned i = 2; i < U.getNumOperands(); ++i)
255       Indices.push_back(U.getOperand(i));
256   }
257 
258   uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
259 
260   unsigned Res = getOrCreateVReg(U);
261   const Value &Inserted = *U.getOperand(1);
262   MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
263                          Offset);
264 
265   return true;
266 }
267 
268 bool IRTranslator::translateSelect(const User &U) {
269   MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
270                          getOrCreateVReg(*U.getOperand(1)),
271                          getOrCreateVReg(*U.getOperand(2)));
272   return true;
273 }
274 
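/// A bitcast between types with the same low-level representation is free:
/// reuse the source vreg, or emit a COPY if a result vreg already exists.
/// Everything else becomes a G_BITCAST.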
275 bool IRTranslator::translateBitCast(const User &U) {
276   if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
277     unsigned &Reg = ValToVReg[&U];
278     if (Reg)
279       MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
280     else
281       Reg = getOrCreateVReg(*U.getOperand(0));
282     return true;
283   }
284   return translateCast(TargetOpcode::G_BITCAST, U);
285 }
286 
287 bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
288   unsigned Op = getOrCreateVReg(*U.getOperand(0));
289   unsigned Res = getOrCreateVReg(U);
290   MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
291   return true;
292 }
293 
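/// Translate a GEP by walking its indices: struct-field and constant offsets
/// are folded into a single running byte offset, while each dynamic index
/// emits a G_MUL by the element size and a G_GEP off the current base.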
294 bool IRTranslator::translateGetElementPtr(const User &U) {
295   // FIXME: support vector GEPs.
296   if (U.getType()->isVectorTy())
297     return false;
298 
299   Value &Op0 = *U.getOperand(0);
300   unsigned BaseReg = getOrCreateVReg(Op0);
301   LLT PtrTy{*Op0.getType(), *DL};
302   unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
303   LLT OffsetTy = LLT::scalar(PtrSize);
304 
305   int64_t Offset = 0;
306   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
307        GTI != E; ++GTI) {
308     const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // If this index is a constant, fold it into the running offset.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    // A dynamic index follows: materialize the constant offset accumulated
    // so far before computing the variable part.
    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    // Sign-extend or truncate the index to the pointer-offset width.
    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
351   }
352 
353   if (Offset != 0) {
354     unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
355     MIRBuilder.buildConstant(OffsetReg, Offset);
356     MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
357     return true;
358   }
359 
360   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
361   return true;
362 }
363 
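/// Lower llvm.memcpy as a plain call to the external "memcpy" symbol. Only
/// address-space-0 pointers with a pointer-sized length are handled here;
/// anything else falls back to the generic intrinsic path in translateCall.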
364 bool IRTranslator::translateMemcpy(const CallInst &CI) {
365   LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
366   if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
367           0 ||
368       cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
369           0 ||
370       SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
371     return false;
372 
373   SmallVector<CallLowering::ArgInfo, 8> Args;
374   for (int i = 0; i < 3; ++i) {
375     const auto &Arg = CI.getArgOperand(i);
376     Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
377   }
378 
379   MachineOperand Callee = MachineOperand::CreateES("memcpy");
380 
381   return CLI->lowerCall(MIRBuilder, Callee,
382                         CallLowering::ArgInfo(0, CI.getType()), Args);
383 }
384 
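/// Translate the intrinsics the IRTranslator handles directly. The overflow
/// intrinsics map onto their generic opcodes (the unsigned add/sub variants
/// take an explicit zero carry-in), and the value/flag pair is packed back
/// into the IR struct with a G_SEQUENCE.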
385 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
386                                            Intrinsic::ID ID) {
387   unsigned Op = 0;
388   switch (ID) {
389   default: return false;
390   case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
391   case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
392   case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
393   case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
394   case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
395   case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
396   case Intrinsic::memcpy:
397     return translateMemcpy(CI);
398   case Intrinsic::objectsize: {
399     // If we don't know by now, we're never going to know.
400     const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
401 
402     MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
403     return true;
404   }
405   }
406 
407   LLT Ty{*CI.getOperand(0)->getType(), *DL};
408   LLT s1 = LLT::scalar(1);
409   unsigned Width = Ty.getSizeInBits();
410   unsigned Res = MRI->createGenericVirtualRegister(Ty);
411   unsigned Overflow = MRI->createGenericVirtualRegister(s1);
412   auto MIB = MIRBuilder.buildInstr(Op)
413                  .addDef(Res)
414                  .addDef(Overflow)
415                  .addUse(getOrCreateVReg(*CI.getOperand(0)))
416                  .addUse(getOrCreateVReg(*CI.getOperand(1)));
417 
418   if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
419     unsigned Zero = MRI->createGenericVirtualRegister(s1);
420     EntryBuilder.buildConstant(Zero, 0);
421     MIB.addUse(Zero);
422   }
423 
424   MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
425   return true;
426 }
427 
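/// Translate a call: ordinary calls go through CallLowering, known
/// intrinsics through translateKnownIntrinsic, and any remaining intrinsic
/// becomes a generic intrinsic instruction taking immediate or vreg
/// operands.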
428 bool IRTranslator::translateCall(const User &U) {
429   const CallInst &CI = cast<CallInst>(U);
  auto IntrInfo = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
431   const Function *F = CI.getCalledFunction();
432 
433   if (!F || !F->isIntrinsic()) {
434     unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
435     SmallVector<unsigned, 8> Args;
436     for (auto &Arg: CI.arg_operands())
437       Args.push_back(getOrCreateVReg(*Arg));
438 
439     return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
440       return getOrCreateVReg(*CI.getCalledValue());
441     });
442   }
443 
444   Intrinsic::ID ID = F->getIntrinsicID();
445   if (TII && ID == Intrinsic::not_intrinsic)
446     ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
447 
448   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
449 
450   if (translateKnownIntrinsic(CI, ID))
451     return true;
452 
453   unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
454   MachineInstrBuilder MIB =
455       MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());
456 
457   for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CstArg = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CstArg->getSExtValue());
460     else
461       MIB.addUse(getOrCreateVReg(*Arg));
462   }
463   return true;
464 }
465 
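/// Translate a static alloca by creating a frame object of the appropriate
/// size and alignment and defining the result vreg via its frame index.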
466 bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
467   if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
468     return false;
469 
470   assert(AI.isStaticAlloca() && "only handle static allocas now");
471   MachineFunction &MF = MIRBuilder.getMF();
472   unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
473   unsigned Size =
474       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
475 
476   // Always allocate at least one byte.
477   Size = std::max(Size, 1u);
478 
479   unsigned Alignment = AI.getAlignment();
480   if (!Alignment)
481     Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
482 
483   unsigned Res = getOrCreateVReg(AI);
484   int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
485   MIRBuilder.buildFrameIndex(Res, FI);
486   return true;
487 }
488 
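/// Emit a PHI defining only the result vreg; the incoming value/block pairs
/// are filled in by finishPendingPhis once every block has been translated.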
489 bool IRTranslator::translatePHI(const User &U) {
490   const PHINode &PI = cast<PHINode>(U);
491   auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
492   MIB.addDef(getOrCreateVReg(PI));
493 
494   PendingPHIs.emplace_back(&PI, MIB.getInstr());
495   return true;
496 }
497 
498 void IRTranslator::finishPendingPhis() {
499   for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
500     const PHINode *PI = Phi.first;
501     MachineInstrBuilder MIB(MIRBuilder.getMF(), Phi.second);
502 
    // All MachineBasicBlocks exist now, so the incoming blocks can be added
    // to the PHI. This assumes the IRTranslator doesn't introduce extra
    // control flow; otherwise we would need to find the dominating
    // predecessor here (or force the more exotic translations to provide a
    // simple CFG boundary).
507     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
508       assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
509              "I appear to have misunderstood Machine PHIs");
510       MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
511       MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
512     }
513   }
514 
515   PendingPHIs.clear();
516 }
517 
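/// Translate a single IR instruction by dispatching on its opcode to the
/// corresponding translate##OPCODE method, generated from Instruction.def.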
518 bool IRTranslator::translate(const Instruction &Inst) {
519   MIRBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
521 #define HANDLE_INST(NUM, OPCODE, CLASS) \
522     case Instruction::OPCODE: return translate##OPCODE(Inst);
523 #include "llvm/IR/Instruction.def"
524   default:
525     if (!TPC->isGlobalISelAbortEnabled())
526       return false;
527     llvm_unreachable("unknown opcode");
528   }
529 }
530 
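/// Emit the code materializing C into Reg. Simple constants are built with
/// EntryBuilder, so their definitions land in the entry block; constant
/// expressions are dispatched to the ordinary translate##OPCODE handlers.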
531 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
532   if (auto CI = dyn_cast<ConstantInt>(&C))
533     EntryBuilder.buildConstant(Reg, CI->getZExtValue());
534   else if (auto CF = dyn_cast<ConstantFP>(&C))
535     EntryBuilder.buildFConstant(Reg, *CF);
536   else if (isa<UndefValue>(C))
537     EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
538   else if (isa<ConstantPointerNull>(C))
539     EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
540         .addDef(Reg)
541         .addImm(0);
542   else if (auto GV = dyn_cast<GlobalValue>(&C))
543     EntryBuilder.buildGlobalValue(Reg, GV);
544   else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
545     switch(CE->getOpcode()) {
546 #define HANDLE_INST(NUM, OPCODE, CLASS)                         \
547       case Instruction::OPCODE: return translate##OPCODE(*CE);
548 #include "llvm/IR/Instruction.def"
549     default:
550       if (!TPC->isGlobalISelAbortEnabled())
551         return false;
552       llvm_unreachable("unknown opcode");
553     }
554   } else if (!TPC->isGlobalISelAbortEnabled())
555     return false;
556   else
557     llvm_unreachable("unhandled constant kind");
558 
559   return true;
560 }
561 
562 
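/// Tear down per-function state: complete the pending PHIs now that all
/// blocks and values exist, then drop the per-function maps.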
563 void IRTranslator::finalizeFunction() {
564   finishPendingPhis();
565 
  // Release the memory used by the maps we needed during the translation.
  ValToVReg.clear();
  BBToMBB.clear();
  Constants.clear();
570 }
571 
572 bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
573   const Function &F = *MF.getFunction();
574   if (F.empty())
575     return false;
576   CLI = MF.getSubtarget().getCallLowering();
577   MIRBuilder.setMF(MF);
578   EntryBuilder.setMF(MF);
579   MRI = &MF.getRegInfo();
580   DL = &F.getParent()->getDataLayout();
581   TPC = &getAnalysis<TargetPassConfig>();
582 
583   assert(PendingPHIs.empty() && "stale PHIs");
584 
  // Set up the arguments.
586   MachineBasicBlock &MBB = getOrCreateBB(F.front());
587   MIRBuilder.setMBB(MBB);
588   SmallVector<unsigned, 8> VRegArgs;
589   for (const Argument &Arg: F.args())
590     VRegArgs.push_back(getOrCreateVReg(Arg));
591   bool Succeeded = CLI->lowerFormalArguments(MIRBuilder, F, VRegArgs);
592   if (!Succeeded) {
593     if (!TPC->isGlobalISelAbortEnabled()) {
594       MIRBuilder.getMF().getProperties().set(
595           MachineFunctionProperties::Property::FailedISel);
596       return false;
597     }
598     report_fatal_error("Unable to lower arguments");
599   }
600 
  // Now that the ABI-lowering code for the formal arguments is in place, it's
  // safe to pick an insertion point for any Constants we encounter in the IR.
603   if (MBB.empty())
604     EntryBuilder.setMBB(MBB);
605   else
606     EntryBuilder.setInstr(MBB.back(), /* Before */ false);
607 
608   for (const BasicBlock &BB: F) {
609     MachineBasicBlock &MBB = getOrCreateBB(BB);
610     // Set the insertion point of all the following translations to
611     // the end of this basic block.
612     MIRBuilder.setMBB(MBB);
613     for (const Instruction &Inst: BB) {
614       bool Succeeded = translate(Inst);
615       if (!Succeeded) {
616         DEBUG(dbgs() << "Cannot translate: " << Inst << '\n');
617         if (TPC->isGlobalISelAbortEnabled())
618           report_fatal_error("Unable to translate instruction");
619         MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
620         break;
621       }
622     }
623   }
624 
625   finalizeFunction();
626 
627   // Now that the MachineFrameInfo has been configured, no further changes to
628   // the reserved registers are possible.
629   MRI->freezeReservedRegs(MF);
630 
631   return false;
632 }
633