//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

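// Map an IR Value to a virtual register, creating the register (and, for
// constants, a defining instruction in the entry block) on first use.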
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    // Create a virtual register of the right size for Val. Constants are
    // materialized immediately so that the register has a defining
    // instruction.
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MIRBuilder.getMF().getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return 0;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }
  return ValReg;
}

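// Return the frame index for AI, creating a new stack object on first use.
// Note: the cast of the array size below assumes a constant-sized (static)
// alloca.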
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  MachineFunction &MF = MIRBuilder.getMF();
  // Use the allocated size (including padding) so that arrays of the type
  // are laid out correctly.
  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

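// Return the alignment of a load or store, falling back to the ABI type
// alignment when the IR instruction doesn't specify one.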
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MIRBuilder.getMF().getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

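// Map an IR basic block to its MachineBasicBlock, creating and appending it
// to the function on first use.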
MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MachineFunction &MF = MIRBuilder.getMF();
    MBB = MF.CreateMachineBasicBlock();
    MF.push_back(MBB);
  }
  return *MBB;
}

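// Translate a two-operand instruction into a single generic instruction,
// e.g. (roughly, with illustrative vreg numbers):
//   %2 = add i32 %0, %1  -->  %vreg2(s32) = G_ADD %vreg0, %vreg1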
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // FIXME: constant operands are currently materialized into registers via
  // getOrCreateVReg; it may be better to inline them at each use instead.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess up the insertion point, but that doesn't matter:
  // a return is always the last instruction of the block.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

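// Translate an IR branch. Conditional branches become a G_BRCOND to the true
// block followed by an unconditional G_BR, e.g. (roughly):
//   br i1 %c, label %t, label %f  -->  G_BRCOND %c, %bb.t
//                                      G_BR %bb.f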
bool IRTranslator::translateBr(const User &U) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *BrInst.getSuccessor(Succ++);
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *BrInst.getSuccessor(Succ);
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *SuccBB : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*SuccBB));
  return true;
}

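// Translate an IR load into a G_LOAD carrying a MachineMemOperand that
// describes the access. Atomic loads are not supported yet.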
bool IRTranslator::translateLoad(const User &U) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                               Flags, DL->getTypeStoreSize(LI.getType()),
                               getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr, *MF.getMachineMemOperand(
                     MachinePointerInfo(SI.getPointerOperand()), Flags,
                     DL->getTypeStoreSize(SI.getValueOperand()->getType()),
                     getMemOpAlignment(SI)));
  return true;
}

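// Translate extractvalue (or the equivalent constant expression) into a
// G_EXTRACT at the field's bit offset, e.g. (roughly; offsets depend on the
// DataLayout):
//   %f = extractvalue {i32, i64} %agg, 1
//     -->  %vregF(s64) = G_EXTRACT %vregAgg, 64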
bool IRTranslator::translateExtractValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointed-to type rather than into the aggregate, so prepend a
  // zero index.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // Convert the byte offset to bits for G_EXTRACT.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointed-to type rather than into the aggregate, so prepend a
  // zero index.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // Convert the byte offset to bits for G_INSERT.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translateBitCast(const User &U) {
  // A bitcast between types with the same low-level representation is a
  // no-op: reuse the source register, or emit a copy if the result already
  // has one assigned.
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

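// Translate a getelementptr by folding constant indices into a running byte
// offset and emitting G_MUL/G_GEP for variable ones, e.g. (roughly, with
// illustrative vreg names):
//   %p = getelementptr i32, i32* %base, i64 %idx
//     -->  %size(s64) = G_CONSTANT 4
//          %off(s64)  = G_MUL %size, %idxreg
//          %preg(p0)  = G_GEP %basereg, %off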
bool IRTranslator::translateGetElementPtr(const User &U) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // Fold a constant index into the running offset.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    // A variable index follows: flush the accumulated constant offset first.
    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

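// Lower the memcpy intrinsic to a call to the external "memcpy" symbol.
// Only the simple case (both pointers in address space 0, pointer-sized
// length) is handled.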
bool IRTranslator::translateMemcpy(const CallInst &CI) {
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (unsigned i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

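// Emit a LOAD_STACK_GUARD into DstReg, attaching a memory operand for the
// stack-guard global when the target exposes one.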
void IRTranslator::getStackGuard(unsigned DstReg) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &MF = MIRBuilder.getMF();
  auto &TLI = *MF.getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF.getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                              DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

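// Translate the intrinsics the translator knows how to open-code. The
// *.with.overflow intrinsics set Op and fall through to the common tail
// after the switch; everything else either returns early or is rejected.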
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID) {
  unsigned Op = 0;
  switch (ID) {
  default: return false;
  case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
  case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
  case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
  case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
  case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
  case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
  case Intrinsic::memcpy:
    return translateMemcpy(CI);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MIRBuilder.getMF().getMMI().getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI));
    return true;
  case Intrinsic::stackprotector: {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF.getMachineMemOperand(
            MachinePointerInfo::getFixedStack(MF, getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }

  // Common tail for the *.with.overflow intrinsics: build the operation,
  // then pack the value and the overflow bit into the result sequence.
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  // The unsigned add/sub opcodes take a carry-in operand; feed them zero.
  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

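// Translate a call: direct and indirect calls go through CallLowering;
// intrinsics are either open-coded by translateKnownIntrinsic or emitted as
// a generic G_INTRINSIC / G_INTRINSIC_W_SIDE_EFFECTS instruction.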
bool IRTranslator::translateCall(const User &U) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CImm = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CImm->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

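// Translate an invoke: emit the call bracketed by EH_LABELs so that the
// MachineModuleInfo knows the range covered by the try, then link the normal
// and exceptional successors.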
bool IRTranslator::translateInvoke(const User &U) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MachineFunction &MF = MIRBuilder.getMF();
  MachineModuleInfo &MMI = MF.getMMI();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support indirect invokes; Fn is null for those, and CreateGA
  // below requires a global.
  if (!Fn)
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MMI knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = MMI.getContext().createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = MMI.getContext().createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MMI.addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

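// Translate a landingpad: mark the block as an EH pad, emit the EH_LABEL,
// and copy the exception pointer and selector out of their physical
// registers into a G_SEQUENCE forming the landingpad's value.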
bool IRTranslator::translateLandingPad(const User &U) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineModuleInfo &MMI = MF.getMMI();
  AddLandingPadInfo(LP, MMI, &MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF.getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF.getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create values
  // for its exception pointer and selector. Extracting the exception pointer
  // or selector from a token-typed landingpad is not currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MMI.addLandingPad(&MBB));

  // Copy the exception pointer and selector values into virtual registers.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

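// Translate a static alloca into a G_FRAME_INDEX referencing its stack slot.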
bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

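// Emit an empty PHI now and record it; the incoming operands are filled in
// by finishPendingPhis once all blocks have been translated.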
bool IRTranslator::translatePHI(const User &U) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(MIRBuilder.getMF(), Phi.second);

    // All MachineBasicBlocks exist by now, so add the incoming values to the
    // PHI. We assume the translation doesn't create extra control flow;
    // otherwise we would need to find the dominating predecessor here (or
    // force the weirder translations to provide a simple CFG boundary).
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }

  PendingPHIs.clear();
}

bool IRTranslator::translate(const Instruction &Inst) {
  MIRBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

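// Materialize a constant into Reg. Constants are emitted via EntryBuilder so
// that they dominate all uses; constant expressions reuse the per-opcode
// translate* handlers.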
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, CI->getZExtValue());
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
        .addDef(Reg)
        .addImm(0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  finishPendingPhis();

  // Release the memory used by the maps we needed during the translation.
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
}

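// Entry point: lower the formal arguments, translate every instruction of
// every block, then resolve the recorded PHIs and drop the per-function maps.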
bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
  const Function &F = *MF.getFunction();
  if (F.empty())
    return false;
  CLI = MF.getSubtarget().getCallLowering();
  MIRBuilder.setMF(MF);
  EntryBuilder.setMF(MF);
  MRI = &MF.getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up the arguments.
  MachineBasicBlock &EntryBB = getOrCreateBB(F.front());
  MIRBuilder.setMBB(EntryBB);
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(MIRBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MIRBuilder.getMF().getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // Now that we've emitted the ABI handling code, it's safe to set an
  // insertion point for any Constants we find in the IR.
  if (EntryBB.empty())
    EntryBuilder.setMBB(EntryBB);
  else
    EntryBuilder.setInstr(EntryBB.back(), /* Before */ false);

  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    MIRBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      bool Succeeded = translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  finalizeFunction();

  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(MF);

  return false;
}