//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

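// getOrCreateVReg lazily maps an IR Value to a generic virtual register.
// Schematically (a sketch, not verbatim MIR), translating `%s = add i32 %a, %b`
// requests vregs for %a, %b and %s; the first request for a Value creates a
// fresh generic vreg sized from the IR type, later requests return the same
// vreg. Constants are additionally materialized in the entry block.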
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // Check if this is the first time we see Val.
  if (!ValReg) {
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    // Constants get materialized wherever EntryBuilder points, i.e., in the
    // entry block.
    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MIRBuilder.getMF().getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return 0;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }
  return ValReg;
}

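// Each static alloca gets a dedicated, cached stack slot. For example,
// `%x = alloca i64, i32 4` reserves one 32-byte object (4 x 8-byte elements)
// at the alloca's alignment, or the type's ABI alignment if none is given.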
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

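// getMemOpAlignment returns the explicit alignment of a load/store, falling
// back to the ABI alignment of the accessed type when the IR says "align 0".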
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MIRBuilder.getMF().getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MachineFunction &MF = MIRBuilder.getMF();
    MBB = MF.CreateMachineBasicBlock();
    MF.push_back(MBB);
  }
  return *MBB;
}

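// Binary operators map one-to-one onto generic opcodes. Roughly (a MIR
// sketch; the exact printing differs between LLVM revisions):
//   %d = add i32 %a, %b   -->   %vd(s32) = G_ADD %va, %vb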
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and for the result.
  // FIXME: constants could be folded into the instruction instead of being
  // materialized in a register; note that creating a virtual register
  // requires a sized type.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

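// Compares reach us either as CmpInst instructions or as compare
// ConstantExprs; both carry a predicate. Schematically:
//   %c = icmp slt i32 %a, %b   -->   %vc(s1) = G_ICMP intpred(slt), %va, %vb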
bool IRTranslator::translateCompare(const User &U) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may change the insertion point, but that doesn't matter:
  // a return is always the last instruction of its block.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *SuccBB : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*SuccBB));
  return true;
}

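// Loads become G_LOAD carrying a MachineMemOperand that records the access
// size, alignment and volatility. A sketch of the mapping:
//   %v = load i32, i32* %p   -->   %vv(s32) = G_LOAD %vp(p0) ; (load 4)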
bool IRTranslator::translateLoad(const User &U) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF.getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                               Flags, DL->getTypeStoreSize(LI.getType()),
                               getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());
  MIRBuilder.buildStore(
      Val, Addr, *MF.getMachineMemOperand(
                     MachinePointerInfo(SI.getPointerOperand()), Flags,
                     DL->getTypeStoreSize(SI.getValueOperand()->getType()),
                     getMemOpAlignment(SI)));
  return true;
}

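// Aggregate accesses are lowered to G_EXTRACT/G_INSERT with a *bit* offset
// computed from the DataLayout. For example, with %s of type {i32, i32}:
//   %v = extractvalue {i32, i32} %s, 1   -->   G_EXTRACT at bit offset 32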
bool IRTranslator::translateExtractValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so it expects the implicit
  // leading zero index a GEP has before indexing into the aggregate itself.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // G_EXTRACT offsets are in bits; getIndexedOffsetInType returns bytes.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so it expects the implicit
  // leading zero index a GEP has before indexing into the aggregate itself.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // As for extractvalue, the offset is in bits.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translateBitCast(const User &U) {
  // A no-op bitcast (same low-level type on both sides) needs no instruction:
  // reuse the source vreg, or emit a COPY if a vreg already exists for U.
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

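// GEPs are decomposed into pointer arithmetic: constant contributions
// (struct fields, constant indices) are folded into a running byte offset,
// and each variable index costs a G_MUL plus a G_GEP. A rough sketch for
// `getelementptr i32, i32* %p, i64 %i`:
//   %size = G_CONSTANT 4 ; %off = G_MUL %size, %vi ; %res = G_GEP %vp, %off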
bool IRTranslator::translateGetElementPtr(const User &U) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // A constant index can simply be folded into the running offset.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    // A variable index: first flush any accumulated constant offset, ...
    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // ... then compute BaseReg = BaseReg + Idx * ElementSize.
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

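// @llvm.memcpy is lowered as a plain call to the external "memcpy" symbol;
// we only handle the simple case (both pointers in address space 0, size
// operand as wide as a pointer) and defer everything else.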
bool IRTranslator::translateMemcpy(const CallInst &CI) {
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (unsigned i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &MF = MIRBuilder.getMF();
  auto &TLI = *MF.getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF.getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                              DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

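// Overflow intrinsics map onto the G_*O/G_*E opcodes, which define two
// results: the value and the overflow/carry bit. Since the IR result is an
// {iN, i1} struct, the two defs are repacked with G_SEQUENCE. Roughly (a
// sketch; exact MIR printing varies):
//   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//     -->  %v, %o = G_SADDO %va, %vb ; %vr = G_SEQUENCE %v, 0, %o, 32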
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID) {
  unsigned Op = 0;
  switch (ID) {
  default: return false;
  case Intrinsic::uadd_with_overflow: Op = TargetOpcode::G_UADDE; break;
  case Intrinsic::sadd_with_overflow: Op = TargetOpcode::G_SADDO; break;
  case Intrinsic::usub_with_overflow: Op = TargetOpcode::G_USUBE; break;
  case Intrinsic::ssub_with_overflow: Op = TargetOpcode::G_SSUBO; break;
  case Intrinsic::umul_with_overflow: Op = TargetOpcode::G_UMULO; break;
  case Intrinsic::smul_with_overflow: Op = TargetOpcode::G_SMULO; break;
  case Intrinsic::memcpy:
    return translateMemcpy(CI);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MIRBuilder.getMF().getMMI().getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI));
    return true;
  case Intrinsic::stackprotector: {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF.getMachineMemOperand(
            MachinePointerInfo::getFixedStack(MF, getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }

  // We only fall through for the *_with_overflow intrinsics: build the
  // two-result instruction, then pack value and overflow bit into one vreg.
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    // G_UADDE/G_USUBE take an extra carry/borrow-in operand; feed in a
    // constant false since the intrinsic has none.
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

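// Calls come in three flavours: ordinary direct/indirect calls go through
// CallLowering; known intrinsics get dedicated generic code above; any
// remaining intrinsic becomes a generic G_INTRINSIC[_W_SIDE_EFFECTS].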
bool IRTranslator::translateCall(const User &U) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // ConstantInt arguments are encoded as immediates rather than
    // materialized in virtual registers.
    if (ConstantInt *CstArg = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CstArg->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MachineFunction &MF = MIRBuilder.getMF();
  MachineModuleInfo &MMI = MF.getMMI();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support deopt operand bundles.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MMI knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = MMI.getContext().createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = MMI.getContext().createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MMI.addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineModuleInfo &MMI = MF.getMMI();
  addLandingPadInfo(LP, MMI, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF.getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF.getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't extract the
  // exception pointer or selector values: extracting them from token-type
  // landingpads is not currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MMI.addLandingPad(&MBB));

  // Copy the exception pointer and selector out of their physical registers,
  // then pack them into the landingpad's aggregate value.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

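// PHIs are translated in two phases: translatePHI creates an empty PHI node
// up front (so a vreg exists for forward references), and finishPendingPhis
// fills in the (vreg, predecessor MBB) pairs once every block and value has
// been visited.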
bool IRTranslator::translatePHI(const User &U) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(MIRBuilder.getMF(), Phi.second);

    // All MachineBasicBlocks exist by now, so add the incoming values to the
    // PHI. We assume the IRTranslator creates no extra control flow, so each
    // IR predecessor maps directly to a machine predecessor; otherwise we
    // would need to find the dominating predecessor here.
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      assert(BBToMBB[PI->getIncomingBlock(i)]->isSuccessor(MIB->getParent()) &&
             "I appear to have misunderstood Machine PHIs");
      MIB.addUse(getOrCreateVReg(*PI->getIncomingValue(i)));
      MIB.addMBB(BBToMBB[PI->getIncomingBlock(i)]);
    }
  }

  PendingPHIs.clear();
}

bool IRTranslator::translate(const Instruction &Inst) {
  MIRBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, CI->getZExtValue());
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
        .addDef(Reg)
        .addImm(0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  finishPendingPhis();

  // Release the memory used by the different maps we
  // needed during the translation.
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
}

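// Top-level driver: set up per-function state, lower the formal arguments,
// translate each basic block in turn, then resolve pending PHIs and clear
// the per-function maps. On failure we either abort (the default) or mark
// the function with the FailedISel property so a fallback path can take
// over.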
bool IRTranslator::runOnMachineFunction(MachineFunction &MF) {
  const Function &F = *MF.getFunction();
  if (F.empty())
    return false;
  CLI = MF.getSubtarget().getCallLowering();
  MIRBuilder.setMF(MF);
  EntryBuilder.setMF(MF);
  MRI = &MF.getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up the arguments.
  MachineBasicBlock &MBB = getOrCreateBB(F.front());
  MIRBuilder.setMBB(MBB);
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(MIRBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MIRBuilder.getMF().getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // Now that the ABI handling code has been emitted, it's safe to set an
  // insertion point for any Constants we find in the IR.
  if (MBB.empty())
    EntryBuilder.setMBB(MBB);
  else
    EntryBuilder.setInstr(MBB.back(), /* Before */ false);

  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    MIRBuilder.setMBB(MBB);

    for (const Instruction &Inst: BB) {
      bool Succeeded = translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  finalizeFunction();

  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(MF);

  return false;
}