//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

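// Look up the virtual register for \p Val, lazily creating it on first use.
// Constants are materialized via the entry-block builder so every later use
// can simply read the same register.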
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];

  if (ValReg)
    return ValReg;

  // Create a virtual register sized to hold the value.
  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");
  unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
  ValReg = VReg;

  if (auto CV = dyn_cast<Constant>(&Val)) {
    bool Success = translate(*CV, VReg);
    if (!Success) {
      if (!TPC->isGlobalISelAbortEnabled()) {
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        return VReg;
      }
      reportTranslationError(Val, "unable to translate constant");
    }
  }

  return VReg;
}

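// Return the frame index backing \p AI, creating a fixed stack object on
// first use. The object's size is the allocated type's store size times the
// constant array size, and never less than one byte.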
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

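// Compute the alignment for a load or store, falling back to the ABI
// alignment of the accessed type when the IR instruction doesn't carry an
// explicit alignment.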
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }
  return *MBB;
}

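// Switch lowering introduces machine basic blocks with no IR-level
// counterpart, so the machine-level predecessors of each IR CFG edge are
// recorded here and consumed later when PHI operands are wired up in
// finishPendingPhis.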
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creating a virtual register requires a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may move the insertion point, but that doesn't matter: a
  // return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went into optimizing switch
  // lowering in there.

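  // Each case is lowered to roughly (illustrative generic MI shape):
  //   %tst = G_ICMP eq %case-value, %switch-condition
  //   G_BRCOND %tst, %case-bb
  //   G_BR %next-test-bb
  // with a fresh fall-through block per case and a final branch to the
  // default destination.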
  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getOrCreateBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    MF->push_back(FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getOrCreateBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

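  // getIndexedOffsetInType returns the offset in bytes, but G_EXTRACT takes
  // a bit offset, hence the multiplication by 8.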
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

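  // As above, convert the byte offset from getIndexedOffsetInType into the
  // bit offset that G_INSERT expects.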
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

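// Bitcasts between types with the same low-level representation are no-ops,
// so reuse the source register (or emit a plain copy if a register for the
// result already exists) instead of creating a G_BITCAST.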
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

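  // Constant indices are folded into a running byte offset; a G_GEP is only
  // materialized when a variable index forces one, and any leftover constant
  // offset is applied in a single G_GEP at the end.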
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // If this is a constant index, fold it into the running offset.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemcpy(const CallInst &CI,
                                   MachineIRBuilder &MIRBuilder) {
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

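  // Describe the load for the benefit of later passes: the stack guard slot
  // is invariant and dereferenceable for the lifetime of the function.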
  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

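  // G_SEQUENCE packs both results into the single IR-level register: the
  // arithmetic result starts at bit 0 and the overflow flag immediately
  // follows it at bit Width.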
  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    unsigned Reg = getOrCreateVReg(*Address);
    auto RegDef = MRI->def_instr_begin(Reg);
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    if (RegDef != MRI->def_instr_end() &&
        RegDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
      MIRBuilder.buildFIDbgValue(RegDef->getOperand(1).getIndex(),
                                 DI.getVariable(), DI.getExpression());
    } else
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.  Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getOffset(), DI.getVariable(),
                                       DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getOffset(), DI.getVariable(),
                                    DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      if (DI.getOffset() != 0)
        MIRBuilder.buildIndirectDbgValue(Reg, DI.getOffset(), DI.getVariable(),
                                         DI.getExpression());
      else
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::memcpy:
    return translateMemcpy(CI, MIRBuilder);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (CI.isInlineAsm())
    return false;

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

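  // The callee is an intrinsic. Targets may define their own intrinsics, so
  // when the generic lookup fails, give TargetIntrinsicInfo a chance to
  // classify the callee.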
  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create values for
  // its exception pointer and selector. Extracting the exception pointer or
  // selector from a token-typed landingpad is not currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad.  Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(LLT{*Ty, *DL});
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    MBB.addLiveIn(Reg);
    unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    MBB.addLiveIn(Reg);
    unsigned VReg = MRI->createGenericVirtualRegister(Tys[1]);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(Tys[0].getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI,
                                         MachineIRBuilder &MIRBuilder) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

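// PHI nodes may reference blocks that haven't been translated yet, so only
// the result register is defined here; the incoming operands are added by
// finishPendingPhis once the whole machine CFG exists.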
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
  MachinePreds.clear();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up a separate basic-block for the arguments and constants, falling
  // through to the IR-level Function's entry block.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBB->addSuccessor(&getOrCreateBB(F.front()));
  EntryBuilder.setMBB(*EntryBB);

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MF->getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      finalizeFunction();
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      Succeeded &= translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  if (Succeeded) {
    finishPendingPhis();

    // Now that the MachineFrameInfo has been configured, no further changes to
    // the reserved registers are possible.
    MRI->freezeReservedRegs(*MF);

    // Merge the argument lowering and constants block with its single
    // successor, the LLVM-IR entry block.  We want the basic block to
    // be maximal.
    assert(EntryBB->succ_size() == 1 &&
           "Custom BB used for lowering should have only one successor");
    // Get the successor of the current entry block.
    MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
    assert(NewEntryBB.pred_size() == 1 &&
           "LLVM-IR entry block has a predecessor!?");
    // Move all the instructions from the current entry block to the
    // new entry block.
    NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                      EntryBB->end());

    // Update the live-in information for the new entry block.
    for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
      NewEntryBB.addLiveIn(LiveIn);
    NewEntryBB.sortUniqueLiveIns();

    // Get rid of the now empty basic block.
    EntryBB->removeSuccessor(&NewEntryBB);
    MF->remove(EntryBB);
    MF->DeleteMachineBasicBlock(EntryBB);

    assert(&MF->front() == &NewEntryBB &&
           "New entry wasn't next in the list of basic blocks!");
  }

  finalizeFunction();

  return false;
}