//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(const Value &V, const Twine &Message) {
  std::string ErrStorage;
  raw_string_ostream Err(ErrStorage);
  Err << Message << ": " << V << '\n';
  report_fatal_error(Err.str());
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

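/// Get the virtual register already assigned to \p Val, or lazily create one
/// sized after Val's type. Constants are translated eagerly here so that they
/// are materialized in the entry block (via EntryBuilder) and therefore
/// dominate every use.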
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  // If this is the first time we see Val, create a vreg sized after its type.
  if (!ValReg) {
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");
    unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), *DL});
    ValReg = VReg;

    if (auto CV = dyn_cast<Constant>(&Val)) {
      bool Success = translate(*CV, VReg);
      if (!Success) {
        if (!TPC->isGlobalISelAbortEnabled()) {
          MF->getProperties().set(
              MachineFunctionProperties::Property::FailedISel);
          return VReg;
        }
        reportTranslationError(Val, "unable to translate constant");
      }
    }
  }

  // translate() above may have called back into getOrCreateVReg and grown the
  // map, invalidating ValReg, so look Val up again.
  return ValToVReg[&Val];
}

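/// Get the frame index already created for \p AI, or create one: the object's
/// size is the alloca's element store size times its (constant) array size,
/// clamped to at least one byte; the alignment comes from the alloca or,
/// failing that, the ABI alignment of the allocated type.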
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (!TPC->isGlobalISelAbortEnabled()) {
    MF->getProperties().set(
        MachineFunctionProperties::Property::FailedISel);
    return 1;
  } else
    llvm_unreachable("unhandled memory instruction");

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getOrCreateBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  if (!MBB) {
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

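// Binary operators map one-to-one onto generic opcodes; e.g. an IR
//   %sum = add i32 %a, %b
// becomes, roughly, a three-vreg generic instruction such as
//   %sum(s32) = G_ADD %a, %b
// (MIR syntax abbreviated here).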
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // FIXME: constants could instead be materialized as immediates, or inlined
  // at each use; creating a virtual register requires the value to be sized.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());

  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess with the insertion point, but that is fine: a return
  // is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

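// A conditional branch becomes a G_BRCOND to the true successor followed by an
// unconditional G_BR to the false successor; an unconditional branch is just a
// G_BR. In both cases the IR successors are linked as machine CFG successors.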
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *SuccBB : BrInst.successors())
    CurBB.addSuccessor(&getOrCreateBB(*SuccBB));
  return true;
}

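// A switch is lowered as a chain: each case emits a G_ICMP eq plus a G_BRCOND
// to the case block, then a G_BR into a fresh block holding the next
// comparison; the chain ends with a G_BR to the default destination.
// addMachineCFGPred records that these synthesized blocks now stand in for
// the original IR block on each CFG edge, so PHI resolution can later find
// the right machine predecessors.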
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, most of the logic there looks independent of
  // SelectionDAG specifics, and a lot of work went into optimizing switch
  // lowering there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = LLT(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getOrCreateBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    MF->push_back(FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getOrCreateBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

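// Loads and stores become G_LOAD/G_STORE, carrying a MachineMemOperand that
// records the access's pointer, size, alignment, and volatility; atomic
// accesses are not handled yet.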
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && LI.isAtomic())
    return false;

  assert(!LI.isAtomic() && "only non-atomic loads are supported at the moment");
  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());
  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI)));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);

  if (!TPC->isGlobalISelAbortEnabled() && SI.isAtomic())
    return false;

  assert(!SI.isAtomic() && "only non-atomic stores supported at the moment");
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI)));
  return true;
}

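// Aggregates are represented as a single wide virtual register, so
// extractvalue/insertvalue become G_EXTRACT/G_INSERT at a bit offset. E.g.
// extracting field 1 of a {i32, i32} reads 32 bits at bit offset 32, assuming
// a typical data layout.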
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointee as a whole; prepend a zero so the remaining indices
  // address fields inside the aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, where the first index steps
  // over the pointee as a whole; prepend a zero so the remaining indices
  // address fields inside the aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // A bitcast between types with the same layout is a no-op: reuse the source
  // vreg where possible, or emit a COPY if one was already assigned.
  if (LLT{*U.getOperand(0)->getType(), *DL} == LLT{*U.getType(), *DL}) {
    unsigned &Reg = ValToVReg[&U];
    if (Reg)
      MIRBuilder.buildCopy(Reg, getOrCreateVReg(*U.getOperand(0)));
    else
      Reg = getOrCreateVReg(*U.getOperand(0));
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

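// A GEP is translated by folding all constant indices into a running byte
// offset and emitting G_MUL/G_GEP pairs for the dynamic ones; e.g., assuming
// a typical data layout,
//   %q = getelementptr i32, i32* %p, i64 %i
// becomes roughly: %off = G_MUL %i, 4; %q = G_GEP %p, %off.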
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy{*Op0.getType(), *DL};
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a constant index, fold it into the running offset.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      // Flush the accumulated constant offset before emitting the dynamic
      // part, so that the G_GEPs compose left to right.
      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildConstant(OffsetReg, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemcpy(const CallInst &CI,
                                   MachineIRBuilder &MIRBuilder) {
  // Only the simple form is handled: both pointers in address space 0 and a
  // size that is exactly pointer-sized.
  LLT SizeTy{*CI.getArgOperand(2)->getType(), *DL};
  if (cast<PointerType>(CI.getArgOperand(0)->getType())->getAddressSpace() !=
          0 ||
      cast<PointerType>(CI.getArgOperand(1)->getType())->getAddressSpace() !=
          0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  MachineOperand Callee = MachineOperand::CreateES("memcpy");

  return CLI->lowerCall(MIRBuilder, Callee,
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

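// Emit the target's LOAD_STACK_GUARD pseudo into DstReg, attaching an
// invariant, dereferenceable load memoperand when TLI knows the stack-guard
// global for this module.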
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

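// The *.with.overflow intrinsics return a {iN, i1} aggregate. Build the
// corresponding generic op with separate value and overflow defs, then pack
// the two into the aggregate vreg with G_SEQUENCE: the value at bit offset 0
// and the overflow bit right after it, at bit offset N (e.g. 32 for
// llvm.uadd.with.overflow.i32). The carry-in variants, G_UADDE and G_USUBE,
// additionally take a zero carry-in built in the entry block.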
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty{*CI.getOperand(0)->getType(), *DL};
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::dbg_declare:
  case Intrinsic::dbg_value:
    // FIXME: these obviously need to be supported properly.
    if (!TPC->isGlobalISelAbortEnabled())
      MF->getProperties().set(MachineFunctionProperties::Property::FailedISel);
    return true;
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::memcpy:
    return translateMemcpy(CI, MIRBuilder);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy{*CI.getArgOperand(0)->getType(), *DL};
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

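// Calls are dispatched three ways: non-intrinsic calls (direct or indirect)
// go through CallLowering; intrinsics with a dedicated lowering are handled
// by translateKnownIntrinsic; any remaining intrinsic becomes a generic
// G_INTRINSIC (or its side-effecting variant), with ConstantInt arguments
// emitted as immediates.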
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (CI.isInlineAsm())
    return false;

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    return CLI->lowerCall(MIRBuilder, CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    if (ConstantInt *ArgCI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(ArgCI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());

  if (!CLI->lowerCall(MIRBuilder, MachineOperand::CreateGA(Fn, 0),
                      CallLowering::ArgInfo(Res, I.getType()), Args))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getOrCreateBB(*EHPadBB),
                    &ReturnMBB = getOrCreateBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create
  // instructions for its exception pointer and selector values. Extracting
  // the exception pointer or selector from a token-typed landingpad is not
  // currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad.  Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  // Copy the exception pointer and selector out of their physical registers
  // into pointer-sized vregs, then pack them into the landingpad's vreg.
  SmallVector<unsigned, 2> Regs;
  SmallVector<uint64_t, 2> Offsets;
  LLT p0 = LLT::pointer(0, DL->getPointerSizeInBits());
  if (unsigned Reg = TLI.getExceptionPointerRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(0);
  }

  if (unsigned Reg = TLI.getExceptionSelectorRegister(PersonalityFn)) {
    unsigned VReg = MRI->createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(VReg, Reg);
    Regs.push_back(VReg);
    Offsets.push_back(p0.getSizeInBits());
  }

  MIRBuilder.buildSequence(getOrCreateVReg(LP), Regs, Offsets);
  return true;
}

bool IRTranslator::translateStaticAlloca(const AllocaInst &AI,
                                         MachineIRBuilder &MIRBuilder) {
  if (!TPC->isGlobalISelAbortEnabled() && !AI.isStaticAlloca())
    return false;

  assert(AI.isStaticAlloca() && "only handle static allocas now");
  unsigned Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  MIRBuilder.buildFrameIndex(Res, FI);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

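// PHIs are created in two phases: translatePHI emits an operand-less PHI def
// and queues it; once the whole function has been translated (so every block
// and incoming value has a machine equivalent), this fills in the
// (value, predecessor) pairs, consulting MachinePreds for IR edges that were
// split into several machine edges (e.g. by switch lowering).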
void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist by now, so add them to the PHI. We assume
    // IRTranslator won't create extra control flow here; otherwise we'd need
    // to find the dominating predecessor here (or perhaps force the weirder
    // IRTranslators to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    if (!TPC->isGlobalISelAbortEnabled())
      return false;
    llvm_unreachable("unknown opcode");
  }
}

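// Materialize a constant into Reg. This runs on EntryBuilder, so everything
// is emitted into the entry block, where it dominates all uses; constant
// expressions are re-dispatched through the per-opcode translators.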
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      if (!TPC->isGlobalISelAbortEnabled())
        return false;
      llvm_unreachable("unknown opcode");
    }
  } else if (!TPC->isGlobalISelAbortEnabled())
    return false;
  else
    llvm_unreachable("unhandled constant kind");

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we needed during the
  // translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  Constants.clear();
  MachinePreds.clear();
}

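// Main entry point: lower the formal arguments into a synthetic entry block,
// translate every instruction of every basic block, resolve the queued PHIs,
// and finally merge the synthetic entry block into the IR-level entry block
// so that the resulting MachineFunction starts with a single maximal block.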
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();

  assert(PendingPHIs.empty() && "stale PHIs");

  // Set up a separate basic-block for the arguments and constants, falling
  // through to the IR-level Function's entry block.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBB->addSuccessor(&getOrCreateBB(F.front()));
  EntryBuilder.setMBB(*EntryBB);

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  bool Succeeded = CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs);
  if (!Succeeded) {
    if (!TPC->isGlobalISelAbortEnabled()) {
      MF->getProperties().set(
          MachineFunctionProperties::Property::FailedISel);
      finalizeFunction();
      return false;
    }
    report_fatal_error("Unable to lower arguments");
  }

  // And translate the function!
  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getOrCreateBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst: BB) {
      Succeeded &= translate(Inst);
      if (!Succeeded) {
        if (TPC->isGlobalISelAbortEnabled())
          reportTranslationError(Inst, "unable to translate instruction");
        MF->getProperties().set(
            MachineFunctionProperties::Property::FailedISel);
        break;
      }
    }
  }

  if (Succeeded) {
    finishPendingPhis();

    // Now that the MachineFrameInfo has been configured, no further changes to
    // the reserved registers are possible.
    MRI->freezeReservedRegs(*MF);

    // Merge the argument lowering and constants block with its single
    // successor, the LLVM-IR entry block.  We want the basic block to
    // be maximal.
    assert(EntryBB->succ_size() == 1 &&
           "Custom BB used for lowering should have only one successor");
    // Get the successor of the current entry block.
    MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
    assert(NewEntryBB.pred_size() == 1 &&
           "LLVM-IR entry block has a predecessor!?");
    // Move all the instructions from the current entry block to the
    // new entry block.
    NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                      EntryBB->end());

    // Update the live-in information for the new entry block.
    for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
      NewEntryBB.addLiveIn(LiveIn);
    NewEntryBB.sortUniqueLiveIns();

    // Get rid of the now empty basic block.
    EntryBB->removeSuccessor(&NewEntryBB);
    MF->remove(EntryBB);

    assert(&MF->front() == &NewEntryBB &&
           "New entry wasn't next in the list of basic blocks!");
  }

  finalizeFunction();

  return false;
}