1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements the IRTranslator class.
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
14 #include "llvm/ADT/PostOrderIterator.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/ScopeExit.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
22 #include "llvm/CodeGen/LowLevelType.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackProtector.h"
31 #include "llvm/CodeGen/TargetFrameLowering.h"
32 #include "llvm/CodeGen/TargetLowering.h"
33 #include "llvm/CodeGen/TargetPassConfig.h"
34 #include "llvm/CodeGen/TargetRegisterInfo.h"
35 #include "llvm/CodeGen/TargetSubtargetInfo.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CFG.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugInfo.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GetElementPtrTypeIterator.h"
45 #include "llvm/IR/InlineAsm.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/IntrinsicInst.h"
49 #include "llvm/IR/Intrinsics.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/Metadata.h"
52 #include "llvm/IR/Type.h"
53 #include "llvm/IR/User.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/MC/MCContext.h"
56 #include "llvm/Pass.h"
57 #include "llvm/Support/Casting.h"
58 #include "llvm/Support/CodeGen.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/LowLevelTypeImpl.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "llvm/Target/TargetIntrinsicInfo.h"
65 #include "llvm/Target/TargetMachine.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstdint>
69 #include <iterator>
70 #include <string>
71 #include <utility>
72 #include <vector>
73 
74 #define DEBUG_TYPE "irtranslator"
75 
76 using namespace llvm;
77 
78 char IRTranslator::ID = 0;
79 
80 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
81                 false, false)
82 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
83 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
84                 false, false)
85 
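/// Mark the MachineFunction as having failed instruction selection and report
/// the failure: a fatal error if GlobalISel abort is enabled, otherwise an
/// optimization remark emitted through \p ORE.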
86 static void reportTranslationError(MachineFunction &MF,
87                                    const TargetPassConfig &TPC,
88                                    OptimizationRemarkEmitter &ORE,
89                                    OptimizationRemarkMissed &R) {
90   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
91 
92   // Print the function name explicitly if we don't have a debug location (which
93   // makes the diagnostic less useful) or if we're going to emit a raw error.
94   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
95     R << (" (in function: " + MF.getName() + ")").str();
96 
97   if (TPC.isGlobalISelAbortEnabled())
98     report_fatal_error(R.getMsg());
99   else
100     ORE.emit(R);
101 }
102 
103 IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
104   initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
105 }
106 
107 #ifndef NDEBUG
108 /// Verify that every instruction created has the same DILocation as the
109 /// instruction being translated.
110 class DILocationVerifier : MachineFunction::Delegate {
111   MachineFunction &MF;
112   const Instruction *CurrInst = nullptr;
113 
114 public:
115   DILocationVerifier(MachineFunction &MF) : MF(MF) { MF.setDelegate(this); }
116   ~DILocationVerifier() { MF.resetDelegate(this); }
117 
118   const Instruction *getCurrentInst() const { return CurrInst; }
119   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
120 
121   void MF_HandleInsertion(const MachineInstr &MI) override {
    assert(getCurrentInst() &&
           "Inserted instruction without a current IR instruction");
123 
124     // Only print the check message if we're actually checking it.
125 #ifndef NDEBUG
126     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
127                       << " was copied to " << MI);
128 #endif
129     assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
130            "Line info was not transferred to all instructions");
131   }
132   void MF_HandleRemoval(const MachineInstr &MI) override {}
133 };
134 #endif // ifndef NDEBUG
135 
137 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
138   AU.addRequired<StackProtector>();
139   AU.addRequired<TargetPassConfig>();
140   getSelectionDAGFallbackAnalysisUsage(AU);
141   MachineFunctionPass::getAnalysisUsage(AU);
142 }
143 
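/// Decompose \p Ty into its leaf LLTs, recursing through structs and arrays,
/// optionally recording each leaf's offset in bits from the start of the
/// value. This is the LLT counterpart of ComputeValueVTs. For example, under
/// a typical 64-bit DataLayout (assumed here for illustration) a {i64, i32}
/// struct yields s64 at bit offset 0 and s32 at bit offset 64.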
144 static void computeValueLLTs(const DataLayout &DL, Type &Ty,
145                              SmallVectorImpl<LLT> &ValueTys,
146                              SmallVectorImpl<uint64_t> *Offsets = nullptr,
147                              uint64_t StartingOffset = 0) {
148   // Given a struct type, recursively traverse the elements.
149   if (StructType *STy = dyn_cast<StructType>(&Ty)) {
150     const StructLayout *SL = DL.getStructLayout(STy);
151     for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
152       computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
153                        StartingOffset + SL->getElementOffset(I));
154     return;
155   }
156   // Given an array type, recursively traverse the elements.
157   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
158     Type *EltTy = ATy->getElementType();
159     uint64_t EltSize = DL.getTypeAllocSize(EltTy);
160     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
161       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
162                        StartingOffset + i * EltSize);
163     return;
164   }
165   // Interpret void as zero return values.
166   if (Ty.isVoidTy())
167     return;
168   // Base case: we can get an LLT for this LLVM IR type.
169   ValueTys.push_back(getLLTForType(Ty, DL));
170   if (Offsets != nullptr)
171     Offsets->push_back(StartingOffset * 8);
172 }
173 
174 IRTranslator::ValueToVRegInfo::VRegListT &
175 IRTranslator::allocateVRegs(const Value &Val) {
176   assert(!VMap.contains(Val) && "Value already allocated in VMap");
177   auto *Regs = VMap.getVRegs(Val);
178   auto *Offsets = VMap.getOffsets(Val);
179   SmallVector<LLT, 4> SplitTys;
180   computeValueLLTs(*DL, *Val.getType(), SplitTys,
181                    Offsets->empty() ? Offsets : nullptr);
182   for (unsigned i = 0; i < SplitTys.size(); ++i)
183     Regs->push_back(0);
184   return *Regs;
185 }
186 
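/// Return the virtual registers holding \p Val, creating them on first use.
/// Split types get one register per leaf LLT. For constants the registers are
/// also defined here, either by recursing into the aggregate elements or by
/// translating the constant directly.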
187 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
188   auto VRegsIt = VMap.findVRegs(Val);
189   if (VRegsIt != VMap.vregs_end())
190     return *VRegsIt->second;
191 
192   if (Val.getType()->isVoidTy())
193     return *VMap.getVRegs(Val);
194 
195   // Create entry for this type.
196   auto *VRegs = VMap.getVRegs(Val);
197   auto *Offsets = VMap.getOffsets(Val);
198 
199   assert(Val.getType()->isSized() &&
200          "Don't know how to create an empty vreg");
201 
202   SmallVector<LLT, 4> SplitTys;
203   computeValueLLTs(*DL, *Val.getType(), SplitTys,
204                    Offsets->empty() ? Offsets : nullptr);
205 
206   if (!isa<Constant>(Val)) {
207     for (auto Ty : SplitTys)
208       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
209     return *VRegs;
210   }
211 
212   if (Val.getType()->isAggregateType()) {
213     // UndefValue, ConstantAggregateZero
214     auto &C = cast<Constant>(Val);
215     unsigned Idx = 0;
216     while (auto Elt = C.getAggregateElement(Idx++)) {
217       auto EltRegs = getOrCreateVRegs(*Elt);
218       std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs));
219     }
220   } else {
221     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
222     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
223     bool Success = translate(cast<Constant>(Val), VRegs->front());
224     if (!Success) {
225       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
226                                  MF->getFunction().getSubprogram(),
227                                  &MF->getFunction().getEntryBlock());
228       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
229       reportTranslationError(*MF, *TPC, *ORE, R);
230       return *VRegs;
231     }
232   }
233 
234   return *VRegs;
235 }
236 
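/// Return the frame index for the static alloca \p AI, creating a stack
/// object with the alloca's size and alignment the first time it is seen.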
237 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
238   if (FrameIndices.find(&AI) != FrameIndices.end())
239     return FrameIndices[&AI];
240 
241   unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
242   unsigned Size =
243       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
244 
245   // Always allocate at least one byte.
246   Size = std::max(Size, 1u);
247 
248   unsigned Alignment = AI.getAlignment();
249   if (!Alignment)
250     Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
251 
252   int &FI = FrameIndices[&AI];
253   FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
254   return FI;
255 }
256 
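/// Compute the alignment of the memory access performed by \p I, falling back
/// to the ABI type alignment when the instruction carries no alignment.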
257 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
258   unsigned Alignment = 0;
259   Type *ValTy = nullptr;
260   if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
261     Alignment = SI->getAlignment();
262     ValTy = SI->getValueOperand()->getType();
263   } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
264     Alignment = LI->getAlignment();
265     ValTy = LI->getType();
266   } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
267     // TODO(PR27168): This instruction has no alignment attribute, but unlike
268     // the default alignment for load/store, the default here is to assume
269     // it has NATURAL alignment, not DataLayout-specified alignment.
270     const DataLayout &DL = AI->getModule()->getDataLayout();
271     Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
272     ValTy = AI->getCompareOperand()->getType();
273   } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
274     // TODO(PR27168): This instruction has no alignment attribute, but unlike
275     // the default alignment for load/store, the default here is to assume
276     // it has NATURAL alignment, not DataLayout-specified alignment.
277     const DataLayout &DL = AI->getModule()->getDataLayout();
278     Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
279     ValTy = AI->getType();
280   } else {
281     OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
282     R << "unable to translate memop: " << ore::NV("Opcode", &I);
283     reportTranslationError(*MF, *TPC, *ORE, R);
284     return 1;
285   }
286 
287   return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
288 }
289 
290 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
291   MachineBasicBlock *&MBB = BBToMBB[&BB];
292   assert(MBB && "BasicBlock was not encountered before");
293   return *MBB;
294 }
295 
296 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
297   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
298   MachinePreds[Edge].push_back(NewPred);
299 }
300 
301 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
302                                      MachineIRBuilder &MIRBuilder) {
303   // FIXME: handle signed/unsigned wrapping flags.
304 
  // Get or create a virtual register for each operand and for the result.
  // Constants could instead be materialized with a load-immediate or inlined
  // at each use, but for now they get vregs like any other value. Creating a
  // virtual register requires knowing the size of the value.
309   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
310   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
311   unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
313   if (isa<Instruction>(U)) {
314     MachineInstr *FBinOpMI = FBinOp.getInstr();
315     const Instruction &I = cast<Instruction>(U);
316     FBinOpMI->copyIRFlags(I);
317   }
318   return true;
319 }
320 
321 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
322   // -0.0 - X --> G_FNEG
323   if (isa<Constant>(U.getOperand(0)) &&
324       U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
325     MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
326         .addDef(getOrCreateVReg(U))
327         .addUse(getOrCreateVReg(*U.getOperand(1)));
328     return true;
329   }
330   return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
331 }
332 
333 bool IRTranslator::translateCompare(const User &U,
334                                     MachineIRBuilder &MIRBuilder) {
335   const CmpInst *CI = dyn_cast<CmpInst>(&U);
336   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
337   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
338   unsigned Res = getOrCreateVReg(U);
339   CmpInst::Predicate Pred =
340       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
341                                     cast<ConstantExpr>(U).getPredicate());
342   if (CmpInst::isIntPredicate(Pred))
343     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // Use U's type rather than CI's: CI is null when U is a ConstantExpr.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
350   else
351     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
352 
353   return true;
354 }
355 
356 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
357   const ReturnInst &RI = cast<ReturnInst>(U);
358   const Value *Ret = RI.getReturnValue();
359   if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
360     Ret = nullptr;
361 
362   ArrayRef<unsigned> VRegs;
363   if (Ret)
364     VRegs = getOrCreateVRegs(*Ret);
365 
  // The target may change the insertion point, but that does not matter here:
  // a return is always the last instruction of its basic block.
369 
370   return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
371 }
372 
373 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
374   const BranchInst &BrInst = cast<BranchInst>(U);
375   unsigned Succ = 0;
376   if (!BrInst.isUnconditional()) {
377     // We want a G_BRCOND to the true BB followed by an unconditional branch.
378     unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
379     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
380     MachineBasicBlock &TrueBB = getMBB(TrueTgt);
381     MIRBuilder.buildBrCond(Tst, TrueBB);
382   }
383 
384   const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
385   MachineBasicBlock &TgtBB = getMBB(BrTgt);
386   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
387 
  // If the unconditional target is the layout successor, fall through.
389   if (!CurBB.isLayoutSuccessor(&TgtBB))
390     MIRBuilder.buildBr(TgtBB);
391 
392   // Link successors.
393   for (const BasicBlock *Succ : successors(&BrInst))
394     CurBB.addSuccessor(&getMBB(*Succ));
395   return true;
396 }
397 
398 bool IRTranslator::translateSwitch(const User &U,
399                                    MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, most of the logic there seems independent of
  // SelectionDAG specifics, and a lot of work went into optimizing switch
  // lowering there.
406 
407   const SwitchInst &SwInst = cast<SwitchInst>(U);
408   const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
409   const BasicBlock *OrigBB = SwInst.getParent();
410 
411   LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
412   for (auto &CaseIt : SwInst.cases()) {
413     const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
414     const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
415     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
416     MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
417     const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
418     MachineBasicBlock &TrueMBB = getMBB(*TrueBB);
419 
420     MIRBuilder.buildBrCond(Tst, TrueMBB);
421     CurMBB.addSuccessor(&TrueMBB);
422     addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);
423 
424     MachineBasicBlock *FalseMBB =
425         MF->CreateMachineBasicBlock(SwInst.getParent());
426     // Insert the comparison blocks one after the other.
427     MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
428     MIRBuilder.buildBr(*FalseMBB);
429     CurMBB.addSuccessor(FalseMBB);
430 
431     MIRBuilder.setMBB(*FalseMBB);
432   }
  // Handle the default case.
434   const BasicBlock *DefaultBB = SwInst.getDefaultDest();
435   MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
436   MIRBuilder.buildBr(DefaultMBB);
437   MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
438   CurMBB.addSuccessor(&DefaultMBB);
439   addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);
440 
441   return true;
442 }
443 
444 bool IRTranslator::translateIndirectBr(const User &U,
445                                        MachineIRBuilder &MIRBuilder) {
446   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
447 
448   const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
449   MIRBuilder.buildBrIndirect(Tgt);
450 
451   // Link successors.
452   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
453   for (const BasicBlock *Succ : successors(&BrInst))
454     CurBB.addSuccessor(&getMBB(*Succ));
455 
456   return true;
457 }
458 
459 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
460   const LoadInst &LI = cast<LoadInst>(U);
461 
462   auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
463                                : MachineMemOperand::MONone;
464   Flags |= MachineMemOperand::MOLoad;
465 
466   if (DL->getTypeStoreSize(LI.getType()) == 0)
467     return true;
468 
469   ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
470   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
471   unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
472 
473   for (unsigned i = 0; i < Regs.size(); ++i) {
474     unsigned Addr = 0;
475     MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
476 
477     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
478     unsigned BaseAlign = getMemOpAlignment(LI);
479     auto MMO = MF->getMachineMemOperand(
480         Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
481         MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
482         LI.getSyncScopeID(), LI.getOrdering());
483     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
484   }
485 
486   return true;
487 }
488 
489 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
490   const StoreInst &SI = cast<StoreInst>(U);
491   auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
492                                : MachineMemOperand::MONone;
493   Flags |= MachineMemOperand::MOStore;
494 
495   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
496     return true;
497 
498   ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
499   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
500   unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
501 
502   for (unsigned i = 0; i < Vals.size(); ++i) {
503     unsigned Addr = 0;
504     MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
505 
506     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
507     unsigned BaseAlign = getMemOpAlignment(SI);
508     auto MMO = MF->getMachineMemOperand(
509         Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
510         MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
511         SI.getSyncScopeID(), SI.getOrdering());
512     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
513   }
514   return true;
515 }
516 
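/// Return the offset, in bits, of the aggregate element addressed by the
/// indices of the extractvalue/insertvalue (or equivalent constant
/// expression) \p U, computed via DataLayout::getIndexedOffsetInType with a
/// leading zero index.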
517 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
518   const Value *Src = U.getOperand(0);
519   Type *Int32Ty = Type::getInt32Ty(U.getContext());
520 
  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual pointer/array index rather than an index into the aggregate itself.
523   SmallVector<Value *, 1> Indices;
524   Indices.push_back(ConstantInt::get(Int32Ty, 0));
525 
526   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
527     for (auto Idx : EVI->indices())
528       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
529   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
530     for (auto Idx : IVI->indices())
531       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
532   } else {
533     for (unsigned i = 1; i < U.getNumOperands(); ++i)
534       Indices.push_back(U.getOperand(i));
535   }
536 
537   return 8 * static_cast<uint64_t>(
538                  DL.getIndexedOffsetInType(Src->getType(), Indices));
539 }
540 
541 bool IRTranslator::translateExtractValue(const User &U,
542                                          MachineIRBuilder &MIRBuilder) {
543   const Value *Src = U.getOperand(0);
544   uint64_t Offset = getOffsetFromIndices(U, *DL);
545   ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
546   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
547   unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
548                  Offsets.begin();
549   auto &DstRegs = allocateVRegs(U);
550 
551   for (unsigned i = 0; i < DstRegs.size(); ++i)
552     DstRegs[i] = SrcRegs[Idx++];
553 
554   return true;
555 }
556 
557 bool IRTranslator::translateInsertValue(const User &U,
558                                         MachineIRBuilder &MIRBuilder) {
559   const Value *Src = U.getOperand(0);
560   uint64_t Offset = getOffsetFromIndices(U, *DL);
561   auto &DstRegs = allocateVRegs(U);
562   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
563   ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
564   ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
565   auto InsertedIt = InsertedRegs.begin();
566 
567   for (unsigned i = 0; i < DstRegs.size(); ++i) {
568     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
569       DstRegs[i] = *InsertedIt++;
570     else
571       DstRegs[i] = SrcRegs[i];
572   }
573 
574   return true;
575 }
576 
577 bool IRTranslator::translateSelect(const User &U,
578                                    MachineIRBuilder &MIRBuilder) {
579   unsigned Tst = getOrCreateVReg(*U.getOperand(0));
580   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
581   ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
582   ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
583 
584   for (unsigned i = 0; i < ResRegs.size(); ++i)
585     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
586 
587   return true;
588 }
589 
590 bool IRTranslator::translateBitCast(const User &U,
591                                     MachineIRBuilder &MIRBuilder) {
592   // If we're bitcasting to the source type, we can reuse the source vreg.
593   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
594       getLLTForType(*U.getType(), *DL)) {
595     unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
596     auto &Regs = *VMap.getVRegs(U);
597     // If we already assigned a vreg for this bitcast, we can't change that.
598     // Emit a copy to satisfy the users we already emitted.
599     if (!Regs.empty())
600       MIRBuilder.buildCopy(Regs[0], SrcReg);
601     else {
602       Regs.push_back(SrcReg);
603       VMap.getOffsets(U)->push_back(0);
604     }
605     return true;
606   }
607   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
608 }
609 
610 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
611                                  MachineIRBuilder &MIRBuilder) {
612   unsigned Op = getOrCreateVReg(*U.getOperand(0));
613   unsigned Res = getOrCreateVReg(U);
614   MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
615   return true;
616 }
617 
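/// Lower a getelementptr to explicit pointer arithmetic: constant indices are
/// accumulated into a single offset, while dynamic indices are extended or
/// truncated to the pointer-width offset type, scaled by the element size and
/// added to the running base pointer.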
618 bool IRTranslator::translateGetElementPtr(const User &U,
619                                           MachineIRBuilder &MIRBuilder) {
620   // FIXME: support vector GEPs.
621   if (U.getType()->isVectorTy())
622     return false;
623 
624   Value &Op0 = *U.getOperand(0);
625   unsigned BaseReg = getOrCreateVReg(Op0);
626   Type *PtrIRTy = Op0.getType();
627   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
628   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
629   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
630 
631   int64_t Offset = 0;
632   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
633        GTI != E; ++GTI) {
634     const Value *Idx = GTI.getOperand();
635     if (StructType *StTy = GTI.getStructTypeOrNull()) {
636       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
637       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
638       continue;
639     } else {
640       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
641 
      // If this index is a constant, fold it into the running offset instead
      // of emitting arithmetic for it.
644       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
645         Offset += ElementSize * CI->getSExtValue();
646         continue;
647       }
648 
649       if (Offset != 0) {
650         unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
651         unsigned OffsetReg =
652             getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
653         MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
654 
655         BaseReg = NewBaseReg;
656         Offset = 0;
657       }
658 
659       unsigned IdxReg = getOrCreateVReg(*Idx);
660       if (MRI->getType(IdxReg) != OffsetTy) {
661         unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
662         MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
663         IdxReg = NewIdxReg;
664       }
665 
666       // N = N + Idx * ElementSize;
667       // Avoid doing it for ElementSize of 1.
668       unsigned GepOffsetReg;
669       if (ElementSize != 1) {
670         unsigned ElementSizeReg =
671             getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
672 
673         GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
674         MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
675       } else
676         GepOffsetReg = IdxReg;
677 
678       unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
679       MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
680       BaseReg = NewBaseReg;
681     }
682   }
683 
684   if (Offset != 0) {
685     unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
686     MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
687     return true;
688   }
689 
690   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
691   return true;
692 }
693 
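/// Lower the llvm.memcpy/memmove/memset intrinsic \p CI to a call to the
/// corresponding library function. Only address-space-0 pointers and a size
/// argument as wide as a pointer are handled; anything else is rejected.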
694 bool IRTranslator::translateMemfunc(const CallInst &CI,
695                                     MachineIRBuilder &MIRBuilder,
696                                     unsigned ID) {
697   LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
698   Type *DstTy = CI.getArgOperand(0)->getType();
699   if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
700       SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
701     return false;
702 
703   SmallVector<CallLowering::ArgInfo, 8> Args;
704   for (int i = 0; i < 3; ++i) {
705     const auto &Arg = CI.getArgOperand(i);
706     Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
707   }
708 
709   const char *Callee;
710   switch (ID) {
711   case Intrinsic::memmove:
712   case Intrinsic::memcpy: {
713     Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
715       return false;
716     Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
717     break;
718   }
719   case Intrinsic::memset:
720     Callee = "memset";
721     break;
722   default:
723     return false;
724   }
725 
726   return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
727                         MachineOperand::CreateES(Callee),
728                         CallLowering::ArgInfo(0, CI.getType()), Args);
729 }
730 
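/// Load the stack guard value into \p DstReg via LOAD_STACK_GUARD. When the
/// target exposes the guard as an IR global, an invariant, dereferenceable
/// memory operand referencing it is attached to the load.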
731 void IRTranslator::getStackGuard(unsigned DstReg,
732                                  MachineIRBuilder &MIRBuilder) {
733   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
734   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
735   auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
736   MIB.addDef(DstReg);
737 
738   auto &TLI = *MF->getSubtarget().getTargetLowering();
739   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
740   if (!Global)
741     return;
742 
743   MachinePointerInfo MPInfo(Global);
744   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
745                MachineMemOperand::MODereferenceable;
746   MachineMemOperand *MemRef =
747       MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
748                                DL->getPointerABIAlignment(0));
749   MIB.setMemRefs({MemRef});
750 }
751 
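/// Translate an arithmetic-with-overflow intrinsic into the two-result
/// generic opcode \p Op, defining both the value and the overflow-flag
/// registers of \p CI.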
752 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
753                                               MachineIRBuilder &MIRBuilder) {
754   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
755   MIRBuilder.buildInstr(Op)
756       .addDef(ResRegs[0])
757       .addDef(ResRegs[1])
758       .addUse(getOrCreateVReg(*CI.getOperand(0)))
759       .addUse(getOrCreateVReg(*CI.getOperand(1)));
760 
761   return true;
762 }
763 
764 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
765                                            MachineIRBuilder &MIRBuilder) {
766   switch (ID) {
767   default:
768     break;
769   case Intrinsic::lifetime_start:
770   case Intrinsic::lifetime_end:
    // Stack coloring is not enabled at O0 (the only level we currently care
    // about), so we can drop these markers. Make sure someone notices when we
    // start compiling at higher optimization levels, though.
774     if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
775       return false;
776     return true;
777   case Intrinsic::dbg_declare: {
778     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
779     assert(DI.getVariable() && "Missing variable");
780 
781     const Value *Address = DI.getAddress();
782     if (!Address || isa<UndefValue>(Address)) {
783       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
784       return true;
785     }
786 
787     assert(DI.getVariable()->isValidLocationForIntrinsic(
788                MIRBuilder.getDebugLoc()) &&
789            "Expected inlined-at fields to agree");
790     auto AI = dyn_cast<AllocaInst>(Address);
791     if (AI && AI->isStaticAlloca()) {
792       // Static allocas are tracked at the MF level, no need for DBG_VALUE
793       // instructions (in fact, they get ignored if they *do* exist).
794       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
795                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
796     } else {
797       // A dbg.declare describes the address of a source variable, so lower it
798       // into an indirect DBG_VALUE.
799       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
800                                        DI.getVariable(), DI.getExpression());
801     }
802     return true;
803   }
804   case Intrinsic::dbg_label: {
805     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
806     assert(DI.getLabel() && "Missing label");
807 
808     assert(DI.getLabel()->isValidLocationForIntrinsic(
809                MIRBuilder.getDebugLoc()) &&
810            "Expected inlined-at fields to agree");
811 
812     MIRBuilder.buildDbgLabel(DI.getLabel());
813     return true;
814   }
815   case Intrinsic::vaend:
816     // No target I know of cares about va_end. Certainly no in-tree target
817     // does. Simplest intrinsic ever!
818     return true;
819   case Intrinsic::vastart: {
820     auto &TLI = *MF->getSubtarget().getTargetLowering();
821     Value *Ptr = CI.getArgOperand(0);
822     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
823 
824     MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
825         .addUse(getOrCreateVReg(*Ptr))
826         .addMemOperand(MF->getMachineMemOperand(
827             MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
828     return true;
829   }
830   case Intrinsic::dbg_value: {
831     // This form of DBG_VALUE is target-independent.
832     const DbgValueInst &DI = cast<DbgValueInst>(CI);
833     const Value *V = DI.getValue();
834     assert(DI.getVariable()->isValidLocationForIntrinsic(
835                MIRBuilder.getDebugLoc()) &&
836            "Expected inlined-at fields to agree");
837     if (!V) {
838       // Currently the optimizer can produce this; insert an undef to
839       // help debugging.  Probably the optimizer should not do this.
840       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
841     } else if (const auto *CI = dyn_cast<Constant>(V)) {
842       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
843     } else {
844       unsigned Reg = getOrCreateVReg(*V);
845       // FIXME: This does not handle register-indirect values at offset 0. The
846       // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
848       // pretty baked in right now.
849       MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
850     }
851     return true;
852   }
853   case Intrinsic::uadd_with_overflow:
854     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
855   case Intrinsic::sadd_with_overflow:
856     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
857   case Intrinsic::usub_with_overflow:
858     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
859   case Intrinsic::ssub_with_overflow:
860     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
861   case Intrinsic::umul_with_overflow:
862     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
863   case Intrinsic::smul_with_overflow:
864     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
865   case Intrinsic::pow:
866     MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
867         .addDef(getOrCreateVReg(CI))
868         .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
869         .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
870     return true;
871   case Intrinsic::exp:
872     MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
873         .addDef(getOrCreateVReg(CI))
874         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
875     return true;
876   case Intrinsic::exp2:
877     MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
878         .addDef(getOrCreateVReg(CI))
879         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
880     return true;
881   case Intrinsic::log:
882     MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
883         .addDef(getOrCreateVReg(CI))
884         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
885     return true;
886   case Intrinsic::log2:
887     MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
888         .addDef(getOrCreateVReg(CI))
889         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
890     return true;
891   case Intrinsic::fabs:
892     MIRBuilder.buildInstr(TargetOpcode::G_FABS)
893         .addDef(getOrCreateVReg(CI))
894         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
895     return true;
896   case Intrinsic::trunc:
897     MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
898         .addDef(getOrCreateVReg(CI))
899         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
900     return true;
901   case Intrinsic::round:
902     MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
903         .addDef(getOrCreateVReg(CI))
904         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
905     return true;
906   case Intrinsic::fma:
907     MIRBuilder.buildInstr(TargetOpcode::G_FMA)
908         .addDef(getOrCreateVReg(CI))
909         .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
910         .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
911         .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
912     return true;
913   case Intrinsic::fmuladd: {
914     const TargetMachine &TM = MF->getTarget();
915     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
916     unsigned Dst = getOrCreateVReg(CI);
917     unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
918     unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
919     unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
920     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
921         TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
922       // TODO: Revisit this to see if we should move this part of the
923       // lowering to the combiner.
924       MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
925     } else {
926       LLT Ty = getLLTForType(*CI.getType(), *DL);
927       auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
928       MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
929     }
930     return true;
931   }
932   case Intrinsic::memcpy:
933   case Intrinsic::memmove:
934   case Intrinsic::memset:
935     return translateMemfunc(CI, MIRBuilder, ID);
936   case Intrinsic::eh_typeid_for: {
937     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
938     unsigned Reg = getOrCreateVReg(CI);
939     unsigned TypeID = MF->getTypeIDFor(GV);
940     MIRBuilder.buildConstant(Reg, TypeID);
941     return true;
942   }
943   case Intrinsic::objectsize: {
944     // If we don't know by now, we're never going to know.
945     const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
946 
947     MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
948     return true;
949   }
950   case Intrinsic::is_constant:
951     // If this wasn't constant-folded away by now, then it's not a
952     // constant.
953     MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
954     return true;
955   case Intrinsic::stackguard:
956     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
957     return true;
958   case Intrinsic::stackprotector: {
959     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
960     unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
961     getStackGuard(GuardVal, MIRBuilder);
962 
963     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
964     MIRBuilder.buildStore(
965         GuardVal, getOrCreateVReg(*Slot),
966         *MF->getMachineMemOperand(
967             MachinePointerInfo::getFixedStack(*MF,
968                                               getOrCreateFrameIndex(*Slot)),
969             MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
970             PtrTy.getSizeInBits() / 8, 8));
971     return true;
972   }
973   case Intrinsic::cttz:
974   case Intrinsic::ctlz: {
975     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
976     bool isTrailing = ID == Intrinsic::cttz;
977     unsigned Opcode = isTrailing
978                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
979                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
980                           : Cst->isZero() ? TargetOpcode::G_CTLZ
981                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
982     MIRBuilder.buildInstr(Opcode)
983         .addDef(getOrCreateVReg(CI))
984         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
985     return true;
986   }
987   case Intrinsic::ctpop: {
988     MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
989         .addDef(getOrCreateVReg(CI))
990         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
991     return true;
992   }
993   case Intrinsic::invariant_start: {
994     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
995     unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
996     MIRBuilder.buildUndef(Undef);
997     return true;
998   }
999   case Intrinsic::invariant_end:
1000     return true;
1001   }
1002   return false;
1003 }
1004 
1005 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1006                                       MachineIRBuilder &MIRBuilder) {
1007   const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1008   if (!IA.getConstraintString().empty())
1009     return false;
1010 
1011   unsigned ExtraInfo = 0;
1012   if (IA.hasSideEffects())
1013     ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1014   if (IA.getDialect() == InlineAsm::AD_Intel)
1015     ExtraInfo |= InlineAsm::Extra_AsmDialect;
1016 
1017   MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1018     .addExternalSymbol(IA.getAsmString().c_str())
1019     .addImm(ExtraInfo);
1020 
1021   return true;
1022 }
1023 
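/// Merge the split registers of \p V into a single register of the value's
/// overall type by chaining G_INSERTs into an undef value, so that the value
/// can be handed to call lowering as one register.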
1024 unsigned IRTranslator::packRegs(const Value &V,
1025                                   MachineIRBuilder &MIRBuilder) {
1026   ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1027   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1028   LLT BigTy = getLLTForType(*V.getType(), *DL);
1029 
1030   if (Regs.size() == 1)
1031     return Regs[0];
1032 
1033   unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
1034   MIRBuilder.buildUndef(Dst);
1035   for (unsigned i = 0; i < Regs.size(); ++i) {
1036     unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
1037     MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
1038     Dst = NewDst;
1039   }
1040   return Dst;
1041 }
1042 
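/// Inverse of packRegs: define each split register of \p V by extracting it
/// from the single register \p Src at its recorded offset.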
1043 void IRTranslator::unpackRegs(const Value &V, unsigned Src,
1044                                 MachineIRBuilder &MIRBuilder) {
1045   ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1046   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1047 
1048   for (unsigned i = 0; i < Regs.size(); ++i)
1049     MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
1050 }
1051 
1052 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1053   const CallInst &CI = cast<CallInst>(U);
1054   auto TII = MF->getTarget().getIntrinsicInfo();
1055   const Function *F = CI.getCalledFunction();
1056 
1057   // FIXME: support Windows dllimport function calls.
1058   if (F && F->hasDLLImportStorageClass())
1059     return false;
1060 
1061   if (CI.isInlineAsm())
1062     return translateInlineAsm(CI, MIRBuilder);
1063 
1064   Intrinsic::ID ID = Intrinsic::not_intrinsic;
1065   if (F && F->isIntrinsic()) {
1066     ID = F->getIntrinsicID();
1067     if (TII && ID == Intrinsic::not_intrinsic)
1068       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1069   }
1070 
1071   bool IsSplitType = valueIsSplit(CI);
1072   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
1073     unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
1074                                      getLLTForType(*CI.getType(), *DL))
1075                                : getOrCreateVReg(CI);
1076 
1077     SmallVector<unsigned, 8> Args;
1078     for (auto &Arg: CI.arg_operands())
1079       Args.push_back(packRegs(*Arg, MIRBuilder));
1080 
1081     MF->getFrameInfo().setHasCalls(true);
1082     bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
1083       return getOrCreateVReg(*CI.getCalledValue());
1084     });
1085 
1086     if (IsSplitType)
1087       unpackRegs(CI, Res, MIRBuilder);
1088     return Success;
1089   }
1090 
1091   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1092 
1093   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1094     return true;
1095 
1096   unsigned Res = 0;
1097   if (!CI.getType()->isVoidTy()) {
1098     if (IsSplitType)
1099       Res =
1100           MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
1101     else
1102       Res = getOrCreateVReg(CI);
1103   }
1104   MachineInstrBuilder MIB =
1105       MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());
1106 
1107   for (auto &Arg : CI.arg_operands()) {
1108     // Some intrinsics take metadata parameters. Reject them.
1109     if (isa<MetadataAsValue>(Arg))
1110       return false;
1111     MIB.addUse(packRegs(*Arg, MIRBuilder));
1112   }
1113 
1114   if (IsSplitType)
1115     unpackRegs(CI, Res, MIRBuilder);
1116 
1117   // Add a MachineMemOperand if it is a target mem intrinsic.
1118   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1119   TargetLowering::IntrinsicInfo Info;
1120   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1121   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1122     uint64_t Size = Info.memVT.getStoreSize();
1123     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1124                                                Info.flags, Size, Info.align));
1125   }
1126 
1127   return true;
1128 }
1129 
1130 bool IRTranslator::translateInvoke(const User &U,
1131                                    MachineIRBuilder &MIRBuilder) {
1132   const InvokeInst &I = cast<InvokeInst>(U);
1133   MCContext &Context = MF->getContext();
1134 
1135   const BasicBlock *ReturnBB = I.getSuccessor(0);
1136   const BasicBlock *EHPadBB = I.getSuccessor(1);
1137 
1138   const Value *Callee = I.getCalledValue();
1139   const Function *Fn = dyn_cast<Function>(Callee);
1140   if (isa<InlineAsm>(Callee))
1141     return false;
1142 
1143   // FIXME: support invoking patchpoint and statepoint intrinsics.
1144   if (Fn && Fn->isIntrinsic())
1145     return false;
1146 
1147   // FIXME: support whatever these are.
1148   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1149     return false;
1150 
1151   // FIXME: support Windows exception handling.
1152   if (!isa<LandingPadInst>(EHPadBB->front()))
1153     return false;
1154 
1155   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1156   // the region covered by the try.
1157   MCSymbol *BeginSymbol = Context.createTempSymbol();
1158   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1159 
1160   unsigned Res =
1161         MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
1162   SmallVector<unsigned, 8> Args;
1163   for (auto &Arg: I.arg_operands())
1164     Args.push_back(packRegs(*Arg, MIRBuilder));
1165 
1166   if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
1167                       [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
1168     return false;
1169 
1170   unpackRegs(I, Res, MIRBuilder);
1171 
1172   MCSymbol *EndSymbol = Context.createTempSymbol();
1173   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1174 
1175   // FIXME: track probabilities.
1176   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1177                     &ReturnMBB = getMBB(*ReturnBB);
1178   MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1179   MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1180   MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1181   MIRBuilder.buildBr(ReturnMBB);
1182 
1183   return true;
1184 }
1185 
1186 bool IRTranslator::translateLandingPad(const User &U,
1187                                        MachineIRBuilder &MIRBuilder) {
1188   const LandingPadInst &LP = cast<LandingPadInst>(U);
1189 
1190   MachineBasicBlock &MBB = MIRBuilder.getMBB();
1191 
1192   MBB.setIsEHPad();
1193 
1194   // If there aren't registers to copy the values into (e.g., during SjLj
1195   // exceptions), then don't bother.
1196   auto &TLI = *MF->getSubtarget().getTargetLowering();
1197   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1198   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1199       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1200     return true;
1201 
  // If the landingpad's return type is a token type, we don't create virtual
  // registers for its exception pointer and selector values. Extracting the
  // exception pointer or selector from a token-typed landingpad is not
  // currently supported.
1206   if (LP.getType()->isTokenTy())
1207     return true;
1208 
1209   // Add a label to mark the beginning of the landing pad.  Deletion of the
1210   // landing pad can thus be detected via the MachineModuleInfo.
1211   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1212     .addSym(MF->addLandingPad(&MBB));
1213 
1214   LLT Ty = getLLTForType(*LP.getType(), *DL);
1215   unsigned Undef = MRI->createGenericVirtualRegister(Ty);
1216   MIRBuilder.buildUndef(Undef);
1217 
1218   SmallVector<LLT, 2> Tys;
1219   for (Type *Ty : cast<StructType>(LP.getType())->elements())
1220     Tys.push_back(getLLTForType(*Ty, *DL));
1221   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1222 
1223   // Mark exception register as live in.
1224   unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1225   if (!ExceptionReg)
1226     return false;
1227 
1228   MBB.addLiveIn(ExceptionReg);
1229   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
1230   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1231 
1232   unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1233   if (!SelectorReg)
1234     return false;
1235 
1236   MBB.addLiveIn(SelectorReg);
1237   unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1238   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1239   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1240 
1241   return true;
1242 }
1243 
1244 bool IRTranslator::translateAlloca(const User &U,
1245                                    MachineIRBuilder &MIRBuilder) {
1246   auto &AI = cast<AllocaInst>(U);
1247 
1248   if (AI.isSwiftError())
1249     return false;
1250 
1251   if (AI.isStaticAlloca()) {
1252     unsigned Res = getOrCreateVReg(AI);
1253     int FI = getOrCreateFrameIndex(AI);
1254     MIRBuilder.buildFrameIndex(Res, FI);
1255     return true;
1256   }
1257 
1258   // FIXME: support stack probing for Windows.
1259   if (MF->getTarget().getTargetTriple().isOSWindows())
1260     return false;
1261 
1262   // Now we're in the harder dynamic case.
1263   Type *Ty = AI.getAllocatedType();
1264   unsigned Align =
1265       std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1266 
1267   unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
1268 
1269   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1270   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1271   if (MRI->getType(NumElts) != IntPtrTy) {
1272     unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1273     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1274     NumElts = ExtElts;
1275   }
1276 
1277   unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1278   unsigned TySize =
1279       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1280   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1281 
1282   LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1283   auto &TLI = *MF->getSubtarget().getTargetLowering();
1284   unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1285 
1286   unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1287   MIRBuilder.buildCopy(SPTmp, SPReg);
1288 
1289   unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1290   MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1291 
1292   // Handle alignment. We have to realign if the allocation granule was smaller
1293   // than stack alignment, or the specific alloca requires more than stack
1294   // alignment.
1295   unsigned StackAlign =
1296       MF->getSubtarget().getFrameLowering()->getStackAlignment();
1297   Align = std::max(Align, StackAlign);
1298   if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Align the allocated address down to the requested alignment by masking
    // off the low bits; at worst this reserves Align - 1 extra bytes below
    // the unaligned allocation.
1302     unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1303     MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1304     AllocTmp = AlignedAlloc;
1305   }
1306 
1307   MIRBuilder.buildCopy(SPReg, AllocTmp);
1308   MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1309 
1310   MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1311   assert(MF->getFrameInfo().hasVarSizedObjects());
1312   return true;
1313 }
1314 
1315 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1316   // FIXME: We may need more info about the type. Because of how LLT works,
1317   // we're completely discarding the i64/double distinction here (amongst
1318   // others). Fortunately the ABIs I know of where that matters don't use va_arg
1319   // anyway but that's not guaranteed.
1320   MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1321     .addDef(getOrCreateVReg(U))
1322     .addUse(getOrCreateVReg(*U.getOperand(0)))
1323     .addImm(DL->getABITypeAlignment(U.getType()));
1324   return true;
1325 }
1326 
1327 bool IRTranslator::translateInsertElement(const User &U,
1328                                           MachineIRBuilder &MIRBuilder) {
1329   // If it is a <1 x Ty> vector, use the scalar as it is
1330   // not a legal vector type in LLT.
1331   if (U.getType()->getVectorNumElements() == 1) {
1332     unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1333     auto &Regs = *VMap.getVRegs(U);
1334     if (Regs.empty()) {
1335       Regs.push_back(Elt);
1336       VMap.getOffsets(U)->push_back(0);
1337     } else {
1338       MIRBuilder.buildCopy(Regs[0], Elt);
1339     }
1340     return true;
1341   }
1342 
1343   unsigned Res = getOrCreateVReg(U);
1344   unsigned Val = getOrCreateVReg(*U.getOperand(0));
1345   unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1346   unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1347   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1348   return true;
1349 }
1350 
1351 bool IRTranslator::translateExtractElement(const User &U,
1352                                            MachineIRBuilder &MIRBuilder) {
1353   // If it is a <1 x Ty> vector, use the scalar as it is
1354   // not a legal vector type in LLT.
1355   if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1356     unsigned Elt = getOrCreateVReg(*U.getOperand(0));
1357     auto &Regs = *VMap.getVRegs(U);
1358     if (Regs.empty()) {
1359       Regs.push_back(Elt);
1360       VMap.getOffsets(U)->push_back(0);
1361     } else {
1362       MIRBuilder.buildCopy(Regs[0], Elt);
1363     }
1364     return true;
1365   }
1366   unsigned Res = getOrCreateVReg(U);
1367   unsigned Val = getOrCreateVReg(*U.getOperand(0));
1368   const auto &TLI = *MF->getSubtarget().getTargetLowering();
1369   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1370   unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

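// PHIs are created here with no incoming operands; the (value, block) pairs
// are filled in later by finishPendingPhis once every block has been
// translated and all machine predecessors are known.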
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

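// Translate cmpxchg into G_ATOMIC_CMPXCHG_WITH_SUCCESS. Weak cmpxchg is not
// handled yet, so it is reported as a translation failure.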
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

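// Translate atomicrmw by mapping the IR operation onto the corresponding
// G_ATOMICRMW_* opcode and attaching a load/store memory operand.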
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

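// Add the incoming (value, machine block) operands to every pending G_PHI now
// that all blocks exist. A single IR predecessor may correspond to several
// machine predecessors, so getMachinePredBBs is consulted and duplicate IR
// predecessors are skipped.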
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder.setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

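// Return true if the IR value is lowered to more than one virtual register
// (e.g. an aggregate), optionally collecting the offset of each component.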
bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

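// Dispatch an IR instruction to the matching translate<Opcode> method via
// Instruction.def; unhandled opcodes report a translation failure.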
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  EntryBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

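// Materialize a constant into the given register, emitting the defining
// instruction into the dedicated arguments/constants block via EntryBuilder.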
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // As we are trying to build a constant value of 0 into a pointer,
    // insert a cast to make the types correct.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder.buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder.buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

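// Reset all of the per-function state so the pass can safely be run on the
// next function.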
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}

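// Translate the whole function: create one machine basic block per IR block,
// lower the formal arguments, visit the blocks in reverse post-order to
// translate their instructions, and finally resolve the pending PHI nodes.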
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB: F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

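  // Lower the formal arguments into the virtual registers created above. If
  // the target's CallLowering cannot handle them, report a translation
  // failure.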
  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, EntryBuilder);
    }
    ArgIt++;
  }

  // Need to visit defs before uses when translating instructions.
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder.setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}