//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : MachineFunction::Delegate {
  MachineFunction &MF;
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier(MachineFunction &MF) : MF(MF) { MF.setDelegate(this); }
  ~DILocationVerifier() { MF.resetDelegate(this); }

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void MF_HandleInsertion(const MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
  void MF_HandleRemoval(const MachineInstr &MI) override {}
};
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
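  // For example, under a typical 64-bit DataLayout (an assumption; the exact
  // numbers are target-dependent), {i32, [2 x i16]} yields
  // ValueTys = {s32, s16, s16} and, if requested, Offsets = {0, 32, 48} in
  // bits: struct fields honor StructLayout padding, and array elements are
  // spaced by the element type's alloc size.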
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // Aggregate constants (e.g. UndefValue, ConstantAggregateZero) are
    // decomposed element by element, reusing each element's vregs.
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each operand and the result. A
  // Constant could instead be rematerialized as an immediate at each use,
  // but for now every value gets its own vreg; creating one requires a
  // sized type.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  if (const auto *I = dyn_cast<Instruction>(&U))
    FBinOp->copyIRFlags(*I);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  // The unary fneg has a single operand: the value being negated.
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
    // U may be a fcmp ConstantExpr, in which case there is no CmpInst to take
    // fast-math flags from.
    if (CI)
      FCmp->copyIRFlags(*CI);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may change the insertion point, but that is fine: a return is
  // the last instruction of the block anyway.

  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.
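  //
  // E.g. `switch i32 %v, label %def [ i32 1, label %a ]` becomes, roughly:
  //   %c(s1) = G_ICMP eq, %case1, %v; G_BRCOND %c, %bb.a; G_BR %bb.next
  // with one compare-and-branch block per case and a trailing G_BR to the
  // default destination.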

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

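  // Split values get one G_LOAD per part. E.g. loading a {i64, i32} emits two
  // loads at byte offsets 0 and 8 (assuming a typical 64-bit DataLayout),
  // each addressed via materializeGEP off the shared base and given the
  // alignment MinAlign(BaseAlign, PartOffset).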
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));
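  // E.g. for `extractvalue {i32, {i64, i8}} %agg, 1, 0` the index list
  // becomes [0, 1, 0], and getIndexedOffsetInType returns 8 bytes (assuming
  // a typical 64-bit DataLayout), which is then scaled to bits below.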

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    auto Select =
        MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
    if (Cmp && isa<FPMathOperator>(Cmp)) {
      Select->copyIRFlags(*Cmp);
    }
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

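  // Constant index contributions are folded into Offset and emitted as one
  // trailing G_GEP; dynamic indices emit G_MUL/G_GEP as we go. E.g.
  // `getelementptr {i32, i32}, {i32, i32}* %p, i64 %i, i32 1` becomes,
  // roughly (assuming 64-bit pointers): %s = G_MUL %i, 8; %a = G_GEP %p, %s;
  // %r = G_GEP %a, 4.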
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // If this is a scalar constant or a splat vector of constants,
    // handle it quickly.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg =
          getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

      BaseReg = NewBaseReg;
      Offset = 0;
    }

    unsigned IdxReg = getOrCreateVReg(*Idx);
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    // N = N + Idx * ElementSize;
    // Avoid doing it for ElementSize of 1.
    unsigned GepOffsetReg;
    if (ElementSize != 1) {
      unsigned ElementSizeReg =
          getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

      GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
    } else
      GepOffsetReg = IdxReg;

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
    BaseReg = NewBaseReg;
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
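  // The *.with.overflow intrinsics return a {result, overflow-bit} pair, so
  // getOrCreateVRegs hands back two vregs; both are defined by a single
  // two-def instruction such as G_UADDO.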
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.  Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow: {
    auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    Pow->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp: {
    auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp2: {
    auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log: {
    auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log2: {
    auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log10: {
    auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log10->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fabs: {
    auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Fabs->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::round:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma: {
    auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    FMA->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      auto FMA =
          MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
      FMA->copyIRFlags(CI);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
      FMul->copyIRFlags(CI);
      auto FAdd =
          MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
      FAdd->copyIRFlags(CI);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::ctpop: {
    MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
    .addExternalSymbol(IA.getAsmString().c_str())
    .addImm(ExtraInfo);

  return true;
}

unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

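  // Reassemble a split value (e.g. a struct spread across several vregs)
  // into one wide register: start from G_IMPLICIT_DEF and chain G_INSERTs at
  // each part's bit offset, so consumers that need a single register (calls,
  // intrinsics) can use the result.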
  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
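  // Call lowering produces at most one vreg per value, so a call whose result
  // is a split aggregate gets a single temporary wide vreg here and is
  // unpacked back into its per-element vregs after the call is lowered.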
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't emit anything
  // for its exception pointer and selector values: extracting the pointer or
  // selector from a token-typed landingpad is not currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad.  Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark the exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
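  // The sequence below is: AllocSize = NumElts * -TypeSize (negative, so
  // offsetting the stack pointer by it moves the stack down); SPTmp = copy of
  // SP; AllocTmp = G_GEP SPTmp, AllocSize; realign if needed; finally both SP
  // and the alloca's vreg are set to AllocTmp.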
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Align the pointer down by clearing its low bits. Since the stack grows
    // down, this at worst allocates a few extra padding bytes; it never
    // produces an under-aligned pointer.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
1356 
1357 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1358   // FIXME: We may need more info about the type. Because of how LLT works,
1359   // we're completely discarding the i64/double distinction here (amongst
1360   // others). Fortunately the ABIs I know of where that matters don't use va_arg
1361   // anyway but that's not guaranteed.
1362   MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1363     .addDef(getOrCreateVReg(U))
1364     .addUse(getOrCreateVReg(*U.getOperand(0)))
1365     .addImm(DL->getABITypeAlignment(U.getType()));
1366   return true;
1367 }
1368 
1369 bool IRTranslator::translateInsertElement(const User &U,
1370                                           MachineIRBuilder &MIRBuilder) {
1371   // If it is a <1 x Ty> vector, use the scalar as it is
1372   // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
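  // e.g. `%r = insertelement <4 x i32> %v, i32 %e, i32 1` becomes roughly
  // (sketch, with %idx holding the constant 1):
  //   %r:_(<4 x s32>) = G_INSERT_VECTOR_ELT %v, %e, %idx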
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty> is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
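  // The IR index may be narrower or wider than the width the target prefers
  // for vector indices (e.g. an i32 index on a target that prefers 64-bit
  // indices). Re-create constant indices at the preferred width here;
  // non-constant indices are extended or truncated below.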
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
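  // A strong cmpxchg returns { Ty, i1 }, so e.g.
  //   %res = cmpxchg i32* %p, i32 %cmp, i32 %new seq_cst seq_cst
  // becomes roughly (sketch):
  //   %old:_(s32), %ok:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %p(p0), %cmp, %new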

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder.setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist by now, so add the incoming values and
    // predecessor blocks to each component PHI. We assume the IRTranslator
    // won't create extra control flow here; otherwise we would need to find
    // the dominating predecessor (or perhaps force the weirder translations
    // to provide a simple boundary).
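    // For example, a PHI over a value split into two registers, with two
    // incoming IR blocks, ends up as two component G_PHIs, each of the form
    // (sketch): %r:_(sN) = G_PHI %v0(sN), %bb.1, %v1(sN), %bb.2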
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

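// A value is "split" when computeValueLLTs flattens its type into more than
// one LLT; e.g. with a typical DataLayout, a value of type {i64, i32} yields
// {s64, s32} with bit offsets {0, 64}, while a plain i64 stays whole.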
bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  EntryBuilder.setDebugLoc(Inst.getDebugLoc());
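  // Dispatch on the IR opcode. HANDLE_INST from Instruction.def expands to
  // one case per opcode, e.g.
  //   case Instruction::Add: return translateAdd(Inst, CurBuilder);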
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // We are materializing the integer constant 0 as a pointer value, so
    // build a pointer-sized integer zero and cast it to the pointer type.
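    // e.g. a null pointer in address space 0 on a 64-bit target becomes
    // roughly (sketch; buildCast picks the int-to-ptr cast here):
    //   %zero:_(s64) = G_CONSTANT i64 0
    //   %null:_(p0) = G_INTTOPTR %zero(s64)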
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder.buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                         \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
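    // Return the scalar if it is a <1 x Ty> vector.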
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder.buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear the
  // builders to avoid accessing freed memory (in runOnMachineFunction) and to
  // avoid destroying the DebugLoc twice (in ~IRTranslator() and
  // ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero-sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // Zero-sized arguments were skipped when VRegArgs was populated above, so
    // advance ArgIt past them here to keep the two sequences in sync.
    while (DL->getTypeStoreSize(ArgIt->getType()) == 0)
      ++ArgIt;
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, EntryBuilder);
    }
    ++ArgIt;
  }

  // Need to visit defs before uses when translating instructions.
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier(*MF);
#endif // ifndef NDEBUG
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder.setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block.  We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}