1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
19 #include "llvm/Analysis/ValueTracking.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
22 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
23 #include "llvm/CodeGen/LowLevelType.h"
24 #include "llvm/CodeGen/MachineBasicBlock.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/StackProtector.h"
32 #include "llvm/CodeGen/TargetFrameLowering.h"
33 #include "llvm/CodeGen/TargetLowering.h"
34 #include "llvm/CodeGen/TargetPassConfig.h"
35 #include "llvm/CodeGen/TargetRegisterInfo.h"
36 #include "llvm/CodeGen/TargetSubtargetInfo.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfo.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/InlineAsm.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/LowLevelTypeImpl.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Target/TargetIntrinsicInfo.h"
66 #include "llvm/Target/TargetMachine.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstdint>
70 #include <iterator>
71 #include <string>
72 #include <utility>
73 #include <vector>
74 
75 #define DEBUG_TYPE "irtranslator"
76 
77 using namespace llvm;
78 
79 static cl::opt<bool>
80     EnableCSEInIRTranslator("enable-cse-in-irtranslator",
81                             cl::desc("Should enable CSE in irtranslator"),
82                             cl::Optional, cl::init(false));
83 char IRTranslator::ID = 0;
84 
85 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
86                 false, false)
87 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
88 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
89 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
90                 false, false)
91 
92 static void reportTranslationError(MachineFunction &MF,
93                                    const TargetPassConfig &TPC,
94                                    OptimizationRemarkEmitter &ORE,
95                                    OptimizationRemarkMissed &R) {
96   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
97 
98   // Print the function name explicitly if we don't have a debug location (which
99   // makes the diagnostic less useful) or if we're going to emit a raw error.
100   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
101     R << (" (in function: " + MF.getName() + ")").str();
102 
103   if (TPC.isGlobalISelAbortEnabled())
104     report_fatal_error(R.getMsg());
105   else
106     ORE.emit(R);
107 }
108 
109 IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
110   initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
111 }
112 
113 #ifndef NDEBUG
114 namespace {
115 /// Verify that every instruction created has the same DILocation as the
116 /// instruction being translated.
117 class DILocationVerifier : public GISelChangeObserver {
118   const Instruction *CurrInst = nullptr;
119 
120 public:
121   DILocationVerifier() = default;
122   ~DILocationVerifier() = default;
123 
124   const Instruction *getCurrentInst() const { return CurrInst; }
125   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
126 
127   void erasingInstr(MachineInstr &MI) override {}
128   void changingInstr(MachineInstr &MI) override {}
129   void changedInstr(MachineInstr &MI) override {}
130 
131   void createdInstr(MachineInstr &MI) override {
132     assert(getCurrentInst() && "Inserted instruction without a current MI");
133 
134     // Only print the check message if we're actually checking it.
135 #ifndef NDEBUG
136     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
137                       << " was copied to " << MI);
138 #endif
139     assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
140            "Line info was not transferred to all instructions");
141   }
142 };
143 } // namespace
144 #endif // ifndef NDEBUG
145 
146 
147 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
148   AU.addRequired<StackProtector>();
149   AU.addRequired<TargetPassConfig>();
150   AU.addRequired<GISelCSEAnalysisWrapperPass>();
151   getSelectionDAGFallbackAnalysisUsage(AU);
152   MachineFunctionPass::getAnalysisUsage(AU);
153 }
154 
155 IRTranslator::ValueToVRegInfo::VRegListT &
156 IRTranslator::allocateVRegs(const Value &Val) {
157   assert(!VMap.contains(Val) && "Value already allocated in VMap");
158   auto *Regs = VMap.getVRegs(Val);
159   auto *Offsets = VMap.getOffsets(Val);
160   SmallVector<LLT, 4> SplitTys;
161   computeValueLLTs(*DL, *Val.getType(), SplitTys,
162                    Offsets->empty() ? Offsets : nullptr);
163   for (unsigned i = 0; i < SplitTys.size(); ++i)
164     Regs->push_back(0);
165   return *Regs;
166 }
167 
168 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
169   auto VRegsIt = VMap.findVRegs(Val);
170   if (VRegsIt != VMap.vregs_end())
171     return *VRegsIt->second;
172 
173   if (Val.getType()->isVoidTy())
174     return *VMap.getVRegs(Val);
175 
176   // Create entry for this type.
177   auto *VRegs = VMap.getVRegs(Val);
178   auto *Offsets = VMap.getOffsets(Val);
179 
180   assert(Val.getType()->isSized() &&
181          "Don't know how to create an empty vreg");
182 
183   SmallVector<LLT, 4> SplitTys;
184   computeValueLLTs(*DL, *Val.getType(), SplitTys,
185                    Offsets->empty() ? Offsets : nullptr);
186 
187   if (!isa<Constant>(Val)) {
188     for (auto Ty : SplitTys)
189       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
190     return *VRegs;
191   }
192 
193   if (Val.getType()->isAggregateType()) {
194     // UndefValue, ConstantAggregateZero
195     auto &C = cast<Constant>(Val);
196     unsigned Idx = 0;
197     while (auto Elt = C.getAggregateElement(Idx++)) {
198       auto EltRegs = getOrCreateVRegs(*Elt);
199       llvm::copy(EltRegs, std::back_inserter(*VRegs));
200     }
201   } else {
202     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
203     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
204     bool Success = translate(cast<Constant>(Val), VRegs->front());
205     if (!Success) {
206       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
207                                  MF->getFunction().getSubprogram(),
208                                  &MF->getFunction().getEntryBlock());
209       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
210       reportTranslationError(*MF, *TPC, *ORE, R);
211       return *VRegs;
212     }
213   }
214 
215   return *VRegs;
216 }
217 
218 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
219   if (FrameIndices.find(&AI) != FrameIndices.end())
220     return FrameIndices[&AI];
221 
222   unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
223   unsigned Size =
224       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
225 
226   // Always allocate at least one byte.
227   Size = std::max(Size, 1u);
228 
229   unsigned Alignment = AI.getAlignment();
230   if (!Alignment)
231     Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
232 
233   int &FI = FrameIndices[&AI];
234   FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
235   return FI;
236 }
237 
238 unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
239   unsigned Alignment = 0;
240   Type *ValTy = nullptr;
241   if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
242     Alignment = SI->getAlignment();
243     ValTy = SI->getValueOperand()->getType();
244   } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
245     Alignment = LI->getAlignment();
246     ValTy = LI->getType();
247   } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
248     // TODO(PR27168): This instruction has no alignment attribute, but unlike
249     // the default alignment for load/store, the default here is to assume
250     // it has NATURAL alignment, not DataLayout-specified alignment.
251     const DataLayout &DL = AI->getModule()->getDataLayout();
252     Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
253     ValTy = AI->getCompareOperand()->getType();
254   } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
255     // TODO(PR27168): This instruction has no alignment attribute, but unlike
256     // the default alignment for load/store, the default here is to assume
257     // it has NATURAL alignment, not DataLayout-specified alignment.
258     const DataLayout &DL = AI->getModule()->getDataLayout();
259     Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
260     ValTy = AI->getType();
261   } else {
262     OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
263     R << "unable to translate memop: " << ore::NV("Opcode", &I);
264     reportTranslationError(*MF, *TPC, *ORE, R);
265     return 1;
266   }
267 
268   return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
269 }
270 
271 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
272   MachineBasicBlock *&MBB = BBToMBB[&BB];
273   assert(MBB && "BasicBlock was not encountered before");
274   return *MBB;
275 }
276 
277 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
278   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
279   MachinePreds[Edge].push_back(NewPred);
280 }
281 
282 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
283                                      MachineIRBuilder &MIRBuilder) {
284   // FIXME: handle signed/unsigned wrapping flags.
285 
286   // Get or create a virtual register for each value.
287   // Unless the value is a Constant => loadimm cst?
288   // or inline constant each time?
289   // Creation of a virtual register needs to have a size.
290   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
291   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
292   unsigned Res = getOrCreateVReg(U);
293   uint16_t Flags = 0;
294   if (isa<Instruction>(U)) {
295     const Instruction &I = cast<Instruction>(U);
296     Flags = MachineInstr::copyFlagsFromInstruction(I);
297   }
298 
299   MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
300   return true;
301 }
302 
303 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
304   // -0.0 - X --> G_FNEG
305   if (isa<Constant>(U.getOperand(0)) &&
306       U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
307     MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
308         .addDef(getOrCreateVReg(U))
309         .addUse(getOrCreateVReg(*U.getOperand(1)));
310     return true;
311   }
312   return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
313 }
314 
315 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
316   MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
317       .addDef(getOrCreateVReg(U))
318       .addUse(getOrCreateVReg(*U.getOperand(0)));
319   return true;
320 }
321 
322 bool IRTranslator::translateCompare(const User &U,
323                                     MachineIRBuilder &MIRBuilder) {
324   const CmpInst *CI = dyn_cast<CmpInst>(&U);
325   unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
326   unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
327   unsigned Res = getOrCreateVReg(U);
328   CmpInst::Predicate Pred =
329       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
330                                     cast<ConstantExpr>(U).getPredicate());
331   if (CmpInst::isIntPredicate(Pred))
332     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
333   else if (Pred == CmpInst::FCMP_FALSE)
334     MIRBuilder.buildCopy(
335         Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
336   else if (Pred == CmpInst::FCMP_TRUE)
337     MIRBuilder.buildCopy(
338         Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
339   else {
340     MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
341                           MachineInstr::copyFlagsFromInstruction(*CI));
342   }
343 
344   return true;
345 }
346 
347 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
348   const ReturnInst &RI = cast<ReturnInst>(U);
349   const Value *Ret = RI.getReturnValue();
350   if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
351     Ret = nullptr;
352 
353   ArrayRef<unsigned> VRegs;
354   if (Ret)
355     VRegs = getOrCreateVRegs(*Ret);
356 
357   // The target may mess up with the insertion point, but
358   // this is not important as a return is the last instruction
359   // of the block anyway.
360 
361   return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
362 }
363 
364 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
365   const BranchInst &BrInst = cast<BranchInst>(U);
366   unsigned Succ = 0;
367   if (!BrInst.isUnconditional()) {
368     // We want a G_BRCOND to the true BB followed by an unconditional branch.
369     unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
370     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
371     MachineBasicBlock &TrueBB = getMBB(TrueTgt);
372     MIRBuilder.buildBrCond(Tst, TrueBB);
373   }
374 
375   const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
376   MachineBasicBlock &TgtBB = getMBB(BrTgt);
377   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
378 
379   // If the unconditional target is the layout successor, fallthrough.
380   if (!CurBB.isLayoutSuccessor(&TgtBB))
381     MIRBuilder.buildBr(TgtBB);
382 
383   // Link successors.
384   for (const BasicBlock *Succ : successors(&BrInst))
385     CurBB.addSuccessor(&getMBB(*Succ));
386   return true;
387 }
388 
389 bool IRTranslator::translateSwitch(const User &U,
390                                    MachineIRBuilder &MIRBuilder) {
391   // For now, just translate as a chain of conditional branches.
392   // FIXME: could we share most of the logic/code in
393   // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
394   // At first sight, it seems most of the logic in there is independent of
395   // SelectionDAG-specifics and a lot of work went in to optimize switch
396   // lowering in there.
397 
398   const SwitchInst &SwInst = cast<SwitchInst>(U);
399   const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
400   const BasicBlock *OrigBB = SwInst.getParent();
401 
402   LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
403   for (auto &CaseIt : SwInst.cases()) {
404     const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
405     const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
406     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
407     MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
408     const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
409     MachineBasicBlock &TrueMBB = getMBB(*TrueBB);
410 
411     MIRBuilder.buildBrCond(Tst, TrueMBB);
412     CurMBB.addSuccessor(&TrueMBB);
413     addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);
414 
415     MachineBasicBlock *FalseMBB =
416         MF->CreateMachineBasicBlock(SwInst.getParent());
417     // Insert the comparison blocks one after the other.
418     MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
419     MIRBuilder.buildBr(*FalseMBB);
420     CurMBB.addSuccessor(FalseMBB);
421 
422     MIRBuilder.setMBB(*FalseMBB);
423   }
424   // handle default case
425   const BasicBlock *DefaultBB = SwInst.getDefaultDest();
426   MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
427   MIRBuilder.buildBr(DefaultMBB);
428   MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
429   CurMBB.addSuccessor(&DefaultMBB);
430   addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);
431 
432   return true;
433 }
434 
435 bool IRTranslator::translateIndirectBr(const User &U,
436                                        MachineIRBuilder &MIRBuilder) {
437   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
438 
439   const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
440   MIRBuilder.buildBrIndirect(Tgt);
441 
442   // Link successors.
443   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
444   for (const BasicBlock *Succ : successors(&BrInst))
445     CurBB.addSuccessor(&getMBB(*Succ));
446 
447   return true;
448 }
449 
450 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
451   const LoadInst &LI = cast<LoadInst>(U);
452 
453   auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
454                                : MachineMemOperand::MONone;
455   Flags |= MachineMemOperand::MOLoad;
456 
457   if (DL->getTypeStoreSize(LI.getType()) == 0)
458     return true;
459 
460   ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
461   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
462   unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
463 
464   for (unsigned i = 0; i < Regs.size(); ++i) {
465     unsigned Addr = 0;
466     MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
467 
468     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
469     unsigned BaseAlign = getMemOpAlignment(LI);
470     auto MMO = MF->getMachineMemOperand(
471         Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
472         MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
473         LI.getSyncScopeID(), LI.getOrdering());
474     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
475   }
476 
477   return true;
478 }
479 
480 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
481   const StoreInst &SI = cast<StoreInst>(U);
482   auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
483                                : MachineMemOperand::MONone;
484   Flags |= MachineMemOperand::MOStore;
485 
486   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
487     return true;
488 
489   ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
490   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
491   unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
492 
493   for (unsigned i = 0; i < Vals.size(); ++i) {
494     unsigned Addr = 0;
495     MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);
496 
497     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
498     unsigned BaseAlign = getMemOpAlignment(SI);
499     auto MMO = MF->getMachineMemOperand(
500         Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
501         MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
502         SI.getSyncScopeID(), SI.getOrdering());
503     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
504   }
505   return true;
506 }
507 
508 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
509   const Value *Src = U.getOperand(0);
510   Type *Int32Ty = Type::getInt32Ty(U.getContext());
511 
512   // getIndexedOffsetInType is designed for GEPs, so the first index is the
513   // usual array element rather than looking into the actual aggregate.
514   SmallVector<Value *, 1> Indices;
515   Indices.push_back(ConstantInt::get(Int32Ty, 0));
516 
517   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
518     for (auto Idx : EVI->indices())
519       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
520   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
521     for (auto Idx : IVI->indices())
522       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
523   } else {
524     for (unsigned i = 1; i < U.getNumOperands(); ++i)
525       Indices.push_back(U.getOperand(i));
526   }
527 
528   return 8 * static_cast<uint64_t>(
529                  DL.getIndexedOffsetInType(Src->getType(), Indices));
530 }
531 
532 bool IRTranslator::translateExtractValue(const User &U,
533                                          MachineIRBuilder &MIRBuilder) {
534   const Value *Src = U.getOperand(0);
535   uint64_t Offset = getOffsetFromIndices(U, *DL);
536   ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
537   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
538   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
539   auto &DstRegs = allocateVRegs(U);
540 
541   for (unsigned i = 0; i < DstRegs.size(); ++i)
542     DstRegs[i] = SrcRegs[Idx++];
543 
544   return true;
545 }
546 
547 bool IRTranslator::translateInsertValue(const User &U,
548                                         MachineIRBuilder &MIRBuilder) {
549   const Value *Src = U.getOperand(0);
550   uint64_t Offset = getOffsetFromIndices(U, *DL);
551   auto &DstRegs = allocateVRegs(U);
552   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
553   ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
554   ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
555   auto InsertedIt = InsertedRegs.begin();
556 
557   for (unsigned i = 0; i < DstRegs.size(); ++i) {
558     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
559       DstRegs[i] = *InsertedIt++;
560     else
561       DstRegs[i] = SrcRegs[i];
562   }
563 
564   return true;
565 }
566 
567 bool IRTranslator::translateSelect(const User &U,
568                                    MachineIRBuilder &MIRBuilder) {
569   unsigned Tst = getOrCreateVReg(*U.getOperand(0));
570   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
571   ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
572   ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
573 
574   const SelectInst &SI = cast<SelectInst>(U);
575   uint16_t Flags = 0;
576   if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
577     Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
578 
579   for (unsigned i = 0; i < ResRegs.size(); ++i) {
580     MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
581                           {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
582   }
583 
584   return true;
585 }
586 
587 bool IRTranslator::translateBitCast(const User &U,
588                                     MachineIRBuilder &MIRBuilder) {
589   // If we're bitcasting to the source type, we can reuse the source vreg.
590   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
591       getLLTForType(*U.getType(), *DL)) {
592     unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
593     auto &Regs = *VMap.getVRegs(U);
594     // If we already assigned a vreg for this bitcast, we can't change that.
595     // Emit a copy to satisfy the users we already emitted.
596     if (!Regs.empty())
597       MIRBuilder.buildCopy(Regs[0], SrcReg);
598     else {
599       Regs.push_back(SrcReg);
600       VMap.getOffsets(U)->push_back(0);
601     }
602     return true;
603   }
604   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
605 }
606 
607 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
608                                  MachineIRBuilder &MIRBuilder) {
609   unsigned Op = getOrCreateVReg(*U.getOperand(0));
610   unsigned Res = getOrCreateVReg(U);
611   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
612   return true;
613 }
614 
615 bool IRTranslator::translateGetElementPtr(const User &U,
616                                           MachineIRBuilder &MIRBuilder) {
617   // FIXME: support vector GEPs.
618   if (U.getType()->isVectorTy())
619     return false;
620 
621   Value &Op0 = *U.getOperand(0);
622   unsigned BaseReg = getOrCreateVReg(Op0);
623   Type *PtrIRTy = Op0.getType();
624   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
625   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
626   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
627 
628   int64_t Offset = 0;
629   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
630        GTI != E; ++GTI) {
631     const Value *Idx = GTI.getOperand();
632     if (StructType *StTy = GTI.getStructTypeOrNull()) {
633       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
634       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
635       continue;
636     } else {
637       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
638 
639       // If this is a scalar constant or a splat vector of constants,
640       // handle it quickly.
641       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
642         Offset += ElementSize * CI->getSExtValue();
643         continue;
644       }
645 
646       if (Offset != 0) {
647         unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
648         LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
649         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
650         MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
651 
652         BaseReg = NewBaseReg;
653         Offset = 0;
654       }
655 
656       unsigned IdxReg = getOrCreateVReg(*Idx);
657       if (MRI->getType(IdxReg) != OffsetTy) {
658         unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
659         MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
660         IdxReg = NewIdxReg;
661       }
662 
663       // N = N + Idx * ElementSize;
664       // Avoid doing it for ElementSize of 1.
665       unsigned GepOffsetReg;
666       if (ElementSize != 1) {
667         GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
668         auto ElementSizeMIB = MIRBuilder.buildConstant(
669             getLLTForType(*OffsetIRTy, *DL), ElementSize);
670         MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
671       } else
672         GepOffsetReg = IdxReg;
673 
674       unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
675       MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
676       BaseReg = NewBaseReg;
677     }
678   }
679 
680   if (Offset != 0) {
681     auto OffsetMIB =
682         MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
683     MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
684     return true;
685   }
686 
687   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
688   return true;
689 }
690 
691 bool IRTranslator::translateMemfunc(const CallInst &CI,
692                                     MachineIRBuilder &MIRBuilder,
693                                     unsigned ID) {
694   LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
695   Type *DstTy = CI.getArgOperand(0)->getType();
696   if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
697       SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
698     return false;
699 
700   SmallVector<CallLowering::ArgInfo, 8> Args;
701   for (int i = 0; i < 3; ++i) {
702     const auto &Arg = CI.getArgOperand(i);
703     Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
704   }
705 
706   const char *Callee;
707   switch (ID) {
708   case Intrinsic::memmove:
709   case Intrinsic::memcpy: {
710     Type *SrcTy = CI.getArgOperand(1)->getType();
711     if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
712       return false;
713     Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
714     break;
715   }
716   case Intrinsic::memset:
717     Callee = "memset";
718     break;
719   default:
720     return false;
721   }
722 
723   return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
724                         MachineOperand::CreateES(Callee),
725                         CallLowering::ArgInfo(0, CI.getType()), Args);
726 }
727 
728 void IRTranslator::getStackGuard(unsigned DstReg,
729                                  MachineIRBuilder &MIRBuilder) {
730   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
731   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
732   auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
733   MIB.addDef(DstReg);
734 
735   auto &TLI = *MF->getSubtarget().getTargetLowering();
736   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
737   if (!Global)
738     return;
739 
740   MachinePointerInfo MPInfo(Global);
741   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
742                MachineMemOperand::MODereferenceable;
743   MachineMemOperand *MemRef =
744       MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
745                                DL->getPointerABIAlignment(0));
746   MIB.setMemRefs({MemRef});
747 }
748 
749 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
750                                               MachineIRBuilder &MIRBuilder) {
751   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
752   MIRBuilder.buildInstr(Op)
753       .addDef(ResRegs[0])
754       .addDef(ResRegs[1])
755       .addUse(getOrCreateVReg(*CI.getOperand(0)))
756       .addUse(getOrCreateVReg(*CI.getOperand(1)));
757 
758   return true;
759 }
760 
761 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
762   switch (ID) {
763     default:
764       break;
765     case Intrinsic::bswap:
766       return TargetOpcode::G_BSWAP;
767     case Intrinsic::ceil:
768       return TargetOpcode::G_FCEIL;
769     case Intrinsic::cos:
770       return TargetOpcode::G_FCOS;
771     case Intrinsic::ctpop:
772       return TargetOpcode::G_CTPOP;
773     case Intrinsic::exp:
774       return TargetOpcode::G_FEXP;
775     case Intrinsic::exp2:
776       return TargetOpcode::G_FEXP2;
777     case Intrinsic::fabs:
778       return TargetOpcode::G_FABS;
779     case Intrinsic::canonicalize:
780       return TargetOpcode::G_FCANONICALIZE;
781     case Intrinsic::floor:
782       return TargetOpcode::G_FFLOOR;
783     case Intrinsic::fma:
784       return TargetOpcode::G_FMA;
785     case Intrinsic::log:
786       return TargetOpcode::G_FLOG;
787     case Intrinsic::log2:
788       return TargetOpcode::G_FLOG2;
789     case Intrinsic::log10:
790       return TargetOpcode::G_FLOG10;
791     case Intrinsic::pow:
792       return TargetOpcode::G_FPOW;
793     case Intrinsic::round:
794       return TargetOpcode::G_INTRINSIC_ROUND;
795     case Intrinsic::sin:
796       return TargetOpcode::G_FSIN;
797     case Intrinsic::sqrt:
798       return TargetOpcode::G_FSQRT;
799     case Intrinsic::trunc:
800       return TargetOpcode::G_INTRINSIC_TRUNC;
801   }
802   return Intrinsic::not_intrinsic;
803 }
804 
805 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
806                                             Intrinsic::ID ID,
807                                             MachineIRBuilder &MIRBuilder) {
808 
809   unsigned Op = getSimpleIntrinsicOpcode(ID);
810 
811   // Is this a simple intrinsic?
812   if (Op == Intrinsic::not_intrinsic)
813     return false;
814 
815   // Yes. Let's translate it.
816   SmallVector<llvm::SrcOp, 4> VRegs;
817   for (auto &Arg : CI.arg_operands())
818     VRegs.push_back(getOrCreateVReg(*Arg));
819 
820   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
821                         MachineInstr::copyFlagsFromInstruction(CI));
822   return true;
823 }
824 
825 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
826                                            MachineIRBuilder &MIRBuilder) {
827 
828   // If this is a simple intrinsic (that is, we just need to add a def of
829   // a vreg, and uses for each arg operand, then translate it.
830   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
831     return true;
832 
833   switch (ID) {
834   default:
835     break;
836   case Intrinsic::lifetime_start:
837   case Intrinsic::lifetime_end: {
838     // No stack colouring in O0, discard region information.
839     if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
840       return true;
841 
842     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
843                                                   : TargetOpcode::LIFETIME_END;
844 
845     // Get the underlying objects for the location passed on the lifetime
846     // marker.
847     SmallVector<Value *, 4> Allocas;
848     GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
849 
850     // Iterate over each underlying object, creating lifetime markers for each
851     // static alloca. Quit if we find a non-static alloca.
852     for (Value *V : Allocas) {
853       AllocaInst *AI = dyn_cast<AllocaInst>(V);
854       if (!AI)
855         continue;
856 
857       if (!AI->isStaticAlloca())
858         return true;
859 
860       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
861     }
862     return true;
863   }
864   case Intrinsic::dbg_declare: {
865     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
866     assert(DI.getVariable() && "Missing variable");
867 
868     const Value *Address = DI.getAddress();
869     if (!Address || isa<UndefValue>(Address)) {
870       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
871       return true;
872     }
873 
874     assert(DI.getVariable()->isValidLocationForIntrinsic(
875                MIRBuilder.getDebugLoc()) &&
876            "Expected inlined-at fields to agree");
877     auto AI = dyn_cast<AllocaInst>(Address);
878     if (AI && AI->isStaticAlloca()) {
879       // Static allocas are tracked at the MF level, no need for DBG_VALUE
880       // instructions (in fact, they get ignored if they *do* exist).
881       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
882                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
883     } else {
884       // A dbg.declare describes the address of a source variable, so lower it
885       // into an indirect DBG_VALUE.
886       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
887                                        DI.getVariable(), DI.getExpression());
888     }
889     return true;
890   }
891   case Intrinsic::dbg_label: {
892     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
893     assert(DI.getLabel() && "Missing label");
894 
895     assert(DI.getLabel()->isValidLocationForIntrinsic(
896                MIRBuilder.getDebugLoc()) &&
897            "Expected inlined-at fields to agree");
898 
899     MIRBuilder.buildDbgLabel(DI.getLabel());
900     return true;
901   }
902   case Intrinsic::vaend:
903     // No target I know of cares about va_end. Certainly no in-tree target
904     // does. Simplest intrinsic ever!
905     return true;
906   case Intrinsic::vastart: {
907     auto &TLI = *MF->getSubtarget().getTargetLowering();
908     Value *Ptr = CI.getArgOperand(0);
909     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
910 
911     // FIXME: Get alignment
912     MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
913         .addUse(getOrCreateVReg(*Ptr))
914         .addMemOperand(MF->getMachineMemOperand(
915             MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
916     return true;
917   }
918   case Intrinsic::dbg_value: {
919     // This form of DBG_VALUE is target-independent.
920     const DbgValueInst &DI = cast<DbgValueInst>(CI);
921     const Value *V = DI.getValue();
922     assert(DI.getVariable()->isValidLocationForIntrinsic(
923                MIRBuilder.getDebugLoc()) &&
924            "Expected inlined-at fields to agree");
925     if (!V) {
926       // Currently the optimizer can produce this; insert an undef to
927       // help debugging.  Probably the optimizer should not do this.
928       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
929     } else if (const auto *CI = dyn_cast<Constant>(V)) {
930       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
931     } else {
932       unsigned Reg = getOrCreateVReg(*V);
933       // FIXME: This does not handle register-indirect values at offset 0. The
934       // direct/indirect thing shouldn't really be handled by something as
935       // implicit as reg+noreg vs reg+imm in the first palce, but it seems
936       // pretty baked in right now.
937       MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
938     }
939     return true;
940   }
941   case Intrinsic::uadd_with_overflow:
942     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
943   case Intrinsic::sadd_with_overflow:
944     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
945   case Intrinsic::usub_with_overflow:
946     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
947   case Intrinsic::ssub_with_overflow:
948     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
949   case Intrinsic::umul_with_overflow:
950     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
951   case Intrinsic::smul_with_overflow:
952     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
953   case Intrinsic::fmuladd: {
954     const TargetMachine &TM = MF->getTarget();
955     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
956     unsigned Dst = getOrCreateVReg(CI);
957     unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
958     unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
959     unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
960     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
961         TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
962       // TODO: Revisit this to see if we should move this part of the
963       // lowering to the combiner.
964       MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
965                             MachineInstr::copyFlagsFromInstruction(CI));
966     } else {
967       LLT Ty = getLLTForType(*CI.getType(), *DL);
968       auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
969                                         MachineInstr::copyFlagsFromInstruction(CI));
970       MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
971                             MachineInstr::copyFlagsFromInstruction(CI));
972     }
973     return true;
974   }
975   case Intrinsic::memcpy:
976   case Intrinsic::memmove:
977   case Intrinsic::memset:
978     return translateMemfunc(CI, MIRBuilder, ID);
979   case Intrinsic::eh_typeid_for: {
980     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
981     unsigned Reg = getOrCreateVReg(CI);
982     unsigned TypeID = MF->getTypeIDFor(GV);
983     MIRBuilder.buildConstant(Reg, TypeID);
984     return true;
985   }
986   case Intrinsic::objectsize: {
987     // If we don't know by now, we're never going to know.
988     const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
989 
990     MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
991     return true;
992   }
993   case Intrinsic::is_constant:
994     // If this wasn't constant-folded away by now, then it's not a
995     // constant.
996     MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
997     return true;
998   case Intrinsic::stackguard:
999     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1000     return true;
1001   case Intrinsic::stackprotector: {
1002     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1003     unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1004     getStackGuard(GuardVal, MIRBuilder);
1005 
1006     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1007     int FI = getOrCreateFrameIndex(*Slot);
1008     MF->getFrameInfo().setStackProtectorIndex(FI);
1009 
1010     MIRBuilder.buildStore(
1011         GuardVal, getOrCreateVReg(*Slot),
1012         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1013                                   MachineMemOperand::MOStore |
1014                                       MachineMemOperand::MOVolatile,
1015                                   PtrTy.getSizeInBits() / 8, 8));
1016     return true;
1017   }
1018   case Intrinsic::stacksave: {
1019     // Save the stack pointer to the location provided by the intrinsic.
1020     unsigned Reg = getOrCreateVReg(CI);
1021     unsigned StackPtr = MF->getSubtarget()
1022                             .getTargetLowering()
1023                             ->getStackPointerRegisterToSaveRestore();
1024 
1025     // If the target doesn't specify a stack pointer, then fall back.
1026     if (!StackPtr)
1027       return false;
1028 
1029     MIRBuilder.buildCopy(Reg, StackPtr);
1030     return true;
1031   }
1032   case Intrinsic::stackrestore: {
1033     // Restore the stack pointer from the location provided by the intrinsic.
1034     unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
1035     unsigned StackPtr = MF->getSubtarget()
1036                             .getTargetLowering()
1037                             ->getStackPointerRegisterToSaveRestore();
1038 
1039     // If the target doesn't specify a stack pointer, then fall back.
1040     if (!StackPtr)
1041       return false;
1042 
1043     MIRBuilder.buildCopy(StackPtr, Reg);
1044     return true;
1045   }
1046   case Intrinsic::cttz:
1047   case Intrinsic::ctlz: {
1048     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1049     bool isTrailing = ID == Intrinsic::cttz;
1050     unsigned Opcode = isTrailing
1051                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
1052                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
1053                           : Cst->isZero() ? TargetOpcode::G_CTLZ
1054                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1055     MIRBuilder.buildInstr(Opcode)
1056         .addDef(getOrCreateVReg(CI))
1057         .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1058     return true;
1059   }
1060   case Intrinsic::invariant_start: {
1061     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1062     unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
1063     MIRBuilder.buildUndef(Undef);
1064     return true;
1065   }
1066   case Intrinsic::invariant_end:
1067     return true;
1068   }
1069   return false;
1070 }
1071 
1072 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1073                                       MachineIRBuilder &MIRBuilder) {
1074   const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1075   if (!IA.getConstraintString().empty())
1076     return false;
1077 
1078   unsigned ExtraInfo = 0;
1079   if (IA.hasSideEffects())
1080     ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1081   if (IA.getDialect() == InlineAsm::AD_Intel)
1082     ExtraInfo |= InlineAsm::Extra_AsmDialect;
1083 
1084   MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1085     .addExternalSymbol(IA.getAsmString().c_str())
1086     .addImm(ExtraInfo);
1087 
1088   return true;
1089 }
1090 
1091 unsigned IRTranslator::packRegs(const Value &V,
1092                                   MachineIRBuilder &MIRBuilder) {
1093   ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1094   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1095   LLT BigTy = getLLTForType(*V.getType(), *DL);
1096 
1097   if (Regs.size() == 1)
1098     return Regs[0];
1099 
1100   unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
1101   MIRBuilder.buildUndef(Dst);
1102   for (unsigned i = 0; i < Regs.size(); ++i) {
1103     unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
1104     MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
1105     Dst = NewDst;
1106   }
1107   return Dst;
1108 }
1109 
1110 void IRTranslator::unpackRegs(const Value &V, unsigned Src,
1111                                 MachineIRBuilder &MIRBuilder) {
1112   ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1113   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
1114 
1115   for (unsigned i = 0; i < Regs.size(); ++i)
1116     MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
1117 }
1118 
1119 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1120   const CallInst &CI = cast<CallInst>(U);
1121   auto TII = MF->getTarget().getIntrinsicInfo();
1122   const Function *F = CI.getCalledFunction();
1123 
1124   // FIXME: support Windows dllimport function calls.
1125   if (F && F->hasDLLImportStorageClass())
1126     return false;
1127 
1128   if (CI.isInlineAsm())
1129     return translateInlineAsm(CI, MIRBuilder);
1130 
1131   Intrinsic::ID ID = Intrinsic::not_intrinsic;
1132   if (F && F->isIntrinsic()) {
1133     ID = F->getIntrinsicID();
1134     if (TII && ID == Intrinsic::not_intrinsic)
1135       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1136   }
1137 
1138   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
1139     bool IsSplitType = valueIsSplit(CI);
1140     unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
1141                                      getLLTForType(*CI.getType(), *DL))
1142                                : getOrCreateVReg(CI);
1143 
1144     SmallVector<unsigned, 8> Args;
1145     for (auto &Arg: CI.arg_operands())
1146       Args.push_back(packRegs(*Arg, MIRBuilder));
1147 
1148     MF->getFrameInfo().setHasCalls(true);
1149     bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
1150       return getOrCreateVReg(*CI.getCalledValue());
1151     });
1152 
1153     if (IsSplitType)
1154       unpackRegs(CI, Res, MIRBuilder);
1155     return Success;
1156   }
1157 
1158   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1159 
1160   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1161     return true;
1162 
1163   ArrayRef<unsigned> ResultRegs;
1164   if (!CI.getType()->isVoidTy())
1165     ResultRegs = getOrCreateVRegs(CI);
1166 
1167   MachineInstrBuilder MIB =
1168       MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
1169 
1170   for (auto &Arg : CI.arg_operands()) {
1171     // Some intrinsics take metadata parameters. Reject them.
1172     if (isa<MetadataAsValue>(Arg))
1173       return false;
1174     MIB.addUse(packRegs(*Arg, MIRBuilder));
1175   }
1176 
1177   // Add a MachineMemOperand if it is a target mem intrinsic.
1178   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1179   TargetLowering::IntrinsicInfo Info;
1180   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1181   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1182     unsigned Align = Info.align;
1183     if (Align == 0)
1184       Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
1185 
1186     uint64_t Size = Info.memVT.getStoreSize();
1187     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1188                                                Info.flags, Size, Align));
1189   }
1190 
1191   return true;
1192 }
1193 
1194 bool IRTranslator::translateInvoke(const User &U,
1195                                    MachineIRBuilder &MIRBuilder) {
1196   const InvokeInst &I = cast<InvokeInst>(U);
1197   MCContext &Context = MF->getContext();
1198 
1199   const BasicBlock *ReturnBB = I.getSuccessor(0);
1200   const BasicBlock *EHPadBB = I.getSuccessor(1);
1201 
1202   const Value *Callee = I.getCalledValue();
1203   const Function *Fn = dyn_cast<Function>(Callee);
1204   if (isa<InlineAsm>(Callee))
1205     return false;
1206 
1207   // FIXME: support invoking patchpoint and statepoint intrinsics.
1208   if (Fn && Fn->isIntrinsic())
1209     return false;
1210 
1211   // FIXME: support whatever these are.
1212   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1213     return false;
1214 
1215   // FIXME: support Windows exception handling.
1216   if (!isa<LandingPadInst>(EHPadBB->front()))
1217     return false;
1218 
1219   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1220   // the region covered by the try.
1221   MCSymbol *BeginSymbol = Context.createTempSymbol();
1222   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1223 
1224   unsigned Res = 0;
1225   if (!I.getType()->isVoidTy())
1226     Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
1227   SmallVector<unsigned, 8> Args;
1228   for (auto &Arg: I.arg_operands())
1229     Args.push_back(packRegs(*Arg, MIRBuilder));
1230 
1231   if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
1232                       [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
1233     return false;
1234 
1235   unpackRegs(I, Res, MIRBuilder);
1236 
1237   MCSymbol *EndSymbol = Context.createTempSymbol();
1238   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1239 
1240   // FIXME: track probabilities.
1241   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1242                     &ReturnMBB = getMBB(*ReturnBB);
1243   MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1244   MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1245   MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1246   MIRBuilder.buildBr(ReturnMBB);
1247 
1248   return true;
1249 }
1250 
1251 bool IRTranslator::translateCallBr(const User &U,
1252                                    MachineIRBuilder &MIRBuilder) {
1253   // FIXME: Implement this.
1254   return false;
1255 }
1256 
1257 bool IRTranslator::translateLandingPad(const User &U,
1258                                        MachineIRBuilder &MIRBuilder) {
1259   const LandingPadInst &LP = cast<LandingPadInst>(U);
1260 
1261   MachineBasicBlock &MBB = MIRBuilder.getMBB();
1262 
1263   MBB.setIsEHPad();
1264 
1265   // If there aren't registers to copy the values into (e.g., during SjLj
1266   // exceptions), then don't bother.
1267   auto &TLI = *MF->getSubtarget().getTargetLowering();
1268   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1269   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1270       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1271     return true;
1272 
1273   // If landingpad's return type is token type, we don't create DAG nodes
1274   // for its exception pointer and selector value. The extraction of exception
1275   // pointer or selector value from token type landingpads is not currently
1276   // supported.
1277   if (LP.getType()->isTokenTy())
1278     return true;
1279 
1280   // Add a label to mark the beginning of the landing pad.  Deletion of the
1281   // landing pad can thus be detected via the MachineModuleInfo.
1282   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1283     .addSym(MF->addLandingPad(&MBB));
1284 
1285   LLT Ty = getLLTForType(*LP.getType(), *DL);
1286   unsigned Undef = MRI->createGenericVirtualRegister(Ty);
1287   MIRBuilder.buildUndef(Undef);
1288 
1289   SmallVector<LLT, 2> Tys;
1290   for (Type *Ty : cast<StructType>(LP.getType())->elements())
1291     Tys.push_back(getLLTForType(*Ty, *DL));
1292   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1293 
1294   // Mark exception register as live in.
1295   unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1296   if (!ExceptionReg)
1297     return false;
1298 
1299   MBB.addLiveIn(ExceptionReg);
1300   ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
1301   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1302 
1303   unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1304   if (!SelectorReg)
1305     return false;
1306 
1307   MBB.addLiveIn(SelectorReg);
1308   unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1309   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1310   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1311 
1312   return true;
1313 }
1314 
1315 bool IRTranslator::translateAlloca(const User &U,
1316                                    MachineIRBuilder &MIRBuilder) {
1317   auto &AI = cast<AllocaInst>(U);
1318 
1319   if (AI.isSwiftError())
1320     return false;
1321 
1322   if (AI.isStaticAlloca()) {
1323     unsigned Res = getOrCreateVReg(AI);
1324     int FI = getOrCreateFrameIndex(AI);
1325     MIRBuilder.buildFrameIndex(Res, FI);
1326     return true;
1327   }
1328 
1329   // FIXME: support stack probing for Windows.
1330   if (MF->getTarget().getTargetTriple().isOSWindows())
1331     return false;
1332 
1333   // Now we're in the harder dynamic case.
1334   Type *Ty = AI.getAllocatedType();
1335   unsigned Align =
1336       std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1337 
1338   unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
1339 
1340   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1341   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1342   if (MRI->getType(NumElts) != IntPtrTy) {
1343     unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1344     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1345     NumElts = ExtElts;
1346   }
1347 
1348   unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1349   unsigned TySize =
1350       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1351   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1352 
1353   LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1354   auto &TLI = *MF->getSubtarget().getTargetLowering();
1355   unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
1356 
1357   unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1358   MIRBuilder.buildCopy(SPTmp, SPReg);
1359 
1360   unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1361   MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1362 
1363   // Handle alignment. We have to realign if the allocation granule was smaller
1364   // than stack alignment, or the specific alloca requires more than stack
1365   // alignment.
1366   unsigned StackAlign =
1367       MF->getSubtarget().getFrameLowering()->getStackAlignment();
1368   Align = std::max(Align, StackAlign);
1369   if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
1370     // Round the size of the allocation up to the stack alignment size
1371     // by add SA-1 to the size. This doesn't overflow because we're computing
1372     // an address inside an alloca.
1373     unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1374     MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1375     AllocTmp = AlignedAlloc;
1376   }
1377 
1378   MIRBuilder.buildCopy(SPReg, AllocTmp);
1379   MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1380 
1381   MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1382   assert(MF->getFrameInfo().hasVarSizedObjects());
1383   return true;
1384 }
1385 
1386 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1387   // FIXME: We may need more info about the type. Because of how LLT works,
1388   // we're completely discarding the i64/double distinction here (amongst
1389   // others). Fortunately the ABIs I know of where that matters don't use va_arg
1390   // anyway but that's not guaranteed.
1391   MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1392     .addDef(getOrCreateVReg(U))
1393     .addUse(getOrCreateVReg(*U.getOperand(0)))
1394     .addImm(DL->getABITypeAlignment(U.getType()));
1395   return true;
1396 }
1397 
1398 bool IRTranslator::translateInsertElement(const User &U,
1399                                           MachineIRBuilder &MIRBuilder) {
1400   // If it is a <1 x Ty> vector, use the scalar as it is
1401   // not a legal vector type in LLT.
1402   if (U.getType()->getVectorNumElements() == 1) {
1403     unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1404     auto &Regs = *VMap.getVRegs(U);
1405     if (Regs.empty()) {
1406       Regs.push_back(Elt);
1407       VMap.getOffsets(U)->push_back(0);
1408     } else {
1409       MIRBuilder.buildCopy(Regs[0], Elt);
1410     }
1411     return true;
1412   }
1413 
1414   unsigned Res = getOrCreateVReg(U);
1415   unsigned Val = getOrCreateVReg(*U.getOperand(0));
1416   unsigned Elt = getOrCreateVReg(*U.getOperand(1));
1417   unsigned Idx = getOrCreateVReg(*U.getOperand(2));
1418   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1419   return true;
1420 }
1421 
1422 bool IRTranslator::translateExtractElement(const User &U,
1423                                            MachineIRBuilder &MIRBuilder) {
1424   // If it is a <1 x Ty> vector, use the scalar as it is
1425   // not a legal vector type in LLT.
1426   if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1427     unsigned Elt = getOrCreateVReg(*U.getOperand(0));
1428     auto &Regs = *VMap.getVRegs(U);
1429     if (Regs.empty()) {
1430       Regs.push_back(Elt);
1431       VMap.getOffsets(U)->push_back(0);
1432     } else {
1433       MIRBuilder.buildCopy(Regs[0], Elt);
1434     }
1435     return true;
1436   }
1437   unsigned Res = getOrCreateVReg(U);
1438   unsigned Val = getOrCreateVReg(*U.getOperand(0));
1439   const auto &TLI = *MF->getSubtarget().getTargetLowering();
1440   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1441   unsigned Idx = 0;
1442   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1443     if (CI->getBitWidth() != PreferredVecIdxWidth) {
1444       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1445       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1446       Idx = getOrCreateVReg(*NewIdxCI);
1447     }
1448   }
1449   if (!Idx)
1450     Idx = getOrCreateVReg(*U.getOperand(1));
1451   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1452     const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1453     Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1454   }
1455   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1456   return true;
1457 }
1458 
1459 bool IRTranslator::translateShuffleVector(const User &U,
1460                                           MachineIRBuilder &MIRBuilder) {
1461   MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1462       .addDef(getOrCreateVReg(U))
1463       .addUse(getOrCreateVReg(*U.getOperand(0)))
1464       .addUse(getOrCreateVReg(*U.getOperand(1)))
1465       .addUse(getOrCreateVReg(*U.getOperand(2)));
1466   return true;
1467 }
1468 
1469 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1470   const PHINode &PI = cast<PHINode>(U);
1471 
1472   SmallVector<MachineInstr *, 4> Insts;
1473   for (auto Reg : getOrCreateVRegs(PI)) {
1474     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1475     Insts.push_back(MIB.getInstr());
1476   }
1477 
1478   PendingPHIs.emplace_back(&PI, std::move(Insts));
1479   return true;
1480 }
1481 
1482 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1483                                           MachineIRBuilder &MIRBuilder) {
1484   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1485 
1486   if (I.isWeak())
1487     return false;
1488 
1489   auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1490                               : MachineMemOperand::MONone;
1491   Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1492 
1493   Type *ResType = I.getType();
1494   Type *ValType = ResType->Type::getStructElementType(0);
1495 
1496   auto Res = getOrCreateVRegs(I);
1497   unsigned OldValRes = Res[0];
1498   unsigned SuccessRes = Res[1];
1499   unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1500   unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
1501   unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
1502 
1503   MIRBuilder.buildAtomicCmpXchgWithSuccess(
1504       OldValRes, SuccessRes, Addr, Cmp, NewVal,
1505       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1506                                 Flags, DL->getTypeStoreSize(ValType),
1507                                 getMemOpAlignment(I), AAMDNodes(), nullptr,
1508                                 I.getSyncScopeID(), I.getSuccessOrdering(),
1509                                 I.getFailureOrdering()));
1510   return true;
1511 }
1512 
1513 bool IRTranslator::translateAtomicRMW(const User &U,
1514                                       MachineIRBuilder &MIRBuilder) {
1515   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1516 
1517   auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1518                               : MachineMemOperand::MONone;
1519   Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1520 
1521   Type *ResType = I.getType();
1522 
1523   unsigned Res = getOrCreateVReg(I);
1524   unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
1525   unsigned Val = getOrCreateVReg(*I.getValOperand());
1526 
1527   unsigned Opcode = 0;
1528   switch (I.getOperation()) {
1529   default:
1530     llvm_unreachable("Unknown atomicrmw op");
1531     return false;
1532   case AtomicRMWInst::Xchg:
1533     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1534     break;
1535   case AtomicRMWInst::Add:
1536     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1537     break;
1538   case AtomicRMWInst::Sub:
1539     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1540     break;
1541   case AtomicRMWInst::And:
1542     Opcode = TargetOpcode::G_ATOMICRMW_AND;
1543     break;
1544   case AtomicRMWInst::Nand:
1545     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1546     break;
1547   case AtomicRMWInst::Or:
1548     Opcode = TargetOpcode::G_ATOMICRMW_OR;
1549     break;
1550   case AtomicRMWInst::Xor:
1551     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
1552     break;
1553   case AtomicRMWInst::Max:
1554     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
1555     break;
1556   case AtomicRMWInst::Min:
1557     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
1558     break;
1559   case AtomicRMWInst::UMax:
1560     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
1561     break;
1562   case AtomicRMWInst::UMin:
1563     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
1564     break;
1565   }
1566 
1567   MIRBuilder.buildAtomicRMW(
1568       Opcode, Res, Addr, Val,
1569       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1570                                 Flags, DL->getTypeStoreSize(ResType),
1571                                 getMemOpAlignment(I), AAMDNodes(), nullptr,
1572                                 I.getSyncScopeID(), I.getOrdering()));
1573   return true;
1574 }
1575 
1576 void IRTranslator::finishPendingPhis() {
1577 #ifndef NDEBUG
1578   DILocationVerifier Verifier;
1579   GISelObserverWrapper WrapperObserver(&Verifier);
1580   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1581 #endif // ifndef NDEBUG
1582   for (auto &Phi : PendingPHIs) {
1583     const PHINode *PI = Phi.first;
1584     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
1585     EntryBuilder->setDebugLoc(PI->getDebugLoc());
1586 #ifndef NDEBUG
1587     Verifier.setCurrentInst(PI);
1588 #endif // ifndef NDEBUG
1589 
1590     // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
1591     // won't create extra control flow here, otherwise we need to find the
1592     // dominating predecessor here (or perhaps force the weirder IRTranslators
1593     // to provide a simple boundary).
1594     SmallSet<const BasicBlock *, 4> HandledPreds;
1595 
1596     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
1597       auto IRPred = PI->getIncomingBlock(i);
1598       if (HandledPreds.count(IRPred))
1599         continue;
1600 
1601       HandledPreds.insert(IRPred);
1602       ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
1603       for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
1604         assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
1605                "incorrect CFG at MachineBasicBlock level");
1606         for (unsigned j = 0; j < ValRegs.size(); ++j) {
1607           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
1608           MIB.addUse(ValRegs[j]);
1609           MIB.addMBB(Pred);
1610         }
1611       }
1612     }
1613   }
1614 }
1615 
1616 bool IRTranslator::valueIsSplit(const Value &V,
1617                                 SmallVectorImpl<uint64_t> *Offsets) {
1618   SmallVector<LLT, 4> SplitTys;
1619   if (Offsets && !Offsets->empty())
1620     Offsets->clear();
1621   computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
1622   return SplitTys.size() > 1;
1623 }
1624 
1625 bool IRTranslator::translate(const Instruction &Inst) {
1626   CurBuilder->setDebugLoc(Inst.getDebugLoc());
1627   EntryBuilder->setDebugLoc(Inst.getDebugLoc());
1628   switch(Inst.getOpcode()) {
1629 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
1630   case Instruction::OPCODE:                                                    \
1631     return translate##OPCODE(Inst, *CurBuilder.get());
1632 #include "llvm/IR/Instruction.def"
1633   default:
1634     return false;
1635   }
1636 }
1637 
1638 bool IRTranslator::translate(const Constant &C, unsigned Reg) {
1639   if (auto CI = dyn_cast<ConstantInt>(&C))
1640     EntryBuilder->buildConstant(Reg, *CI);
1641   else if (auto CF = dyn_cast<ConstantFP>(&C))
1642     EntryBuilder->buildFConstant(Reg, *CF);
1643   else if (isa<UndefValue>(C))
1644     EntryBuilder->buildUndef(Reg);
1645   else if (isa<ConstantPointerNull>(C)) {
1646     // As we are trying to build a constant val of 0 into a pointer,
1647     // insert a cast to make them correct with respect to types.
1648     unsigned NullSize = DL->getTypeSizeInBits(C.getType());
1649     auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
1650     auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
1651     unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
1652     EntryBuilder->buildCast(Reg, ZeroReg);
1653   } else if (auto GV = dyn_cast<GlobalValue>(&C))
1654     EntryBuilder->buildGlobalValue(Reg, GV);
1655   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
1656     if (!CAZ->getType()->isVectorTy())
1657       return false;
1658     // Return the scalar if it is a <1 x Ty> vector.
1659     if (CAZ->getNumElements() == 1)
1660       return translate(*CAZ->getElementValue(0u), Reg);
1661     SmallVector<unsigned, 4> Ops;
1662     for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
1663       Constant &Elt = *CAZ->getElementValue(i);
1664       Ops.push_back(getOrCreateVReg(Elt));
1665     }
1666     EntryBuilder->buildBuildVector(Reg, Ops);
1667   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
1668     // Return the scalar if it is a <1 x Ty> vector.
1669     if (CV->getNumElements() == 1)
1670       return translate(*CV->getElementAsConstant(0), Reg);
1671     SmallVector<unsigned, 4> Ops;
1672     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
1673       Constant &Elt = *CV->getElementAsConstant(i);
1674       Ops.push_back(getOrCreateVReg(Elt));
1675     }
1676     EntryBuilder->buildBuildVector(Reg, Ops);
1677   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
1678     switch(CE->getOpcode()) {
1679 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
1680   case Instruction::OPCODE:                                                    \
1681     return translate##OPCODE(*CE, *EntryBuilder.get());
1682 #include "llvm/IR/Instruction.def"
1683     default:
1684       return false;
1685     }
1686   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
1687     if (CV->getNumOperands() == 1)
1688       return translate(*CV->getOperand(0), Reg);
1689     SmallVector<unsigned, 4> Ops;
1690     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
1691       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
1692     }
1693     EntryBuilder->buildBuildVector(Reg, Ops);
1694   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
1695     EntryBuilder->buildBlockAddress(Reg, BA);
1696   } else
1697     return false;
1698 
1699   return true;
1700 }
1701 
1702 void IRTranslator::finalizeFunction() {
1703   // Release the memory used by the different maps we
1704   // needed during the translation.
1705   PendingPHIs.clear();
1706   VMap.reset();
1707   FrameIndices.clear();
1708   MachinePreds.clear();
1709   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
1710   // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
1711   // destroying it twice (in ~IRTranslator() and ~LLVMContext())
1712   EntryBuilder.reset();
1713   CurBuilder.reset();
1714 }
1715 
1716 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
1717   MF = &CurMF;
1718   const Function &F = MF->getFunction();
1719   if (F.empty())
1720     return false;
1721   GISelCSEAnalysisWrapper &Wrapper =
1722       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
1723   // Set the CSEConfig and run the analysis.
1724   GISelCSEInfo *CSEInfo = nullptr;
1725   TPC = &getAnalysis<TargetPassConfig>();
1726   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
1727                        ? EnableCSEInIRTranslator
1728                        : TPC->isGISelCSEEnabled();
1729 
1730   if (EnableCSE) {
1731     EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
1732     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
1733     EntryBuilder->setCSEInfo(CSEInfo);
1734     CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
1735     CurBuilder->setCSEInfo(CSEInfo);
1736   } else {
1737     EntryBuilder = make_unique<MachineIRBuilder>();
1738     CurBuilder = make_unique<MachineIRBuilder>();
1739   }
1740   CLI = MF->getSubtarget().getCallLowering();
1741   CurBuilder->setMF(*MF);
1742   EntryBuilder->setMF(*MF);
1743   MRI = &MF->getRegInfo();
1744   DL = &F.getParent()->getDataLayout();
1745   ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
1746 
1747   assert(PendingPHIs.empty() && "stale PHIs");
1748 
1749   if (!DL->isLittleEndian()) {
1750     // Currently we don't properly handle big endian code.
1751     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1752                                F.getSubprogram(), &F.getEntryBlock());
1753     R << "unable to translate in big endian mode";
1754     reportTranslationError(*MF, *TPC, *ORE, R);
1755   }
1756 
1757   // Release the per-function state when we return, whether we succeeded or not.
1758   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
1759 
1760   // Setup a separate basic-block for the arguments and constants
1761   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
1762   MF->push_back(EntryBB);
1763   EntryBuilder->setMBB(*EntryBB);
1764 
1765   // Create all blocks, in IR order, to preserve the layout.
1766   for (const BasicBlock &BB: F) {
1767     auto *&MBB = BBToMBB[&BB];
1768 
1769     MBB = MF->CreateMachineBasicBlock(&BB);
1770     MF->push_back(MBB);
1771 
1772     if (BB.hasAddressTaken())
1773       MBB->setHasAddressTaken();
1774   }
1775 
1776   // Make our arguments/constants entry block fallthrough to the IR entry block.
1777   EntryBB->addSuccessor(&getMBB(F.front()));
1778 
1779   // Lower the actual args into this basic block.
1780   SmallVector<unsigned, 8> VRegArgs;
1781   for (const Argument &Arg: F.args()) {
1782     if (DL->getTypeStoreSize(Arg.getType()) == 0)
1783       continue; // Don't handle zero sized types.
1784     VRegArgs.push_back(
1785         MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
1786   }
1787 
1788   // We don't currently support translating swifterror or swiftself functions.
1789   for (auto &Arg : F.args()) {
1790     if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
1791       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1792                                  F.getSubprogram(), &F.getEntryBlock());
1793       R << "unable to lower arguments due to swifterror/swiftself: "
1794         << ore::NV("Prototype", F.getType());
1795       reportTranslationError(*MF, *TPC, *ORE, R);
1796       return false;
1797     }
1798   }
1799 
1800   if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
1801     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1802                                F.getSubprogram(), &F.getEntryBlock());
1803     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
1804     reportTranslationError(*MF, *TPC, *ORE, R);
1805     return false;
1806   }
1807 
1808   auto ArgIt = F.arg_begin();
1809   for (auto &VArg : VRegArgs) {
1810     // If the argument is an unsplit scalar then don't use unpackRegs to avoid
1811     // creating redundant copies.
1812     if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
1813       auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
1814       assert(VRegs.empty() && "VRegs already populated?");
1815       VRegs.push_back(VArg);
1816     } else {
1817       unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
1818     }
1819     ArgIt++;
1820   }
1821 
1822   // Need to visit defs before uses when translating instructions.
1823   GISelObserverWrapper WrapperObserver;
1824   if (EnableCSE && CSEInfo)
1825     WrapperObserver.addObserver(CSEInfo);
1826   {
1827     ReversePostOrderTraversal<const Function *> RPOT(&F);
1828 #ifndef NDEBUG
1829     DILocationVerifier Verifier;
1830     WrapperObserver.addObserver(&Verifier);
1831 #endif // ifndef NDEBUG
1832     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
1833     for (const BasicBlock *BB : RPOT) {
1834       MachineBasicBlock &MBB = getMBB(*BB);
1835       // Set the insertion point of all the following translations to
1836       // the end of this basic block.
1837       CurBuilder->setMBB(MBB);
1838 
1839       for (const Instruction &Inst : *BB) {
1840 #ifndef NDEBUG
1841         Verifier.setCurrentInst(&Inst);
1842 #endif // ifndef NDEBUG
1843         if (translate(Inst))
1844           continue;
1845 
1846         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1847                                    Inst.getDebugLoc(), BB);
1848         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
1849 
1850         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
1851           std::string InstStrStorage;
1852           raw_string_ostream InstStr(InstStrStorage);
1853           InstStr << Inst;
1854 
1855           R << ": '" << InstStr.str() << "'";
1856         }
1857 
1858         reportTranslationError(*MF, *TPC, *ORE, R);
1859         return false;
1860       }
1861     }
1862 #ifndef NDEBUG
1863     WrapperObserver.removeObserver(&Verifier);
1864 #endif
1865   }
1866 
1867   finishPendingPhis();
1868 
1869   // Merge the argument lowering and constants block with its single
1870   // successor, the LLVM-IR entry block.  We want the basic block to
1871   // be maximal.
1872   assert(EntryBB->succ_size() == 1 &&
1873          "Custom BB used for lowering should have only one successor");
1874   // Get the successor of the current entry block.
1875   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
1876   assert(NewEntryBB.pred_size() == 1 &&
1877          "LLVM-IR entry block has a predecessor!?");
1878   // Move all the instruction from the current entry block to the
1879   // new entry block.
1880   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
1881                     EntryBB->end());
1882 
1883   // Update the live-in information for the new entry block.
1884   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
1885     NewEntryBB.addLiveIn(LiveIn);
1886   NewEntryBB.sortUniqueLiveIns();
1887 
1888   // Get rid of the now empty basic block.
1889   EntryBB->removeSuccessor(&NewEntryBB);
1890   MF->remove(EntryBB);
1891   MF->DeleteMachineBasicBlock(EntryBB);
1892 
1893   assert(&MF->front() == &NewEntryBB &&
1894          "New entry wasn't next in the list of basic block!");
1895 
1896   // Initialize stack protector information.
1897   StackProtector &SP = getAnalysis<StackProtector>();
1898   SP.copyToMachineFrameInfo(MF->getFrameInfo());
1899 
1900   return false;
1901 }
1902