1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/BranchProbabilityInfo.h"
19 #include "llvm/Analysis/Loads.h"
20 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/FunctionLoweringInfo.h"
24 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
25 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
26 #include "llvm/CodeGen/LowLevelType.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineMemOperand.h"
32 #include "llvm/CodeGen/MachineOperand.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/StackProtector.h"
35 #include "llvm/CodeGen/TargetFrameLowering.h"
36 #include "llvm/CodeGen/TargetInstrInfo.h"
37 #include "llvm/CodeGen/TargetLowering.h"
38 #include "llvm/CodeGen/TargetPassConfig.h"
39 #include "llvm/CodeGen/TargetRegisterInfo.h"
40 #include "llvm/CodeGen/TargetSubtargetInfo.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/DebugInfo.h"
47 #include "llvm/IR/DerivedTypes.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/GetElementPtrTypeIterator.h"
50 #include "llvm/IR/InlineAsm.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/InitializePasses.h"
61 #include "llvm/MC/MCContext.h"
62 #include "llvm/Pass.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CodeGen.h"
65 #include "llvm/Support/Debug.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/LowLevelTypeImpl.h"
68 #include "llvm/Support/MathExtras.h"
69 #include "llvm/Support/raw_ostream.h"
70 #include "llvm/Target/TargetIntrinsicInfo.h"
71 #include "llvm/Target/TargetMachine.h"
72 #include <algorithm>
73 #include <cassert>
74 #include <cstdint>
75 #include <iterator>
76 #include <string>
77 #include <utility>
78 #include <vector>
79 
80 #define DEBUG_TYPE "irtranslator"
81 
82 using namespace llvm;
83 
84 static cl::opt<bool>
85     EnableCSEInIRTranslator("enable-cse-in-irtranslator",
86                             cl::desc("Should enable CSE in irtranslator"),
87                             cl::Optional, cl::init(false));
88 char IRTranslator::ID = 0;
89 
90 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
91                 false, false)
92 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
93 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
94 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
95                 false, false)
96 
97 static void reportTranslationError(MachineFunction &MF,
98                                    const TargetPassConfig &TPC,
99                                    OptimizationRemarkEmitter &ORE,
100                                    OptimizationRemarkMissed &R) {
101   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
102 
103   // Print the function name explicitly if we don't have a debug location (which
104   // makes the diagnostic less useful) or if we're going to emit a raw error.
105   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
106     R << (" (in function: " + MF.getName() + ")").str();
107 
108   if (TPC.isGlobalISelAbortEnabled())
109     report_fatal_error(R.getMsg());
110   else
111     ORE.emit(R);
112 }
113 
114 IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }
115 
116 #ifndef NDEBUG
117 namespace {
118 /// Verify that every instruction created has the same DILocation as the
119 /// instruction being translated.
120 class DILocationVerifier : public GISelChangeObserver {
121   const Instruction *CurrInst = nullptr;
122 
123 public:
124   DILocationVerifier() = default;
125   ~DILocationVerifier() = default;
126 
127   const Instruction *getCurrentInst() const { return CurrInst; }
128   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
129 
130   void erasingInstr(MachineInstr &MI) override {}
131   void changingInstr(MachineInstr &MI) override {}
132   void changedInstr(MachineInstr &MI) override {}
133 
134   void createdInstr(MachineInstr &MI) override {
135     assert(getCurrentInst() && "Inserted instruction without a current MI");
136 
137     // Only print the check message if we're actually checking it.
138 #ifndef NDEBUG
139     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
140                       << " was copied to " << MI);
141 #endif
142     // We allow insts in the entry block to have a debug loc line of 0 because
143     // they could have originated from constants, and we don't want a jumpy
144     // debug experience.
145     assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
146             MI.getDebugLoc().getLine() == 0) &&
147            "Line info was not transferred to all instructions");
148   }
149 };
150 } // namespace
151 #endif // ifndef NDEBUG
152 
153 
154 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
155   AU.addRequired<StackProtector>();
156   AU.addRequired<TargetPassConfig>();
157   AU.addRequired<GISelCSEAnalysisWrapperPass>();
158   getSelectionDAGFallbackAnalysisUsage(AU);
159   MachineFunctionPass::getAnalysisUsage(AU);
160 }
161 
162 IRTranslator::ValueToVRegInfo::VRegListT &
163 IRTranslator::allocateVRegs(const Value &Val) {
164   assert(!VMap.contains(Val) && "Value already allocated in VMap");
165   auto *Regs = VMap.getVRegs(Val);
166   auto *Offsets = VMap.getOffsets(Val);
167   SmallVector<LLT, 4> SplitTys;
168   computeValueLLTs(*DL, *Val.getType(), SplitTys,
169                    Offsets->empty() ? Offsets : nullptr);
170   for (unsigned i = 0; i < SplitTys.size(); ++i)
171     Regs->push_back(0);
172   return *Regs;
173 }
174 
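// Values are split into one virtual register per "leaf" scalar, with the
// matching bit offsets recorded in VMap. For illustration, assuming a typical
// DataLayout, a value of type {i32, [2 x i16]} maps to three vregs
// (s32, s16, s16) at bit offsets 0, 32 and 48.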
175 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
176   auto VRegsIt = VMap.findVRegs(Val);
177   if (VRegsIt != VMap.vregs_end())
178     return *VRegsIt->second;
179 
180   if (Val.getType()->isVoidTy())
181     return *VMap.getVRegs(Val);
182 
183   // Create entry for this type.
184   auto *VRegs = VMap.getVRegs(Val);
185   auto *Offsets = VMap.getOffsets(Val);
186 
187   assert(Val.getType()->isSized() &&
188          "Don't know how to create an empty vreg");
189 
190   SmallVector<LLT, 4> SplitTys;
191   computeValueLLTs(*DL, *Val.getType(), SplitTys,
192                    Offsets->empty() ? Offsets : nullptr);
193 
194   if (!isa<Constant>(Val)) {
195     for (auto Ty : SplitTys)
196       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
197     return *VRegs;
198   }
199 
200   if (Val.getType()->isAggregateType()) {
201     // UndefValue, ConstantAggregateZero
202     auto &C = cast<Constant>(Val);
203     unsigned Idx = 0;
204     while (auto Elt = C.getAggregateElement(Idx++)) {
205       auto EltRegs = getOrCreateVRegs(*Elt);
206       llvm::copy(EltRegs, std::back_inserter(*VRegs));
207     }
208   } else {
209     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
210     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
211     bool Success = translate(cast<Constant>(Val), VRegs->front());
212     if (!Success) {
213       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
214                                  MF->getFunction().getSubprogram(),
215                                  &MF->getFunction().getEntryBlock());
216       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
217       reportTranslationError(*MF, *TPC, *ORE, R);
218       return *VRegs;
219     }
220   }
221 
222   return *VRegs;
223 }
224 
225 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
226   if (FrameIndices.find(&AI) != FrameIndices.end())
227     return FrameIndices[&AI];
228 
229   uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
230   uint64_t Size =
231       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
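  // For example, `alloca i32, i32 8` has a 4-byte element size and an array
  // size of 8, so the resulting stack object is 32 bytes.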
232 
233   // Always allocate at least one byte.
234   Size = std::max<uint64_t>(Size, 1u);
235 
236   unsigned Alignment = AI.getAlignment();
237   if (!Alignment)
238     Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
239 
240   int &FI = FrameIndices[&AI];
241   FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
242   return FI;
243 }
244 
245 Align IRTranslator::getMemOpAlign(const Instruction &I) {
246   if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
247     Type *ValTy = SI->getValueOperand()->getType();
248     return DL->getValueOrABITypeAlignment(SI->getAlign(), ValTy);
249   }
250   if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
251     return DL->getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
252   }
253   if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
254     // TODO(PR27168): This instruction has no alignment attribute, but unlike
255     // the default alignment for load/store, the default here is to assume
256     // it has NATURAL alignment, not DataLayout-specified alignment.
257     const DataLayout &DL = AI->getModule()->getDataLayout();
258     return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType()));
259   }
260   if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
261     // TODO(PR27168): This instruction has no alignment attribute, but unlike
262     // the default alignment for load/store, the default here is to assume
263     // it has NATURAL alignment, not DataLayout-specified alignment.
264     const DataLayout &DL = AI->getModule()->getDataLayout();
265     return Align(DL.getTypeStoreSize(AI->getValOperand()->getType()));
266   }
267   OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
268   R << "unable to translate memop: " << ore::NV("Opcode", &I);
269   reportTranslationError(*MF, *TPC, *ORE, R);
270   return Align(1);
271 }
272 
273 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
274   MachineBasicBlock *&MBB = BBToMBB[&BB];
275   assert(MBB && "BasicBlock was not encountered before");
276   return *MBB;
277 }
278 
279 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
280   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
281   MachinePreds[Edge].push_back(NewPred);
282 }
283 
284 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
285                                      MachineIRBuilder &MIRBuilder) {
286   // Get or create a virtual register for each operand and for the result.
287   // TODO: if an operand is a Constant we could instead materialize it as an
288   // immediate, or rematerialize it at each use, rather than assigning a vreg.
289   // Note that creating a virtual register requires knowing its size/type.
290   Register Op0 = getOrCreateVReg(*U.getOperand(0));
291   Register Op1 = getOrCreateVReg(*U.getOperand(1));
292   Register Res = getOrCreateVReg(U);
293   uint16_t Flags = 0;
294   if (isa<Instruction>(U)) {
295     const Instruction &I = cast<Instruction>(U);
296     Flags = MachineInstr::copyFlagsFromInstruction(I);
297   }
298 
299   MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
300   return true;
301 }
302 
303 bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
304   // -0.0 - X --> G_FNEG
305   if (isa<Constant>(U.getOperand(0)) &&
306       U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
307     Register Op1 = getOrCreateVReg(*U.getOperand(1));
308     Register Res = getOrCreateVReg(U);
309     uint16_t Flags = 0;
310     if (isa<Instruction>(U)) {
311       const Instruction &I = cast<Instruction>(U);
312       Flags = MachineInstr::copyFlagsFromInstruction(I);
313     }
314     // Negate the last operand of the FSUB
315     MIRBuilder.buildFNeg(Res, Op1, Flags);
316     return true;
317   }
318   return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
319 }
320 
321 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
322   Register Op0 = getOrCreateVReg(*U.getOperand(0));
323   Register Res = getOrCreateVReg(U);
324   uint16_t Flags = 0;
325   if (isa<Instruction>(U)) {
326     const Instruction &I = cast<Instruction>(U);
327     Flags = MachineInstr::copyFlagsFromInstruction(I);
328   }
329   MIRBuilder.buildFNeg(Res, Op0, Flags);
330   return true;
331 }
332 
333 bool IRTranslator::translateCompare(const User &U,
334                                     MachineIRBuilder &MIRBuilder) {
335   auto *CI = dyn_cast<CmpInst>(&U);
336   Register Op0 = getOrCreateVReg(*U.getOperand(0));
337   Register Op1 = getOrCreateVReg(*U.getOperand(1));
338   Register Res = getOrCreateVReg(U);
339   CmpInst::Predicate Pred =
340       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
341                                     cast<ConstantExpr>(U).getPredicate());
342   if (CmpInst::isIntPredicate(Pred))
343     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
344   else if (Pred == CmpInst::FCMP_FALSE)
345     MIRBuilder.buildCopy(
346         Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
347   else if (Pred == CmpInst::FCMP_TRUE)
348     MIRBuilder.buildCopy(
349         Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
350   else {
351     assert(CI && "Instruction should be CmpInst");
352     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
353                          MachineInstr::copyFlagsFromInstruction(*CI));
354   }
355 
356   return true;
357 }
358 
359 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
360   const ReturnInst &RI = cast<ReturnInst>(U);
361   const Value *Ret = RI.getReturnValue();
362   if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
363     Ret = nullptr;
364 
365   ArrayRef<Register> VRegs;
366   if (Ret)
367     VRegs = getOrCreateVRegs(*Ret);
368 
369   Register SwiftErrorVReg = 0;
370   if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
371     SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
372         &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
373   }
374 
375   // The target may mess with the insertion point, but this is not
376   // important, as a return is the last instruction of the block
377   // anyway.
378   return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
379 }
380 
381 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
382   const BranchInst &BrInst = cast<BranchInst>(U);
383   unsigned Succ = 0;
384   if (!BrInst.isUnconditional()) {
385     // We want a G_BRCOND to the true BB followed by an unconditional branch.
386     Register Tst = getOrCreateVReg(*BrInst.getCondition());
387     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
388     MachineBasicBlock &TrueBB = getMBB(TrueTgt);
389     MIRBuilder.buildBrCond(Tst, TrueBB);
390   }
391 
392   const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
393   MachineBasicBlock &TgtBB = getMBB(BrTgt);
394   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
395 
396   // If the unconditional target is the layout successor, fallthrough.
397   if (!CurBB.isLayoutSuccessor(&TgtBB))
398     MIRBuilder.buildBr(TgtBB);
399 
400   // Link successors.
401   for (const BasicBlock *Succ : successors(&BrInst))
402     CurBB.addSuccessor(&getMBB(*Succ));
403   return true;
404 }
405 
406 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
407                                         MachineBasicBlock *Dst,
408                                         BranchProbability Prob) {
409   if (!FuncInfo.BPI) {
410     Src->addSuccessorWithoutProb(Dst);
411     return;
412   }
413   if (Prob.isUnknown())
414     Prob = getEdgeProbability(Src, Dst);
415   Src->addSuccessor(Dst, Prob);
416 }
417 
418 BranchProbability
419 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
420                                  const MachineBasicBlock *Dst) const {
421   const BasicBlock *SrcBB = Src->getBasicBlock();
422   const BasicBlock *DstBB = Dst->getBasicBlock();
423   if (!FuncInfo.BPI) {
424     // If BPI is not available, set the default probability as 1 / N, where N is
425     // the number of successors.
426     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
427     return BranchProbability(1, SuccSize);
428   }
429   return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
430 }
431 
432 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
433   using namespace SwitchCG;
434   // Extract cases from the switch.
435   const SwitchInst &SI = cast<SwitchInst>(U);
436   BranchProbabilityInfo *BPI = FuncInfo.BPI;
437   CaseClusterVector Clusters;
438   Clusters.reserve(SI.getNumCases());
439   for (auto &I : SI.cases()) {
440     MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
441     assert(Succ && "Could not find successor mbb in mapping");
442     const ConstantInt *CaseVal = I.getCaseValue();
443     BranchProbability Prob =
444         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
445             : BranchProbability(1, SI.getNumCases() + 1);
446     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
447   }
448 
449   MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
450 
451   // Cluster adjacent cases with the same destination. We do this at all
452   // optimization levels because it's cheap to do and will make codegen faster
453   // if there are many clusters.
454   sortAndRangeify(Clusters);
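  // For example, cases 1, 2 and 3 branching to %bb.a plus case 7 branching to
  // %bb.b (block names for illustration) become the clusters [1, 3] -> %bb.a
  // and [7, 7] -> %bb.b.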
455 
456   MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
457 
458   // If there is only the default destination, jump there directly.
459   if (Clusters.empty()) {
460     SwitchMBB->addSuccessor(DefaultMBB);
461     if (DefaultMBB != SwitchMBB->getNextNode())
462       MIB.buildBr(*DefaultMBB);
463     return true;
464   }
465 
466   SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
467 
468   LLVM_DEBUG({
469     dbgs() << "Case clusters: ";
470     for (const CaseCluster &C : Clusters) {
471       if (C.Kind == CC_JumpTable)
472         dbgs() << "JT:";
473       if (C.Kind == CC_BitTests)
474         dbgs() << "BT:";
475 
476       C.Low->getValue().print(dbgs(), true);
477       if (C.Low != C.High) {
478         dbgs() << '-';
479         C.High->getValue().print(dbgs(), true);
480       }
481       dbgs() << ' ';
482     }
483     dbgs() << '\n';
484   });
485 
486   assert(!Clusters.empty());
487   SwitchWorkList WorkList;
488   CaseClusterIt First = Clusters.begin();
489   CaseClusterIt Last = Clusters.end() - 1;
490   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
491   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
492 
493   // FIXME: At the moment we don't do any splitting optimizations here like
494   // SelectionDAG does, so this worklist only has one entry.
495   while (!WorkList.empty()) {
496     SwitchWorkListItem W = WorkList.back();
497     WorkList.pop_back();
498     if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
499       return false;
500   }
501   return true;
502 }
503 
504 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
505                                  MachineBasicBlock *MBB) {
506   // Emit the code for the jump table
507   assert(JT.Reg != -1U && "Should lower JT Header first!");
508   MachineIRBuilder MIB(*MBB->getParent());
509   MIB.setMBB(*MBB);
510   MIB.setDebugLoc(CurBuilder->getDebugLoc());
511 
512   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
513   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
514 
515   auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
516   MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
517 }
518 
519 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
520                                        SwitchCG::JumpTableHeader &JTH,
521                                        MachineBasicBlock *HeaderBB) {
522   MachineIRBuilder MIB(*HeaderBB->getParent());
523   MIB.setMBB(*HeaderBB);
524   MIB.setDebugLoc(CurBuilder->getDebugLoc());
525 
526   const Value &SValue = *JTH.SValue;
527   // Subtract the lowest switch case value from the value being switched on.
528   const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
529   Register SwitchOpReg = getOrCreateVReg(SValue);
530   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
531   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
532 
533   // This value may be smaller or larger than the target's pointer type, and
534   // therefore may require extension or truncation.
535   Type *PtrIRTy = SValue.getType()->getPointerTo();
536   const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
537   Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
538 
539   JT.Reg = Sub.getReg(0);
540 
541   if (JTH.OmitRangeCheck) {
542     if (JT.MBB != HeaderBB->getNextNode())
543       MIB.buildBr(*JT.MBB);
544     return true;
545   }
546 
547   // Emit the range check for the jump table, and branch to the default block
548   // for the switch statement if the value being switched on exceeds the
549   // largest case in the switch.
550   auto Cst = getOrCreateVReg(
551       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
552   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
553   auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
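  // Because the subtraction above wraps, any value below the first case turns
  // into a large unsigned number, so this single unsigned comparison also
  // routes such values to the default block.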
554 
555   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
556 
557   // Avoid emitting unnecessary branches to the next block.
558   if (JT.MBB != HeaderBB->getNextNode())
559     BrCond = MIB.buildBr(*JT.MBB);
560   return true;
561 }
562 
563 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
564                                   MachineBasicBlock *SwitchBB,
565                                   MachineIRBuilder &MIB) {
566   Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
567   Register Cond;
568   DebugLoc OldDbgLoc = MIB.getDebugLoc();
569   MIB.setDebugLoc(CB.DbgLoc);
570   MIB.setMBB(*CB.ThisBB);
571 
572   if (CB.PredInfo.NoCmp) {
573     // Branch or fall through to TrueBB.
574     addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
575     addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
576                       CB.ThisBB);
577     CB.ThisBB->normalizeSuccProbs();
578     if (CB.TrueBB != CB.ThisBB->getNextNode())
579       MIB.buildBr(*CB.TrueBB);
580     MIB.setDebugLoc(OldDbgLoc);
581     return;
582   }
583 
584   const LLT i1Ty = LLT::scalar(1);
585   // Build the compare.
586   if (!CB.CmpMHS) {
587     Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
588     Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
589   } else {
590     assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
591            "Can only handle SLE ranges");
592 
593     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
594     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
595 
596     Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
597     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
598       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
599       Cond =
600           MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
601     } else {
602       const LLT CmpTy = MRI->getType(CmpOpReg);
603       auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
604       auto Diff = MIB.buildConstant(CmpTy, High - Low);
605       Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
606     }
607   }
608 
609   // Update successor info
610   addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
611 
612   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
613                     CB.ThisBB);
614 
615   // TrueBB and FalseBB are always different unless the incoming IR is
616   // degenerate. This only happens when running llc on weird IR.
617   if (CB.TrueBB != CB.FalseBB)
618     addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
619   CB.ThisBB->normalizeSuccProbs();
620 
621   //  if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
622     addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
623                       CB.ThisBB);
624 
625   // If the lhs block is the next block, invert the condition so that we can
626   // fall through to the lhs instead of the rhs block.
627   if (CB.TrueBB == CB.ThisBB->getNextNode()) {
628     std::swap(CB.TrueBB, CB.FalseBB);
629     auto True = MIB.buildConstant(i1Ty, 1);
630     Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0);
631   }
632 
633   MIB.buildBrCond(Cond, *CB.TrueBB);
634   MIB.buildBr(*CB.FalseBB);
635   MIB.setDebugLoc(OldDbgLoc);
636 }
637 
638 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
639                                           MachineBasicBlock *SwitchMBB,
640                                           MachineBasicBlock *CurMBB,
641                                           MachineBasicBlock *DefaultMBB,
642                                           MachineIRBuilder &MIB,
643                                           MachineFunction::iterator BBI,
644                                           BranchProbability UnhandledProbs,
645                                           SwitchCG::CaseClusterIt I,
646                                           MachineBasicBlock *Fallthrough,
647                                           bool FallthroughUnreachable) {
648   using namespace SwitchCG;
649   MachineFunction *CurMF = SwitchMBB->getParent();
650   // FIXME: Optimize away range check based on pivot comparisons.
651   JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
652   SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
653   BranchProbability DefaultProb = W.DefaultProb;
654 
655   // The jump block hasn't been inserted yet; insert it here.
656   MachineBasicBlock *JumpMBB = JT->MBB;
657   CurMF->insert(BBI, JumpMBB);
658 
659   // Since the jump table block is separate from the switch block, we need
660   // to keep track of it as a machine predecessor to the default block,
661   // otherwise we lose the phi edges.
662   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
663                     CurMBB);
664   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
665                     JumpMBB);
666 
667   auto JumpProb = I->Prob;
668   auto FallthroughProb = UnhandledProbs;
669 
670   // If the default statement is a target of the jump table, we evenly
671   // distribute the default probability to successors of CurMBB. Also
672   // update the probability on the edge from JumpMBB to Fallthrough.
673   for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
674                                         SE = JumpMBB->succ_end();
675        SI != SE; ++SI) {
676     if (*SI == DefaultMBB) {
677       JumpProb += DefaultProb / 2;
678       FallthroughProb -= DefaultProb / 2;
679       JumpMBB->setSuccProbability(SI, DefaultProb / 2);
680       JumpMBB->normalizeSuccProbs();
681     } else {
682       // Also record edges from the jump table block to its successors.
683       addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
684                         JumpMBB);
685     }
686   }
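  // For illustration, if DefaultProb is 1/4 and the default block is also a
  // jump table target, the edge into the table gains 1/8, the fallthrough
  // edge gives up 1/8, and the in-table edge to the default block becomes 1/8.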
687 
688   // Skip the range check if the fallthrough block is unreachable.
689   if (FallthroughUnreachable)
690     JTH->OmitRangeCheck = true;
691 
692   if (!JTH->OmitRangeCheck)
693     addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
694   addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
695   CurMBB->normalizeSuccProbs();
696 
697   // The jump table header will be inserted in our current block, do the
698   // range check, and fall through to our fallthrough block.
699   JTH->HeaderBB = CurMBB;
700   JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
701 
702   // If we're in the right place, emit the jump table header right now.
703   if (CurMBB == SwitchMBB) {
704     if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
705       return false;
706     JTH->Emitted = true;
707   }
708   return true;
709 }
710 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
711                                             Value *Cond,
712                                             MachineBasicBlock *Fallthrough,
713                                             bool FallthroughUnreachable,
714                                             BranchProbability UnhandledProbs,
715                                             MachineBasicBlock *CurMBB,
716                                             MachineIRBuilder &MIB,
717                                             MachineBasicBlock *SwitchMBB) {
718   using namespace SwitchCG;
719   const Value *RHS, *LHS, *MHS;
720   CmpInst::Predicate Pred;
721   if (I->Low == I->High) {
722     // Check Cond == I->Low.
723     Pred = CmpInst::ICMP_EQ;
724     LHS = Cond;
725     RHS = I->Low;
726     MHS = nullptr;
727   } else {
728     // Check I->Low <= Cond <= I->High.
729     Pred = CmpInst::ICMP_SLE;
730     LHS = I->Low;
731     MHS = Cond;
732     RHS = I->High;
733   }
734 
735   // If Fallthrough is unreachable, fold away the comparison.
736   // The false probability is the sum of all unhandled cases.
737   CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
738                CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
739 
740   emitSwitchCase(CB, SwitchMBB, MIB);
741   return true;
742 }
743 
744 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
745                                        Value *Cond,
746                                        MachineBasicBlock *SwitchMBB,
747                                        MachineBasicBlock *DefaultMBB,
748                                        MachineIRBuilder &MIB) {
749   using namespace SwitchCG;
750   MachineFunction *CurMF = FuncInfo.MF;
751   MachineBasicBlock *NextMBB = nullptr;
752   MachineFunction::iterator BBI(W.MBB);
753   if (++BBI != FuncInfo.MF->end())
754     NextMBB = &*BBI;
755 
756   if (EnableOpts) {
757     // Here, we order cases by probability so the most likely case will be
758     // checked first. However, two clusters can have the same probability in
759     // which case their relative ordering is non-deterministic. So we use Low
760     // as a tie-breaker, since clusters are guaranteed never to overlap.
761     llvm::sort(W.FirstCluster, W.LastCluster + 1,
762                [](const CaseCluster &a, const CaseCluster &b) {
763                  return a.Prob != b.Prob
764                             ? a.Prob > b.Prob
765                             : a.Low->getValue().slt(b.Low->getValue());
766                });
767 
768     // Rearrange the case blocks so that the last one falls through if possible
769     // without changing the order of probabilities.
770     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
771       --I;
772       if (I->Prob > W.LastCluster->Prob)
773         break;
774       if (I->Kind == CC_Range && I->MBB == NextMBB) {
775         std::swap(*I, *W.LastCluster);
776         break;
777       }
778     }
779   }
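  // For illustration, with clusters A (0.4), B (0.3) and C (0.3) where B's
  // block is the layout successor, B ties with C on probability and is swapped
  // into the last position, so the final comparison can fall through to it
  // without disturbing the non-increasing probability order.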
780 
781   // Compute total probability.
782   BranchProbability DefaultProb = W.DefaultProb;
783   BranchProbability UnhandledProbs = DefaultProb;
784   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
785     UnhandledProbs += I->Prob;
786 
787   MachineBasicBlock *CurMBB = W.MBB;
788   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
789     bool FallthroughUnreachable = false;
790     MachineBasicBlock *Fallthrough;
791     if (I == W.LastCluster) {
792       // For the last cluster, fall through to the default destination.
793       Fallthrough = DefaultMBB;
794       FallthroughUnreachable = isa<UnreachableInst>(
795           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
796     } else {
797       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
798       CurMF->insert(BBI, Fallthrough);
799     }
800     UnhandledProbs -= I->Prob;
801 
802     switch (I->Kind) {
803     case CC_BitTests: {
804       LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
805       return false; // Bit tests currently unimplemented.
806     }
807     case CC_JumpTable: {
808       if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
809                                   UnhandledProbs, I, Fallthrough,
810                                   FallthroughUnreachable)) {
811         LLVM_DEBUG(dbgs() << "Failed to lower jump table");
812         return false;
813       }
814       break;
815     }
816     case CC_Range: {
817       if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
818                                     FallthroughUnreachable, UnhandledProbs,
819                                     CurMBB, MIB, SwitchMBB)) {
820         LLVM_DEBUG(dbgs() << "Failed to lower switch range");
821         return false;
822       }
823       break;
824     }
825     }
826     CurMBB = Fallthrough;
827   }
828 
829   return true;
830 }
831 
832 bool IRTranslator::translateIndirectBr(const User &U,
833                                        MachineIRBuilder &MIRBuilder) {
834   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
835 
836   const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
837   MIRBuilder.buildBrIndirect(Tgt);
838 
839   // Link successors.
840   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
841   for (const BasicBlock *Succ : successors(&BrInst))
842     CurBB.addSuccessor(&getMBB(*Succ));
843 
844   return true;
845 }
846 
847 static bool isSwiftError(const Value *V) {
848   if (auto Arg = dyn_cast<Argument>(V))
849     return Arg->hasSwiftErrorAttr();
850   if (auto AI = dyn_cast<AllocaInst>(V))
851     return AI->isSwiftError();
852   return false;
853 }
854 
855 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
856   const LoadInst &LI = cast<LoadInst>(U);
857   if (DL->getTypeStoreSize(LI.getType()) == 0)
858     return true;
859 
860   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
861   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
862   Register Base = getOrCreateVReg(*LI.getPointerOperand());
863 
864   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
865   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
866 
867   if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
868     assert(Regs.size() == 1 && "swifterror should be single pointer");
869     Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
870                                                     LI.getPointerOperand());
871     MIRBuilder.buildCopy(Regs[0], VReg);
872     return true;
873   }
874 
875   auto &TLI = *MF->getSubtarget().getTargetLowering();
876   MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
877 
878   const MDNode *Ranges =
879       Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
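  // Each leaf element gets its own load: for illustration, with a typical
  // 64-bit DataLayout a load of {i64, i32} becomes a G_LOAD of s64 at byte
  // offset 0 and a G_LOAD of s32 at byte offset 8, each with its own MMO.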
880   for (unsigned i = 0; i < Regs.size(); ++i) {
881     Register Addr;
882     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
883 
884     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
885     Align BaseAlign = getMemOpAlign(LI);
886     AAMDNodes AAMetadata;
887     LI.getAAMetadata(AAMetadata);
888     auto MMO = MF->getMachineMemOperand(
889         Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(),
890         commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
891         LI.getSyncScopeID(), LI.getOrdering());
892     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
893   }
894 
895   return true;
896 }
897 
898 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
899   const StoreInst &SI = cast<StoreInst>(U);
900   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
901     return true;
902 
903   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
904   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
905   Register Base = getOrCreateVReg(*SI.getPointerOperand());
906 
907   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
908   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
909 
910   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
911     assert(Vals.size() == 1 && "swifterror should be single pointer");
912 
913     Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
914                                                     SI.getPointerOperand());
915     MIRBuilder.buildCopy(VReg, Vals[0]);
916     return true;
917   }
918 
919   auto &TLI = *MF->getSubtarget().getTargetLowering();
920   MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
921 
922   for (unsigned i = 0; i < Vals.size(); ++i) {
923     Register Addr;
924     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
925 
926     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
927     Align BaseAlign = getMemOpAlign(SI);
928     AAMDNodes AAMetadata;
929     SI.getAAMetadata(AAMetadata);
930     auto MMO = MF->getMachineMemOperand(
931         Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(),
932         commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
933         SI.getSyncScopeID(), SI.getOrdering());
934     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
935   }
936   return true;
937 }
938 
939 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
940   const Value *Src = U.getOperand(0);
941   Type *Int32Ty = Type::getInt32Ty(U.getContext());
942 
943   // getIndexedOffsetInType is designed for GEPs, so the first index is the
944   // usual array element rather than looking into the actual aggregate.
945   SmallVector<Value *, 1> Indices;
946   Indices.push_back(ConstantInt::get(Int32Ty, 0));
947 
948   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
949     for (auto Idx : EVI->indices())
950       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
951   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
952     for (auto Idx : IVI->indices())
953       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
954   } else {
955     for (unsigned i = 1; i < U.getNumOperands(); ++i)
956       Indices.push_back(U.getOperand(i));
957   }
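  // For illustration, `extractvalue {i32, {i64, i16}} %agg, 1, 0` yields
  // Indices = [0, 1, 0]; with a typical DataLayout the selected field starts
  // at byte 8, so the function returns 64 (the offset in bits).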
958 
959   return 8 * static_cast<uint64_t>(
960                  DL.getIndexedOffsetInType(Src->getType(), Indices));
961 }
962 
963 bool IRTranslator::translateExtractValue(const User &U,
964                                          MachineIRBuilder &MIRBuilder) {
965   const Value *Src = U.getOperand(0);
966   uint64_t Offset = getOffsetFromIndices(U, *DL);
967   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
968   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
969   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
970   auto &DstRegs = allocateVRegs(U);
971 
972   for (unsigned i = 0; i < DstRegs.size(); ++i)
973     DstRegs[i] = SrcRegs[Idx++];
974 
975   return true;
976 }
977 
978 bool IRTranslator::translateInsertValue(const User &U,
979                                         MachineIRBuilder &MIRBuilder) {
980   const Value *Src = U.getOperand(0);
981   uint64_t Offset = getOffsetFromIndices(U, *DL);
982   auto &DstRegs = allocateVRegs(U);
983   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
984   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
985   ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
986   auto InsertedIt = InsertedRegs.begin();
987 
988   for (unsigned i = 0; i < DstRegs.size(); ++i) {
989     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
990       DstRegs[i] = *InsertedIt++;
991     else
992       DstRegs[i] = SrcRegs[i];
993   }
994 
995   return true;
996 }
997 
998 bool IRTranslator::translateSelect(const User &U,
999                                    MachineIRBuilder &MIRBuilder) {
1000   Register Tst = getOrCreateVReg(*U.getOperand(0));
1001   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1002   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1003   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1004 
1005   const SelectInst &SI = cast<SelectInst>(U);
1006   uint16_t Flags = 0;
1007   if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
1008     Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
1009 
1010   for (unsigned i = 0; i < ResRegs.size(); ++i) {
1011     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1012   }
1013 
1014   return true;
1015 }
1016 
1017 bool IRTranslator::translateBitCast(const User &U,
1018                                     MachineIRBuilder &MIRBuilder) {
1019   // If we're bitcasting to the source type, we can reuse the source vreg.
1020   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1021       getLLTForType(*U.getType(), *DL)) {
1022     Register SrcReg = getOrCreateVReg(*U.getOperand(0));
1023     auto &Regs = *VMap.getVRegs(U);
1024     // If we already assigned a vreg for this bitcast, we can't change that.
1025     // Emit a copy to satisfy the users we already emitted.
1026     if (!Regs.empty())
1027       MIRBuilder.buildCopy(Regs[0], SrcReg);
1028     else {
1029       Regs.push_back(SrcReg);
1030       VMap.getOffsets(U)->push_back(0);
1031     }
1032     return true;
1033   }
1034   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1035 }
1036 
1037 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1038                                  MachineIRBuilder &MIRBuilder) {
1039   Register Op = getOrCreateVReg(*U.getOperand(0));
1040   Register Res = getOrCreateVReg(U);
1041   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1042   return true;
1043 }
1044 
1045 bool IRTranslator::translateGetElementPtr(const User &U,
1046                                           MachineIRBuilder &MIRBuilder) {
1047   Value &Op0 = *U.getOperand(0);
1048   Register BaseReg = getOrCreateVReg(Op0);
1049   Type *PtrIRTy = Op0.getType();
1050   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1051   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1052   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1053 
1054   // Normalize Vector GEP - all scalar operands should be converted to the
1055   // splat vector.
1056   unsigned VectorWidth = 0;
1057   if (auto *VT = dyn_cast<VectorType>(U.getType()))
1058     VectorWidth = VT->getNumElements();
1059 
1060   // We might need to splat the base pointer into a vector if the offsets
1061   // are vectors.
1062   if (VectorWidth && !PtrTy.isVector()) {
1063     BaseReg =
1064         MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
1065             .getReg(0);
1066     PtrIRTy = VectorType::get(PtrIRTy, VectorWidth);
1067     PtrTy = getLLTForType(*PtrIRTy, *DL);
1068     OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1069     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1070   }
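  // For illustration, `getelementptr i32, i32* %p, <4 x i64> %idx` has a
  // scalar base but vector indices, so %p is first splatted into a <4 x p0>
  // vector and the remaining address arithmetic is done element-wise.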
1071 
1072   int64_t Offset = 0;
1073   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1074        GTI != E; ++GTI) {
1075     const Value *Idx = GTI.getOperand();
1076     if (StructType *StTy = GTI.getStructTypeOrNull()) {
1077       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1078       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1079       continue;
1080     } else {
1081       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1082 
1083       // If this is a scalar constant or a splat vector of constants,
1084       // handle it quickly.
1085       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1086         Offset += ElementSize * CI->getSExtValue();
1087         continue;
1088       }
1089 
1090       if (Offset != 0) {
1091         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1092         BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1093                       .getReg(0);
1094         Offset = 0;
1095       }
1096 
1097       Register IdxReg = getOrCreateVReg(*Idx);
1098       LLT IdxTy = MRI->getType(IdxReg);
1099       if (IdxTy != OffsetTy) {
1100         if (!IdxTy.isVector() && VectorWidth) {
1101           IdxReg = MIRBuilder.buildSplatVector(
1102             OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1103         }
1104 
1105         IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1106       }
1107 
1108       // N = N + Idx * ElementSize;
1109       // Avoid doing it for ElementSize of 1.
1110       Register GepOffsetReg;
1111       if (ElementSize != 1) {
1112         auto ElementSizeMIB = MIRBuilder.buildConstant(
1113             getLLTForType(*OffsetIRTy, *DL), ElementSize);
1114         GepOffsetReg =
1115             MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1116       } else
1117         GepOffsetReg = IdxReg;
1118 
1119       BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1120     }
1121   }
1122 
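  // Emit any constant offset still pending after the loop. For illustration,
  // `getelementptr {i32, i32}, {i32, i32}* %p, i64 0, i32 1` has only constant
  // indices, so it lowers to a single G_PTR_ADD of the constant 4.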
1123   if (Offset != 0) {
1124     auto OffsetMIB =
1125         MIRBuilder.buildConstant(OffsetTy, Offset);
1126     MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1127     return true;
1128   }
1129 
1130   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1131   return true;
1132 }
1133 
1134 bool IRTranslator::translateMemFunc(const CallInst &CI,
1135                                     MachineIRBuilder &MIRBuilder,
1136                                     Intrinsic::ID ID) {
1137 
1138   // If the source is undef, then just emit a nop.
1139   if (isa<UndefValue>(CI.getArgOperand(1)))
1140     return true;
1141 
1142   ArrayRef<Register> Res;
1143   auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
1144   for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
1145     ICall.addUse(getOrCreateVReg(**AI));
1146 
1147   Align DstAlign;
1148   Align SrcAlign;
1149   unsigned IsVol =
1150       cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1151           ->getZExtValue();
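  // The last IR argument is the volatile flag; it is deliberately skipped by
  // the use loop above and is conveyed via the MOVolatile flag on the memory
  // operands instead.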
1152 
1153   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1154     DstAlign = MCI->getDestAlign().valueOrOne();
1155     SrcAlign = MCI->getSourceAlign().valueOrOne();
1156   } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1157     DstAlign = MMI->getDestAlign().valueOrOne();
1158     SrcAlign = MMI->getSourceAlign().valueOrOne();
1159   } else {
1160     auto *MSI = cast<MemSetInst>(&CI);
1161     DstAlign = MSI->getDestAlign().valueOrOne();
1162   }
1163 
1164   // We need to propagate the tail call flag from the IR inst as an argument.
1165   // Otherwise, we have to pessimize and assume later that we cannot tail call
1166   // any memory intrinsics.
1167   ICall.addImm(CI.isTailCall() ? 1 : 0);
1168 
1169   // Create mem operands to store the alignment and volatile info.
1170   auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1171   ICall.addMemOperand(MF->getMachineMemOperand(
1172       MachinePointerInfo(CI.getArgOperand(0)),
1173       MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1174   if (ID != Intrinsic::memset)
1175     ICall.addMemOperand(MF->getMachineMemOperand(
1176         MachinePointerInfo(CI.getArgOperand(1)),
1177         MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1178 
1179   return true;
1180 }
1181 
1182 void IRTranslator::getStackGuard(Register DstReg,
1183                                  MachineIRBuilder &MIRBuilder) {
1184   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1185   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1186   auto MIB =
1187       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1188 
1189   auto &TLI = *MF->getSubtarget().getTargetLowering();
1190   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1191   if (!Global)
1192     return;
1193 
1194   MachinePointerInfo MPInfo(Global);
1195   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1196                MachineMemOperand::MODereferenceable;
1197   MachineMemOperand *MemRef =
1198       MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
1199                                DL->getPointerABIAlignment(0));
1200   MIB.setMemRefs({MemRef});
1201 }
1202 
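// Translate one of the *.with.overflow intrinsics into its generic opcode.
// For illustration, @llvm.sadd.with.overflow.i32 becomes a G_SADDO whose two
// defs are the s32 result and the s1 overflow bit, mirroring the two elements
// of the {i32, i1} struct the intrinsic returns.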
1203 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1204                                               MachineIRBuilder &MIRBuilder) {
1205   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1206   MIRBuilder.buildInstr(
1207       Op, {ResRegs[0], ResRegs[1]},
1208       {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1209 
1210   return true;
1211 }
1212 
1213 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1214   switch (ID) {
1215     default:
1216       break;
1217     case Intrinsic::bswap:
1218       return TargetOpcode::G_BSWAP;
1219     case Intrinsic::bitreverse:
1220       return TargetOpcode::G_BITREVERSE;
1221     case Intrinsic::fshl:
1222       return TargetOpcode::G_FSHL;
1223     case Intrinsic::fshr:
1224       return TargetOpcode::G_FSHR;
1225     case Intrinsic::ceil:
1226       return TargetOpcode::G_FCEIL;
1227     case Intrinsic::cos:
1228       return TargetOpcode::G_FCOS;
1229     case Intrinsic::ctpop:
1230       return TargetOpcode::G_CTPOP;
1231     case Intrinsic::exp:
1232       return TargetOpcode::G_FEXP;
1233     case Intrinsic::exp2:
1234       return TargetOpcode::G_FEXP2;
1235     case Intrinsic::fabs:
1236       return TargetOpcode::G_FABS;
1237     case Intrinsic::copysign:
1238       return TargetOpcode::G_FCOPYSIGN;
1239     case Intrinsic::minnum:
1240       return TargetOpcode::G_FMINNUM;
1241     case Intrinsic::maxnum:
1242       return TargetOpcode::G_FMAXNUM;
1243     case Intrinsic::minimum:
1244       return TargetOpcode::G_FMINIMUM;
1245     case Intrinsic::maximum:
1246       return TargetOpcode::G_FMAXIMUM;
1247     case Intrinsic::canonicalize:
1248       return TargetOpcode::G_FCANONICALIZE;
1249     case Intrinsic::floor:
1250       return TargetOpcode::G_FFLOOR;
1251     case Intrinsic::fma:
1252       return TargetOpcode::G_FMA;
1253     case Intrinsic::log:
1254       return TargetOpcode::G_FLOG;
1255     case Intrinsic::log2:
1256       return TargetOpcode::G_FLOG2;
1257     case Intrinsic::log10:
1258       return TargetOpcode::G_FLOG10;
1259     case Intrinsic::nearbyint:
1260       return TargetOpcode::G_FNEARBYINT;
1261     case Intrinsic::pow:
1262       return TargetOpcode::G_FPOW;
1263     case Intrinsic::rint:
1264       return TargetOpcode::G_FRINT;
1265     case Intrinsic::round:
1266       return TargetOpcode::G_INTRINSIC_ROUND;
1267     case Intrinsic::sin:
1268       return TargetOpcode::G_FSIN;
1269     case Intrinsic::sqrt:
1270       return TargetOpcode::G_FSQRT;
1271     case Intrinsic::trunc:
1272       return TargetOpcode::G_INTRINSIC_TRUNC;
1273     case Intrinsic::readcyclecounter:
1274       return TargetOpcode::G_READCYCLECOUNTER;
1275   }
1276   return Intrinsic::not_intrinsic;
1277 }
1278 
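// A "simple" intrinsic is one that maps 1:1 onto a generic opcode: one def for
// the call's result and one use per argument. For illustration,
// `call float @llvm.sqrt.f32(float %x)` becomes G_FSQRT with the call's
// fast-math flags copied onto the new instruction.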
1279 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1280                                             Intrinsic::ID ID,
1281                                             MachineIRBuilder &MIRBuilder) {
1282 
1283   unsigned Op = getSimpleIntrinsicOpcode(ID);
1284 
1285   // Is this a simple intrinsic?
1286   if (Op == Intrinsic::not_intrinsic)
1287     return false;
1288 
1289   // Yes. Let's translate it.
1290   SmallVector<llvm::SrcOp, 4> VRegs;
1291   for (auto &Arg : CI.arg_operands())
1292     VRegs.push_back(getOrCreateVReg(*Arg));
1293 
1294   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1295                         MachineInstr::copyFlagsFromInstruction(CI));
1296   return true;
1297 }
1298 
1299 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1300                                            MachineIRBuilder &MIRBuilder) {
1301 
1302   // If this is a simple intrinsic (that is, we just need to add a def of
1303   // a vreg and uses for each arg operand), then translate it.
1304   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1305     return true;
1306 
1307   switch (ID) {
1308   default:
1309     break;
1310   case Intrinsic::lifetime_start:
1311   case Intrinsic::lifetime_end: {
1312     // No stack colouring in O0, discard region information.
1313     if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1314       return true;
1315 
1316     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1317                                                   : TargetOpcode::LIFETIME_END;
1318 
1319     // Get the underlying objects for the location passed on the lifetime
1320     // marker.
1321     SmallVector<const Value *, 4> Allocas;
1322     GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
1323 
1324     // Iterate over each underlying object, creating lifetime markers for each
1325     // static alloca. Quit if we find a non-static alloca.
1326     for (const Value *V : Allocas) {
1327       const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1328       if (!AI)
1329         continue;
1330 
1331       if (!AI->isStaticAlloca())
1332         return true;
1333 
1334       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1335     }
1336     return true;
1337   }
1338   case Intrinsic::dbg_declare: {
1339     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1340     assert(DI.getVariable() && "Missing variable");
1341 
1342     const Value *Address = DI.getAddress();
1343     if (!Address || isa<UndefValue>(Address)) {
1344       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1345       return true;
1346     }
1347 
1348     assert(DI.getVariable()->isValidLocationForIntrinsic(
1349                MIRBuilder.getDebugLoc()) &&
1350            "Expected inlined-at fields to agree");
1351     auto AI = dyn_cast<AllocaInst>(Address);
1352     if (AI && AI->isStaticAlloca()) {
1353       // Static allocas are tracked at the MF level, no need for DBG_VALUE
1354       // instructions (in fact, they get ignored if they *do* exist).
1355       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1356                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1357     } else {
1358       // A dbg.declare describes the address of a source variable, so lower it
1359       // into an indirect DBG_VALUE.
1360       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1361                                        DI.getVariable(), DI.getExpression());
1362     }
1363     return true;
1364   }
1365   case Intrinsic::dbg_label: {
1366     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1367     assert(DI.getLabel() && "Missing label");
1368 
1369     assert(DI.getLabel()->isValidLocationForIntrinsic(
1370                MIRBuilder.getDebugLoc()) &&
1371            "Expected inlined-at fields to agree");
1372 
1373     MIRBuilder.buildDbgLabel(DI.getLabel());
1374     return true;
1375   }
1376   case Intrinsic::vaend:
1377     // No target I know of cares about va_end. Certainly no in-tree target
1378     // does. Simplest intrinsic ever!
1379     return true;
1380   case Intrinsic::vastart: {
1381     auto &TLI = *MF->getSubtarget().getTargetLowering();
1382     Value *Ptr = CI.getArgOperand(0);
1383     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1384 
1385     // FIXME: Get alignment
1386     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1387         .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1388                                                 MachineMemOperand::MOStore,
1389                                                 ListSize, Align(1)));
1390     return true;
1391   }
1392   case Intrinsic::dbg_value: {
1393     // This form of DBG_VALUE is target-independent.
1394     const DbgValueInst &DI = cast<DbgValueInst>(CI);
1395     const Value *V = DI.getValue();
1396     assert(DI.getVariable()->isValidLocationForIntrinsic(
1397                MIRBuilder.getDebugLoc()) &&
1398            "Expected inlined-at fields to agree");
1399     if (!V) {
1400       // Currently the optimizer can produce this; insert an undef to
1401       // help debugging.  Probably the optimizer should not do this.
1402       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1403     } else if (const auto *CI = dyn_cast<Constant>(V)) {
1404       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1405     } else {
1406       for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0.
        // The direct/indirect thing shouldn't really be handled by something
        // as implicit as reg+noreg vs reg+imm in the first place, but it
        // seems pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
1412       }
1413     }
1414     return true;
1415   }
1416   case Intrinsic::uadd_with_overflow:
1417     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1418   case Intrinsic::sadd_with_overflow:
1419     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1420   case Intrinsic::usub_with_overflow:
1421     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1422   case Intrinsic::ssub_with_overflow:
1423     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1424   case Intrinsic::umul_with_overflow:
1425     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1426   case Intrinsic::smul_with_overflow:
1427     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1428   case Intrinsic::uadd_sat:
1429     return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
1430   case Intrinsic::sadd_sat:
1431     return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
1432   case Intrinsic::usub_sat:
1433     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
1434   case Intrinsic::ssub_sat:
1435     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
1436   case Intrinsic::fmuladd: {
1437     const TargetMachine &TM = MF->getTarget();
1438     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1439     Register Dst = getOrCreateVReg(CI);
1440     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
1441     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
1442     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
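    // Prefer a fused G_FMA when the target reports that FMA is faster than a
    // separate multiply and add, and strict FP-op fusion was not requested;
    // otherwise expand to G_FMUL followed by G_FADD.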
1443     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
1444         TLI.isFMAFasterThanFMulAndFAdd(*MF,
1445                                        TLI.getValueType(*DL, CI.getType()))) {
1446       // TODO: Revisit this to see if we should move this part of the
1447       // lowering to the combiner.
1448       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
1449                           MachineInstr::copyFlagsFromInstruction(CI));
1450     } else {
1451       LLT Ty = getLLTForType(*CI.getType(), *DL);
1452       auto FMul = MIRBuilder.buildFMul(
1453           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
1454       MIRBuilder.buildFAdd(Dst, FMul, Op2,
1455                            MachineInstr::copyFlagsFromInstruction(CI));
1456     }
1457     return true;
1458   }
1459   case Intrinsic::memcpy:
1460   case Intrinsic::memmove:
1461   case Intrinsic::memset:
1462     return translateMemFunc(CI, MIRBuilder, ID);
1463   case Intrinsic::eh_typeid_for: {
1464     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
1465     Register Reg = getOrCreateVReg(CI);
1466     unsigned TypeID = MF->getTypeIDFor(GV);
1467     MIRBuilder.buildConstant(Reg, TypeID);
1468     return true;
1469   }
1470   case Intrinsic::objectsize:
1471     llvm_unreachable("llvm.objectsize.* should have been lowered already");
1472 
1473   case Intrinsic::is_constant:
1474     llvm_unreachable("llvm.is.constant.* should have been lowered already");
1475 
1476   case Intrinsic::stackguard:
1477     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1478     return true;
1479   case Intrinsic::stackprotector: {
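    // Load the stack guard value and store it into the stack protector slot,
    // recording the slot's frame index so later stack-protector handling can
    // find it. The store is volatile so it cannot be elided.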
1480     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1481     Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1482     getStackGuard(GuardVal, MIRBuilder);
1483 
1484     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1485     int FI = getOrCreateFrameIndex(*Slot);
1486     MF->getFrameInfo().setStackProtectorIndex(FI);
1487 
1488     MIRBuilder.buildStore(
1489         GuardVal, getOrCreateVReg(*Slot),
1490         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1491                                   MachineMemOperand::MOStore |
1492                                       MachineMemOperand::MOVolatile,
1493                                   PtrTy.getSizeInBits() / 8, Align(8)));
1494     return true;
1495   }
1496   case Intrinsic::stacksave: {
1497     // Save the stack pointer to the location provided by the intrinsic.
1498     Register Reg = getOrCreateVReg(CI);
1499     Register StackPtr = MF->getSubtarget()
1500                             .getTargetLowering()
1501                             ->getStackPointerRegisterToSaveRestore();
1502 
1503     // If the target doesn't specify a stack pointer, then fall back.
1504     if (!StackPtr)
1505       return false;
1506 
1507     MIRBuilder.buildCopy(Reg, StackPtr);
1508     return true;
1509   }
1510   case Intrinsic::stackrestore: {
1511     // Restore the stack pointer from the location provided by the intrinsic.
1512     Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
1513     Register StackPtr = MF->getSubtarget()
1514                             .getTargetLowering()
1515                             ->getStackPointerRegisterToSaveRestore();
1516 
1517     // If the target doesn't specify a stack pointer, then fall back.
1518     if (!StackPtr)
1519       return false;
1520 
1521     MIRBuilder.buildCopy(StackPtr, Reg);
1522     return true;
1523   }
1524   case Intrinsic::cttz:
1525   case Intrinsic::ctlz: {
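    // The second operand is the i1 "is_zero_undef" flag: if it is false the
    // result is defined for a zero input, so use the plain G_CTTZ/G_CTLZ;
    // otherwise use the *_ZERO_UNDEF variants.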
1526     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1527     bool isTrailing = ID == Intrinsic::cttz;
1528     unsigned Opcode = isTrailing
1529                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
1530                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
1531                           : Cst->isZero() ? TargetOpcode::G_CTLZ
1532                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1533     MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
1534                           {getOrCreateVReg(*CI.getArgOperand(0))});
1535     return true;
1536   }
1537   case Intrinsic::invariant_start: {
1538     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1539     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
1540     MIRBuilder.buildUndef(Undef);
1541     return true;
1542   }
1543   case Intrinsic::invariant_end:
1544     return true;
1545   case Intrinsic::assume:
1546   case Intrinsic::var_annotation:
1547   case Intrinsic::sideeffect:
1548     // Discard annotate attributes, assumptions, and artificial side-effects.
1549     return true;
1550   case Intrinsic::read_register: {
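    // The register to read is named by a metadata string operand, which is
    // forwarded as a metadata operand on G_READ_REGISTER.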
1551     Value *Arg = CI.getArgOperand(0);
1552     MIRBuilder
1553         .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
1554         .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
1555     return true;
1556   }
1557   case Intrinsic::write_register: {
1558     Value *Arg = CI.getArgOperand(0);
1559     MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
1560       .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
1561       .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
1562     return true;
1563   }
1564   }
1565   return false;
1566 }
1567 
1568 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1569                                       MachineIRBuilder &MIRBuilder) {
1570   const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1571   StringRef ConstraintStr = IA.getConstraintString();
1572 
1573   bool HasOnlyMemoryClobber = false;
1574   if (!ConstraintStr.empty()) {
    // Until we have full inline assembly support, we only try to handle the
    // very simple case of just "~{memory}" to avoid falling back so often.
1577     if (ConstraintStr != "~{memory}")
1578       return false;
1579     HasOnlyMemoryClobber = true;
1580   }
1581 
1582   unsigned ExtraInfo = 0;
1583   if (IA.hasSideEffects())
1584     ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1585   if (IA.getDialect() == InlineAsm::AD_Intel)
1586     ExtraInfo |= InlineAsm::Extra_AsmDialect;
1587 
1588   // HACK: special casing for ~memory.
1589   if (HasOnlyMemoryClobber)
1590     ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
1591 
1592   auto Inst = MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1593                   .addExternalSymbol(IA.getAsmString().c_str())
1594                   .addImm(ExtraInfo);
1595   if (const MDNode *SrcLoc = CI.getMetadata("srcloc"))
1596     Inst.addMetadata(SrcLoc);
1597 
1598   return true;
1599 }
1600 
1601 bool IRTranslator::translateCallBase(const CallBase &CB,
1602                                      MachineIRBuilder &MIRBuilder) {
1603   ArrayRef<Register> Res = getOrCreateVRegs(CB);
1604 
1605   SmallVector<ArrayRef<Register>, 8> Args;
1606   Register SwiftInVReg = 0;
1607   Register SwiftErrorVReg = 0;
1608   for (auto &Arg : CB.args()) {
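    // A swifterror argument is effectively both read and written by the call:
    // copy the value reaching the call site into a fresh vreg to pass as the
    // argument, and record a new definition of the value after the call.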
1609     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1610       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1611       LLT Ty = getLLTForType(*Arg->getType(), *DL);
1612       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1613       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1614                                             &CB, &MIRBuilder.getMBB(), Arg));
1615       Args.emplace_back(makeArrayRef(SwiftInVReg));
1616       SwiftErrorVReg =
1617           SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
1618       continue;
1619     }
1620     Args.push_back(getOrCreateVRegs(*Arg));
1621   }
1622 
1623   // We don't set HasCalls on MFI here yet because call lowering may decide to
1624   // optimize into tail calls. Instead, we defer that to selection where a final
1625   // scan is done to check if any instructions are calls.
1626   bool Success =
1627       CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
1628                      [&]() { return getOrCreateVReg(*CB.getCalledValue()); });
1629 
1630   // Check if we just inserted a tail call.
1631   if (Success) {
1632     assert(!HasTailCall && "Can't tail call return twice from block?");
1633     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1634     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
1635   }
1636 
1637   return Success;
1638 }
1639 
1640 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1641   const CallInst &CI = cast<CallInst>(U);
1642   auto TII = MF->getTarget().getIntrinsicInfo();
1643   const Function *F = CI.getCalledFunction();
1644 
1645   // FIXME: support Windows dllimport function calls.
1646   if (F && (F->hasDLLImportStorageClass() ||
1647             (MF->getTarget().getTargetTriple().isOSWindows() &&
1648              F->hasExternalWeakLinkage())))
1649     return false;
1650 
1651   // FIXME: support control flow guard targets.
1652   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
1653     return false;
1654 
1655   if (CI.isInlineAsm())
1656     return translateInlineAsm(CI, MIRBuilder);
1657 
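  // Work out whether this call is an intrinsic. Target-specific intrinsics
  // that the generic table does not know about are looked up through the
  // target's TargetIntrinsicInfo, if one is provided.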
1658   Intrinsic::ID ID = Intrinsic::not_intrinsic;
1659   if (F && F->isIntrinsic()) {
1660     ID = F->getIntrinsicID();
1661     if (TII && ID == Intrinsic::not_intrinsic)
1662       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1663   }
1664 
1665   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
1666     return translateCallBase(CI, MIRBuilder);
1667 
1668   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1669 
1670   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1671     return true;
1672 
1673   ArrayRef<Register> ResultRegs;
1674   if (!CI.getType()->isVoidTy())
1675     ResultRegs = getOrCreateVRegs(CI);
1676 
1677   // Ignore the callsite attributes. Backend code is most likely not expecting
1678   // an intrinsic to sometimes have side effects and sometimes not.
1679   MachineInstrBuilder MIB =
1680       MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1681   if (isa<FPMathOperator>(CI))
1682     MIB->copyIRFlags(CI);
1683 
1684   for (auto &Arg : enumerate(CI.arg_operands())) {
1685     // Some intrinsics take metadata parameters. Reject them.
1686     if (isa<MetadataAsValue>(Arg.value()))
1687       return false;
1688 
1689     // If this is required to be an immediate, don't materialize it in a
1690     // register.
1691     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
1692       if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
1693         // imm arguments are more convenient than cimm (and realistically
1694         // probably sufficient), so use them.
1695         assert(CI->getBitWidth() <= 64 &&
1696                "large intrinsic immediates not handled");
1697         MIB.addImm(CI->getSExtValue());
1698       } else {
1699         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
1700       }
1701     } else {
1702       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
1703       if (VRegs.size() > 1)
1704         return false;
1705       MIB.addUse(VRegs[0]);
1706     }
1707   }
1708 
1709   // Add a MachineMemOperand if it is a target mem intrinsic.
1710   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1711   TargetLowering::IntrinsicInfo Info;
1712   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1713   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1714     Align Alignment = Info.align.getValueOr(
1715         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
1716 
1717     uint64_t Size = Info.memVT.getStoreSize();
1718     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1719                                                Info.flags, Size, Alignment));
1720   }
1721 
1722   return true;
1723 }
1724 
1725 bool IRTranslator::translateInvoke(const User &U,
1726                                    MachineIRBuilder &MIRBuilder) {
1727   const InvokeInst &I = cast<InvokeInst>(U);
1728   MCContext &Context = MF->getContext();
1729 
1730   const BasicBlock *ReturnBB = I.getSuccessor(0);
1731   const BasicBlock *EHPadBB = I.getSuccessor(1);
1732 
1733   const Value *Callee = I.getCalledValue();
1734   const Function *Fn = dyn_cast<Function>(Callee);
1735   if (isa<InlineAsm>(Callee))
1736     return false;
1737 
1738   // FIXME: support invoking patchpoint and statepoint intrinsics.
1739   if (Fn && Fn->isIntrinsic())
1740     return false;
1741 
1742   // FIXME: support whatever these are.
1743   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1744     return false;
1745 
1746   // FIXME: support control flow guard targets.
1747   if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
1748     return false;
1749 
1750   // FIXME: support Windows exception handling.
1751   if (!isa<LandingPadInst>(EHPadBB->front()))
1752     return false;
1753 
1754   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1755   // the region covered by the try.
1756   MCSymbol *BeginSymbol = Context.createTempSymbol();
1757   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1758 
1759   if (!translateCallBase(I, MIRBuilder))
1760     return false;
1761 
1762   MCSymbol *EndSymbol = Context.createTempSymbol();
1763   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1764 
1765   // FIXME: track probabilities.
1766   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1767                     &ReturnMBB = getMBB(*ReturnBB);
1768   MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1769   MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1770   MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1771   MIRBuilder.buildBr(ReturnMBB);
1772 
1773   return true;
1774 }
1775 
1776 bool IRTranslator::translateCallBr(const User &U,
1777                                    MachineIRBuilder &MIRBuilder) {
1778   // FIXME: Implement this.
1779   return false;
1780 }
1781 
1782 bool IRTranslator::translateLandingPad(const User &U,
1783                                        MachineIRBuilder &MIRBuilder) {
1784   const LandingPadInst &LP = cast<LandingPadInst>(U);
1785 
1786   MachineBasicBlock &MBB = MIRBuilder.getMBB();
1787 
1788   MBB.setIsEHPad();
1789 
1790   // If there aren't registers to copy the values into (e.g., during SjLj
1791   // exceptions), then don't bother.
1792   auto &TLI = *MF->getSubtarget().getTargetLowering();
1793   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1794   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1795       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1796     return true;
1797 
1798   // If landingpad's return type is token type, we don't create DAG nodes
1799   // for its exception pointer and selector value. The extraction of exception
1800   // pointer or selector value from token type landingpads is not currently
1801   // supported.
1802   if (LP.getType()->isTokenTy())
1803     return true;
1804 
1805   // Add a label to mark the beginning of the landing pad.  Deletion of the
1806   // landing pad can thus be detected via the MachineModuleInfo.
1807   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1808     .addSym(MF->addLandingPad(&MBB));
1809 
1810   LLT Ty = getLLTForType(*LP.getType(), *DL);
1811   Register Undef = MRI->createGenericVirtualRegister(Ty);
1812   MIRBuilder.buildUndef(Undef);
1813 
1814   SmallVector<LLT, 2> Tys;
1815   for (Type *Ty : cast<StructType>(LP.getType())->elements())
1816     Tys.push_back(getLLTForType(*Ty, *DL));
1817   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1818 
1819   // Mark exception register as live in.
1820   Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1821   if (!ExceptionReg)
1822     return false;
1823 
1824   MBB.addLiveIn(ExceptionReg);
1825   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1826   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1827 
1828   Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1829   if (!SelectorReg)
1830     return false;
1831 
1832   MBB.addLiveIn(SelectorReg);
1833   Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1834   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1835   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1836 
1837   return true;
1838 }
1839 
1840 bool IRTranslator::translateAlloca(const User &U,
1841                                    MachineIRBuilder &MIRBuilder) {
1842   auto &AI = cast<AllocaInst>(U);
1843 
1844   if (AI.isSwiftError())
1845     return true;
1846 
1847   if (AI.isStaticAlloca()) {
1848     Register Res = getOrCreateVReg(AI);
1849     int FI = getOrCreateFrameIndex(AI);
1850     MIRBuilder.buildFrameIndex(Res, FI);
1851     return true;
1852   }
1853 
1854   // FIXME: support stack probing for Windows.
1855   if (MF->getTarget().getTargetTriple().isOSWindows())
1856     return false;
1857 
1858   // Now we're in the harder dynamic case.
1859   Register NumElts = getOrCreateVReg(*AI.getArraySize());
1860   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1861   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1862   if (MRI->getType(NumElts) != IntPtrTy) {
1863     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1864     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1865     NumElts = ExtElts;
1866   }
1867 
1868   Type *Ty = AI.getAllocatedType();
1869 
1870   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1871   Register TySize =
1872       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
1873   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1874 
  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
1878   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
1879   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
1880   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
1881                                       MachineInstr::NoUWrap);
1882   auto AlignCst =
1883       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
1884   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
1885 
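  // If the requested alignment does not exceed the stack alignment, the
  // dynamically allocated block is already sufficiently aligned, so tell the
  // target that no extra realignment is needed.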
1886   Align Alignment = max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
1887   if (Alignment <= StackAlign)
1888     Alignment = Align(1);
1889   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
1890 
1891   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
1892   assert(MF->getFrameInfo().hasVarSizedObjects());
1893   return true;
1894 }
1895 
1896 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1897   // FIXME: We may need more info about the type. Because of how LLT works,
1898   // we're completely discarding the i64/double distinction here (amongst
1899   // others). Fortunately the ABIs I know of where that matters don't use va_arg
1900   // anyway but that's not guaranteed.
1901   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
1902                         {getOrCreateVReg(*U.getOperand(0)),
1903                          uint64_t(DL->getABITypeAlignment(U.getType()))});
1904   return true;
1905 }
1906 
1907 bool IRTranslator::translateInsertElement(const User &U,
1908                                           MachineIRBuilder &MIRBuilder) {
1909   // If it is a <1 x Ty> vector, use the scalar as it is
1910   // not a legal vector type in LLT.
1911   if (cast<VectorType>(U.getType())->getNumElements() == 1) {
1912     Register Elt = getOrCreateVReg(*U.getOperand(1));
1913     auto &Regs = *VMap.getVRegs(U);
1914     if (Regs.empty()) {
1915       Regs.push_back(Elt);
1916       VMap.getOffsets(U)->push_back(0);
1917     } else {
1918       MIRBuilder.buildCopy(Regs[0], Elt);
1919     }
1920     return true;
1921   }
1922 
1923   Register Res = getOrCreateVReg(U);
1924   Register Val = getOrCreateVReg(*U.getOperand(0));
1925   Register Elt = getOrCreateVReg(*U.getOperand(1));
1926   Register Idx = getOrCreateVReg(*U.getOperand(2));
1927   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1928   return true;
1929 }
1930 
1931 bool IRTranslator::translateExtractElement(const User &U,
1932                                            MachineIRBuilder &MIRBuilder) {
1933   // If it is a <1 x Ty> vector, use the scalar as it is
1934   // not a legal vector type in LLT.
1935   if (cast<VectorType>(U.getOperand(0)->getType())->getNumElements() == 1) {
1936     Register Elt = getOrCreateVReg(*U.getOperand(0));
1937     auto &Regs = *VMap.getVRegs(U);
1938     if (Regs.empty()) {
1939       Regs.push_back(Elt);
1940       VMap.getOffsets(U)->push_back(0);
1941     } else {
1942       MIRBuilder.buildCopy(Regs[0], Elt);
1943     }
1944     return true;
1945   }
1946   Register Res = getOrCreateVReg(U);
1947   Register Val = getOrCreateVReg(*U.getOperand(0));
1948   const auto &TLI = *MF->getSubtarget().getTargetLowering();
1949   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1950   Register Idx;
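  // Canonicalize the index operand to the target's preferred vector index
  // width: constant indices are rewritten directly, anything else is
  // sign-extended or truncated below.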
1951   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1952     if (CI->getBitWidth() != PreferredVecIdxWidth) {
1953       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1954       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1955       Idx = getOrCreateVReg(*NewIdxCI);
1956     }
1957   }
1958   if (!Idx)
1959     Idx = getOrCreateVReg(*U.getOperand(1));
1960   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1961     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1962     Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
1963   }
1964   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1965   return true;
1966 }
1967 
1968 bool IRTranslator::translateShuffleVector(const User &U,
1969                                           MachineIRBuilder &MIRBuilder) {
1970   ArrayRef<int> Mask;
1971   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
1972     Mask = SVI->getShuffleMask();
1973   else
1974     Mask = cast<ConstantExpr>(U).getShuffleMask();
1975   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
1976   MIRBuilder
1977       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
1978                   {getOrCreateVReg(*U.getOperand(0)),
1979                    getOrCreateVReg(*U.getOperand(1))})
1980       .addShuffleMask(MaskAlloc);
1981   return true;
1982 }
1983 
1984 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1985   const PHINode &PI = cast<PHINode>(U);
1986 
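  // Create an empty G_PHI for each component register now; the incoming
  // (value, predecessor) operands are filled in by finishPendingPhis once all
  // blocks have been translated, since the incoming vregs may not exist yet.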
1987   SmallVector<MachineInstr *, 4> Insts;
1988   for (auto Reg : getOrCreateVRegs(PI)) {
1989     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1990     Insts.push_back(MIB.getInstr());
1991   }
1992 
1993   PendingPHIs.emplace_back(&PI, std::move(Insts));
1994   return true;
1995 }
1996 
1997 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1998                                           MachineIRBuilder &MIRBuilder) {
1999   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2000 
2001   if (I.isWeak())
2002     return false;
2003 
2004   auto &TLI = *MF->getSubtarget().getTargetLowering();
2005   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2006 
2007   Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);
2009 
2010   auto Res = getOrCreateVRegs(I);
2011   Register OldValRes = Res[0];
2012   Register SuccessRes = Res[1];
2013   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2014   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2015   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2016 
2017   AAMDNodes AAMetadata;
2018   I.getAAMetadata(AAMetadata);
2019 
2020   MIRBuilder.buildAtomicCmpXchgWithSuccess(
2021       OldValRes, SuccessRes, Addr, Cmp, NewVal,
2022       *MF->getMachineMemOperand(
2023           MachinePointerInfo(I.getPointerOperand()), Flags,
2024           DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr,
2025           I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering()));
2026   return true;
2027 }
2028 
2029 bool IRTranslator::translateAtomicRMW(const User &U,
2030                                       MachineIRBuilder &MIRBuilder) {
2031   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2032   auto &TLI = *MF->getSubtarget().getTargetLowering();
2033   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2034 
2035   Type *ResType = I.getType();
2036 
2037   Register Res = getOrCreateVReg(I);
2038   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2039   Register Val = getOrCreateVReg(*I.getValOperand());
2040 
2041   unsigned Opcode = 0;
2042   switch (I.getOperation()) {
2043   default:
2044     return false;
2045   case AtomicRMWInst::Xchg:
2046     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2047     break;
2048   case AtomicRMWInst::Add:
2049     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2050     break;
2051   case AtomicRMWInst::Sub:
2052     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2053     break;
2054   case AtomicRMWInst::And:
2055     Opcode = TargetOpcode::G_ATOMICRMW_AND;
2056     break;
2057   case AtomicRMWInst::Nand:
2058     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2059     break;
2060   case AtomicRMWInst::Or:
2061     Opcode = TargetOpcode::G_ATOMICRMW_OR;
2062     break;
2063   case AtomicRMWInst::Xor:
2064     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2065     break;
2066   case AtomicRMWInst::Max:
2067     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2068     break;
2069   case AtomicRMWInst::Min:
2070     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2071     break;
2072   case AtomicRMWInst::UMax:
2073     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2074     break;
2075   case AtomicRMWInst::UMin:
2076     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2077     break;
2078   case AtomicRMWInst::FAdd:
2079     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2080     break;
2081   case AtomicRMWInst::FSub:
2082     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2083     break;
2084   }
2085 
2086   AAMDNodes AAMetadata;
2087   I.getAAMetadata(AAMetadata);
2088 
2089   MIRBuilder.buildAtomicRMW(
2090       Opcode, Res, Addr, Val,
2091       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2092                                 Flags, DL->getTypeStoreSize(ResType),
2093                                 getMemOpAlign(I), AAMetadata, nullptr,
2094                                 I.getSyncScopeID(), I.getOrdering()));
2095   return true;
2096 }
2097 
2098 bool IRTranslator::translateFence(const User &U,
2099                                   MachineIRBuilder &MIRBuilder) {
2100   const FenceInst &Fence = cast<FenceInst>(U);
2101   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2102                         Fence.getSyncScopeID());
2103   return true;
2104 }
2105 
2106 void IRTranslator::finishPendingPhis() {
2107 #ifndef NDEBUG
2108   DILocationVerifier Verifier;
2109   GISelObserverWrapper WrapperObserver(&Verifier);
2110   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2111 #endif // ifndef NDEBUG
2112   for (auto &Phi : PendingPHIs) {
2113     const PHINode *PI = Phi.first;
2114     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2115     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2116     EntryBuilder->setDebugLoc(PI->getDebugLoc());
2117 #ifndef NDEBUG
2118     Verifier.setCurrentInst(PI);
2119 #endif // ifndef NDEBUG
2120 
2121     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
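    // An IR predecessor can map to several machine blocks, and the same IR
    // block may appear more than once as a predecessor, so remember which
    // machine predecessors have already been given incoming operands.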
2122     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2123       auto IRPred = PI->getIncomingBlock(i);
2124       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2125       for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2126         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2127           continue;
2128         SeenPreds.insert(Pred);
2129         for (unsigned j = 0; j < ValRegs.size(); ++j) {
2130           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2131           MIB.addUse(ValRegs[j]);
2132           MIB.addMBB(Pred);
2133         }
2134       }
2135     }
2136   }
2137 }
2138 
2139 bool IRTranslator::valueIsSplit(const Value &V,
2140                                 SmallVectorImpl<uint64_t> *Offsets) {
2141   SmallVector<LLT, 4> SplitTys;
2142   if (Offsets && !Offsets->empty())
2143     Offsets->clear();
2144   computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2145   return SplitTys.size() > 1;
2146 }
2147 
2148 bool IRTranslator::translate(const Instruction &Inst) {
2149   CurBuilder->setDebugLoc(Inst.getDebugLoc());
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour, set the line to 0.
2152   if (const DebugLoc &DL = Inst.getDebugLoc())
2153     EntryBuilder->setDebugLoc(
2154         DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
2155   else
2156     EntryBuilder->setDebugLoc(DebugLoc());
2157 
2158   switch (Inst.getOpcode()) {
2159 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2160   case Instruction::OPCODE:                                                    \
2161     return translate##OPCODE(Inst, *CurBuilder.get());
2162 #include "llvm/IR/Instruction.def"
2163   default:
2164     return false;
2165   }
2166 }
2167 
2168 bool IRTranslator::translate(const Constant &C, Register Reg) {
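  // Constants are materialized with EntryBuilder so that their definitions
  // end up in the entry block and dominate every use in the function.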
2169   if (auto CI = dyn_cast<ConstantInt>(&C))
2170     EntryBuilder->buildConstant(Reg, *CI);
2171   else if (auto CF = dyn_cast<ConstantFP>(&C))
2172     EntryBuilder->buildFConstant(Reg, *CF);
2173   else if (isa<UndefValue>(C))
2174     EntryBuilder->buildUndef(Reg);
2175   else if (isa<ConstantPointerNull>(C))
2176     EntryBuilder->buildConstant(Reg, 0);
2177   else if (auto GV = dyn_cast<GlobalValue>(&C))
2178     EntryBuilder->buildGlobalValue(Reg, GV);
2179   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2180     if (!CAZ->getType()->isVectorTy())
2181       return false;
2182     // Return the scalar if it is a <1 x Ty> vector.
2183     if (CAZ->getNumElements() == 1)
2184       return translate(*CAZ->getElementValue(0u), Reg);
2185     SmallVector<Register, 4> Ops;
2186     for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
2187       Constant &Elt = *CAZ->getElementValue(i);
2188       Ops.push_back(getOrCreateVReg(Elt));
2189     }
2190     EntryBuilder->buildBuildVector(Reg, Ops);
2191   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2192     // Return the scalar if it is a <1 x Ty> vector.
2193     if (CV->getNumElements() == 1)
2194       return translate(*CV->getElementAsConstant(0), Reg);
2195     SmallVector<Register, 4> Ops;
2196     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2197       Constant &Elt = *CV->getElementAsConstant(i);
2198       Ops.push_back(getOrCreateVReg(Elt));
2199     }
2200     EntryBuilder->buildBuildVector(Reg, Ops);
2201   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2202     switch(CE->getOpcode()) {
2203 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2204   case Instruction::OPCODE:                                                    \
2205     return translate##OPCODE(*CE, *EntryBuilder.get());
2206 #include "llvm/IR/Instruction.def"
2207     default:
2208       return false;
2209     }
2210   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
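    // Return the scalar if it is a <1 x Ty> vector.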
2211     if (CV->getNumOperands() == 1)
2212       return translate(*CV->getOperand(0), Reg);
2213     SmallVector<Register, 4> Ops;
2214     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2215       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2216     }
2217     EntryBuilder->buildBuildVector(Reg, Ops);
2218   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2219     EntryBuilder->buildBlockAddress(Reg, BA);
2220   } else
2221     return false;
2222 
2223   return true;
2224 }
2225 
2226 void IRTranslator::finalizeBasicBlock() {
2227   for (auto &JTCase : SL->JTCases) {
2228     // Emit header first, if it wasn't already emitted.
2229     if (!JTCase.first.Emitted)
2230       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
2231 
2232     emitJumpTable(JTCase.second, JTCase.second.MBB);
2233   }
2234   SL->JTCases.clear();
2235 }
2236 
2237 void IRTranslator::finalizeFunction() {
2238   // Release the memory used by the different maps we
2239   // needed during the translation.
2240   PendingPHIs.clear();
2241   VMap.reset();
2242   FrameIndices.clear();
2243   MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
2247   EntryBuilder.reset();
2248   CurBuilder.reset();
2249   FuncInfo.clear();
2250 }
2251 
2252 /// Returns true if a BasicBlock \p BB within a variadic function contains a
2253 /// variadic musttail call.
2254 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
2255   if (!IsVarArg)
2256     return false;
2257 
  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
2260   return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
2261     const auto *CI = dyn_cast<CallInst>(&I);
2262     return CI && CI->isMustTailCall();
2263   });
2264 }
2265 
2266 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
2267   MF = &CurMF;
2268   const Function &F = MF->getFunction();
2269   if (F.empty())
2270     return false;
2271   GISelCSEAnalysisWrapper &Wrapper =
2272       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
2273   // Set the CSEConfig and run the analysis.
2274   GISelCSEInfo *CSEInfo = nullptr;
2275   TPC = &getAnalysis<TargetPassConfig>();
2276   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
2277                        ? EnableCSEInIRTranslator
2278                        : TPC->isGISelCSEEnabled();
2279 
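  // When CSE is enabled, both builders share a single GISelCSEInfo so that
  // CSE-able instructions emitted during translation are deduplicated on the
  // fly.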
2280   if (EnableCSE) {
2281     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2282     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
2283     EntryBuilder->setCSEInfo(CSEInfo);
2284     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2285     CurBuilder->setCSEInfo(CSEInfo);
2286   } else {
2287     EntryBuilder = std::make_unique<MachineIRBuilder>();
2288     CurBuilder = std::make_unique<MachineIRBuilder>();
2289   }
2290   CLI = MF->getSubtarget().getCallLowering();
2291   CurBuilder->setMF(*MF);
2292   EntryBuilder->setMF(*MF);
2293   MRI = &MF->getRegInfo();
2294   DL = &F.getParent()->getDataLayout();
2295   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2296   FuncInfo.MF = MF;
2297   FuncInfo.BPI = nullptr;
2298   const auto &TLI = *MF->getSubtarget().getTargetLowering();
2299   const TargetMachine &TM = MF->getTarget();
2300   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
2301   SL->init(TLI, TM, *DL);
2302 
2303   EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
2304 
2305   assert(PendingPHIs.empty() && "stale PHIs");
2306 
2307   if (!DL->isLittleEndian()) {
2308     // Currently we don't properly handle big endian code.
2309     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2310                                F.getSubprogram(), &F.getEntryBlock());
2311     R << "unable to translate in big endian mode";
2312     reportTranslationError(*MF, *TPC, *ORE, R);
2313   }
2314 
2315   // Release the per-function state when we return, whether we succeeded or not.
2316   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2317 
  // Set up a separate basic-block for the arguments and constants.
2319   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2320   MF->push_back(EntryBB);
2321   EntryBuilder->setMBB(*EntryBB);
2322 
2323   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2324   SwiftError.setFunction(CurMF);
2325   SwiftError.createEntriesInEntryBlock(DbgLoc);
2326 
2327   bool IsVarArg = F.isVarArg();
2328   bool HasMustTailInVarArgFn = false;
2329 
2330   // Create all blocks, in IR order, to preserve the layout.
2331   for (const BasicBlock &BB: F) {
2332     auto *&MBB = BBToMBB[&BB];
2333 
2334     MBB = MF->CreateMachineBasicBlock(&BB);
2335     MF->push_back(MBB);
2336 
2337     if (BB.hasAddressTaken())
2338       MBB->setHasAddressTaken();
2339 
2340     if (!HasMustTailInVarArgFn)
2341       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
2342   }
2343 
2344   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
2345 
2346   // Make our arguments/constants entry block fallthrough to the IR entry block.
2347   EntryBB->addSuccessor(&getMBB(F.front()));
2348 
2349   // Lower the actual args into this basic block.
2350   SmallVector<ArrayRef<Register>, 8> VRegArgs;
2351   for (const Argument &Arg: F.args()) {
2352     if (DL->getTypeStoreSize(Arg.getType()) == 0)
2353       continue; // Don't handle zero sized types.
2354     ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2355     VRegArgs.push_back(VRegs);
2356 
2357     if (Arg.hasSwiftErrorAttr()) {
2358       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2359       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2360     }
2361   }
2362 
2363   if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
2364     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2365                                F.getSubprogram(), &F.getEntryBlock());
2366     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2367     reportTranslationError(*MF, *TPC, *ORE, R);
2368     return false;
2369   }
2370 
2371   // Need to visit defs before uses when translating instructions.
2372   GISelObserverWrapper WrapperObserver;
2373   if (EnableCSE && CSEInfo)
2374     WrapperObserver.addObserver(CSEInfo);
2375   {
2376     ReversePostOrderTraversal<const Function *> RPOT(&F);
2377 #ifndef NDEBUG
2378     DILocationVerifier Verifier;
2379     WrapperObserver.addObserver(&Verifier);
2380 #endif // ifndef NDEBUG
2381     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2382     RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
2383     for (const BasicBlock *BB : RPOT) {
2384       MachineBasicBlock &MBB = getMBB(*BB);
2385       // Set the insertion point of all the following translations to
2386       // the end of this basic block.
2387       CurBuilder->setMBB(MBB);
2388       HasTailCall = false;
2389       for (const Instruction &Inst : *BB) {
2390         // If we translated a tail call in the last step, then we know
2391         // everything after the call is either a return, or something that is
2392         // handled by the call itself. (E.g. a lifetime marker or assume
2393         // intrinsic.) In this case, we should stop translating the block and
2394         // move on.
2395         if (HasTailCall)
2396           break;
2397 #ifndef NDEBUG
2398         Verifier.setCurrentInst(&Inst);
2399 #endif // ifndef NDEBUG
2400         if (translate(Inst))
2401           continue;
2402 
2403         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2404                                    Inst.getDebugLoc(), BB);
2405         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2406 
2407         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2408           std::string InstStrStorage;
2409           raw_string_ostream InstStr(InstStrStorage);
2410           InstStr << Inst;
2411 
2412           R << ": '" << InstStr.str() << "'";
2413         }
2414 
2415         reportTranslationError(*MF, *TPC, *ORE, R);
2416         return false;
2417       }
2418 
2419       finalizeBasicBlock();
2420     }
2421 #ifndef NDEBUG
2422     WrapperObserver.removeObserver(&Verifier);
2423 #endif
2424   }
2425 
2426   finishPendingPhis();
2427 
2428   SwiftError.propagateVRegs();
2429 
2430   // Merge the argument lowering and constants block with its single
2431   // successor, the LLVM-IR entry block.  We want the basic block to
2432   // be maximal.
2433   assert(EntryBB->succ_size() == 1 &&
2434          "Custom BB used for lowering should have only one successor");
2435   // Get the successor of the current entry block.
2436   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2437   assert(NewEntryBB.pred_size() == 1 &&
2438          "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
2441   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2442                     EntryBB->end());
2443 
2444   // Update the live-in information for the new entry block.
2445   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2446     NewEntryBB.addLiveIn(LiveIn);
2447   NewEntryBB.sortUniqueLiveIns();
2448 
2449   // Get rid of the now empty basic block.
2450   EntryBB->removeSuccessor(&NewEntryBB);
2451   MF->remove(EntryBB);
2452   MF->DeleteMachineBasicBlock(EntryBB);
2453 
2454   assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");
2456 
2457   // Initialize stack protector information.
2458   StackProtector &SP = getAnalysis<StackProtector>();
2459   SP.copyToMachineFrameInfo(MF->getFrameInfo());
2460 
2461   return false;
2462 }
2463