1 //===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the FastISel class.
11 //
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
15 // also not intended to be able to do much optimization, except in a few cases
16 // where doing optimizations reduces overall compile time.  For example, folding
17 // constants into immediate fields is often done, because it's cheap and it
18 // reduces the number of instructions later phases have to examine.
19 //
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support.  In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
24 //
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated.  Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time.  Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
32 // in -O0 compiles.
33 //
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators.  More complicated operations currently require
38 // target-specific code.
39 //
40 //===----------------------------------------------------------------------===//
41 
42 #include "llvm/CodeGen/Analysis.h"
43 #include "llvm/ADT/Optional.h"
44 #include "llvm/ADT/Statistic.h"
45 #include "llvm/Analysis/BranchProbabilityInfo.h"
46 #include "llvm/Analysis/Loads.h"
47 #include "llvm/Analysis/TargetLibraryInfo.h"
48 #include "llvm/CodeGen/Analysis.h"
49 #include "llvm/CodeGen/FastISel.h"
50 #include "llvm/CodeGen/FunctionLoweringInfo.h"
51 #include "llvm/CodeGen/MachineFrameInfo.h"
52 #include "llvm/CodeGen/MachineInstrBuilder.h"
53 #include "llvm/CodeGen/MachineModuleInfo.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/StackMaps.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DebugInfo.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GetElementPtrTypeIterator.h"
60 #include "llvm/IR/GlobalVariable.h"
61 #include "llvm/IR/Instructions.h"
62 #include "llvm/IR/IntrinsicInst.h"
63 #include "llvm/IR/Mangler.h"
64 #include "llvm/IR/Operator.h"
65 #include "llvm/Support/Debug.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/raw_ostream.h"
68 #include "llvm/Target/TargetInstrInfo.h"
69 #include "llvm/Target/TargetLowering.h"
70 #include "llvm/Target/TargetMachine.h"
71 #include "llvm/Target/TargetSubtargetInfo.h"
72 using namespace llvm;
73 
74 #define DEBUG_TYPE "isel"
75 
76 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
77                                          "target-independent selector");
78 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
79                                     "target-specific selector");
80 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
81 
82 void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
83                                            unsigned AttrIdx) {
84   IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
85   IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
86   IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
87   IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
88   IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
89   IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
90   IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
91   IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
92   IsSwiftSelf = CS->paramHasAttr(AttrIdx, Attribute::SwiftSelf);
93   IsSwiftError = CS->paramHasAttr(AttrIdx, Attribute::SwiftError);
94   Alignment = CS->getParamAlignment(AttrIdx);
95 }
96 
97 /// Set the current block to which generated machine instructions will be
98 /// appended, and clear the local CSE map.
99 void FastISel::startNewBlock() {
100   LocalValueMap.clear();
101 
102   // Instructions are appended to FuncInfo.MBB. If the basic block already
103   // contains labels or copies, use the last instruction as the last local
104   // value.
105   EmitStartPt = nullptr;
106   if (!FuncInfo.MBB->empty())
107     EmitStartPt = &FuncInfo.MBB->back();
108   LastLocalValue = EmitStartPt;
109 }
110 
111 bool FastISel::lowerArguments() {
112   if (!FuncInfo.CanLowerReturn)
113     // Fallback to SDISel argument lowering code to deal with sret pointer
114     // parameter.
115     return false;
116 
117   if (!fastLowerArguments())
118     return false;
119 
120   // Enter arguments into ValueMap for uses in non-entry BBs.
121   for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
122                                     E = FuncInfo.Fn->arg_end();
123        I != E; ++I) {
124     DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
125     assert(VI != LocalValueMap.end() && "Missed an argument?");
126     FuncInfo.ValueMap[&*I] = VI->second;
127   }
128   return true;
129 }
130 
131 void FastISel::flushLocalValueMap() {
132   LocalValueMap.clear();
133   LastLocalValue = EmitStartPt;
134   recomputeInsertPt();
135   SavedInsertPt = FuncInfo.InsertPt;
136 }
137 
138 bool FastISel::hasTrivialKill(const Value *V) {
139   // Don't consider constants or arguments to have trivial kills.
140   const Instruction *I = dyn_cast<Instruction>(V);
141   if (!I)
142     return false;
143 
144   // No-op casts are trivially coalesced by fast-isel.
145   if (const auto *Cast = dyn_cast<CastInst>(I))
146     if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
147         !hasTrivialKill(Cast->getOperand(0)))
148       return false;
149 
150   // Even the value might have only one use in the LLVM IR, it is possible that
151   // FastISel might fold the use into another instruction and now there is more
152   // than one use at the Machine Instruction level.
153   unsigned Reg = lookUpRegForValue(V);
154   if (Reg && !MRI.use_empty(Reg))
155     return false;
156 
157   // GEPs with all zero indices are trivially coalesced by fast-isel.
158   if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
159     if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
160       return false;
161 
162   // Only instructions with a single use in the same basic block are considered
163   // to have trivial kills.
164   return I->hasOneUse() &&
165          !(I->getOpcode() == Instruction::BitCast ||
166            I->getOpcode() == Instruction::PtrToInt ||
167            I->getOpcode() == Instruction::IntToPtr) &&
168          cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
169 }
170 
171 unsigned FastISel::getRegForValue(const Value *V) {
172   EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
173   // Don't handle non-simple values in FastISel.
174   if (!RealVT.isSimple())
175     return 0;
176 
177   // Ignore illegal types. We must do this before looking up the value
178   // in ValueMap because Arguments are given virtual registers regardless
179   // of whether FastISel can handle them.
180   MVT VT = RealVT.getSimpleVT();
181   if (!TLI.isTypeLegal(VT)) {
182     // Handle integer promotions, though, because they're common and easy.
183     if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
184       VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
185     else
186       return 0;
187   }
188 
189   // Look up the value to see if we already have a register for it.
190   unsigned Reg = lookUpRegForValue(V);
191   if (Reg)
192     return Reg;
193 
194   // In bottom-up mode, just create the virtual register which will be used
195   // to hold the value. It will be materialized later.
196   if (isa<Instruction>(V) &&
197       (!isa<AllocaInst>(V) ||
198        !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
199     return FuncInfo.InitializeRegForValue(V);
200 
201   SavePoint SaveInsertPt = enterLocalValueArea();
202 
203   // Materialize the value in a register. Emit any instructions in the
204   // local value area.
205   Reg = materializeRegForValue(V, VT);
206 
207   leaveLocalValueArea(SaveInsertPt);
208 
209   return Reg;
210 }
211 
212 unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
213   unsigned Reg = 0;
214   if (const auto *CI = dyn_cast<ConstantInt>(V)) {
215     if (CI->getValue().getActiveBits() <= 64)
216       Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
217   } else if (isa<AllocaInst>(V))
218     Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
219   else if (isa<ConstantPointerNull>(V))
220     // Translate this as an integer zero so that it can be
221     // local-CSE'd with actual integer zeros.
222     Reg = getRegForValue(
223         Constant::getNullValue(DL.getIntPtrType(V->getContext())));
224   else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
225     if (CF->isNullValue())
226       Reg = fastMaterializeFloatZero(CF);
227     else
228       // Try to emit the constant directly.
229       Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);
230 
231     if (!Reg) {
232       // Try to emit the constant by using an integer constant with a cast.
233       const APFloat &Flt = CF->getValueAPF();
234       EVT IntVT = TLI.getPointerTy(DL);
235 
236       uint64_t x[2];
237       uint32_t IntBitWidth = IntVT.getSizeInBits();
238       bool isExact;
239       (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
240                                  APFloat::rmTowardZero, &isExact);
241       if (isExact) {
242         APInt IntVal(IntBitWidth, x);
243 
244         unsigned IntegerReg =
245             getRegForValue(ConstantInt::get(V->getContext(), IntVal));
246         if (IntegerReg != 0)
247           Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
248                            /*Kill=*/false);
249       }
250     }
251   } else if (const auto *Op = dyn_cast<Operator>(V)) {
252     if (!selectOperator(Op, Op->getOpcode()))
253       if (!isa<Instruction>(Op) ||
254           !fastSelectInstruction(cast<Instruction>(Op)))
255         return 0;
256     Reg = lookUpRegForValue(Op);
257   } else if (isa<UndefValue>(V)) {
258     Reg = createResultReg(TLI.getRegClassFor(VT));
259     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
260             TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
261   }
262   return Reg;
263 }
264 
265 /// Helper for getRegForValue. This function is called when the value isn't
266 /// already available in a register and must be materialized with new
267 /// instructions.
268 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
269   unsigned Reg = 0;
270   // Give the target-specific code a try first.
271   if (isa<Constant>(V))
272     Reg = fastMaterializeConstant(cast<Constant>(V));
273 
274   // If target-specific code couldn't or didn't want to handle the value, then
275   // give target-independent code a try.
276   if (!Reg)
277     Reg = materializeConstant(V, VT);
278 
279   // Don't cache constant materializations in the general ValueMap.
280   // To do so would require tracking what uses they dominate.
281   if (Reg) {
282     LocalValueMap[V] = Reg;
283     LastLocalValue = MRI.getVRegDef(Reg);
284   }
285   return Reg;
286 }
287 
288 unsigned FastISel::lookUpRegForValue(const Value *V) {
289   // Look up the value to see if we already have a register for it. We
290   // cache values defined by Instructions across blocks, and other values
291   // only locally. This is because Instructions already have the SSA
292   // def-dominates-use requirement enforced.
293   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
294   if (I != FuncInfo.ValueMap.end())
295     return I->second;
296   return LocalValueMap[V];
297 }
298 
299 void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
300   if (!isa<Instruction>(I)) {
301     LocalValueMap[I] = Reg;
302     return;
303   }
304 
305   unsigned &AssignedReg = FuncInfo.ValueMap[I];
306   if (AssignedReg == 0)
307     // Use the new register.
308     AssignedReg = Reg;
309   else if (Reg != AssignedReg) {
310     // Arrange for uses of AssignedReg to be replaced by uses of Reg.
311     for (unsigned i = 0; i < NumRegs; i++)
312       FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
313 
314     AssignedReg = Reg;
315   }
316 }
317 
318 std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
319   unsigned IdxN = getRegForValue(Idx);
320   if (IdxN == 0)
321     // Unhandled operand. Halt "fast" selection and bail.
322     return std::pair<unsigned, bool>(0, false);
323 
324   bool IdxNIsKill = hasTrivialKill(Idx);
325 
326   // If the index is smaller or larger than intptr_t, truncate or extend it.
327   MVT PtrVT = TLI.getPointerTy(DL);
328   EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
329   if (IdxVT.bitsLT(PtrVT)) {
330     IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
331                       IdxNIsKill);
332     IdxNIsKill = true;
333   } else if (IdxVT.bitsGT(PtrVT)) {
334     IdxN =
335         fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
336     IdxNIsKill = true;
337   }
338   return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
339 }
340 
341 void FastISel::recomputeInsertPt() {
342   if (getLastLocalValue()) {
343     FuncInfo.InsertPt = getLastLocalValue();
344     FuncInfo.MBB = FuncInfo.InsertPt->getParent();
345     ++FuncInfo.InsertPt;
346   } else
347     FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
348 
349   // Now skip past any EH_LABELs, which must remain at the beginning.
350   while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
351          FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
352     ++FuncInfo.InsertPt;
353 }
354 
355 void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
356                               MachineBasicBlock::iterator E) {
357   assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
358   while (I != E) {
359     MachineInstr *Dead = &*I;
360     ++I;
361     Dead->eraseFromParent();
362     ++NumFastIselDead;
363   }
364   recomputeInsertPt();
365 }
366 
367 FastISel::SavePoint FastISel::enterLocalValueArea() {
368   MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
369   DebugLoc OldDL = DbgLoc;
370   recomputeInsertPt();
371   DbgLoc = DebugLoc();
372   SavePoint SP = {OldInsertPt, OldDL};
373   return SP;
374 }
375 
376 void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
377   if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
378     LastLocalValue = std::prev(FuncInfo.InsertPt);
379 
380   // Restore the previous insert position.
381   FuncInfo.InsertPt = OldInsertPt.InsertPt;
382   DbgLoc = OldInsertPt.DL;
383 }
384 
385 bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
386   EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
387   if (VT == MVT::Other || !VT.isSimple())
388     // Unhandled type. Halt "fast" selection and bail.
389     return false;
390 
391   // We only handle legal types. For example, on x86-32 the instruction
392   // selector contains all of the 64-bit instructions from x86-64,
393   // under the assumption that i64 won't be used if the target doesn't
394   // support it.
395   if (!TLI.isTypeLegal(VT)) {
396     // MVT::i1 is special. Allow AND, OR, or XOR because they
397     // don't require additional zeroing, which makes them easy.
398     if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
399                           ISDOpcode == ISD::XOR))
400       VT = TLI.getTypeToTransformTo(I->getContext(), VT);
401     else
402       return false;
403   }
404 
405   // Check if the first operand is a constant, and handle it as "ri".  At -O0,
406   // we don't have anything that canonicalizes operand order.
407   if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
408     if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
409       unsigned Op1 = getRegForValue(I->getOperand(1));
410       if (!Op1)
411         return false;
412       bool Op1IsKill = hasTrivialKill(I->getOperand(1));
413 
414       unsigned ResultReg =
415           fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
416                        CI->getZExtValue(), VT.getSimpleVT());
417       if (!ResultReg)
418         return false;
419 
420       // We successfully emitted code for the given LLVM Instruction.
421       updateValueMap(I, ResultReg);
422       return true;
423     }
424 
425   unsigned Op0 = getRegForValue(I->getOperand(0));
426   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
427     return false;
428   bool Op0IsKill = hasTrivialKill(I->getOperand(0));
429 
430   // Check if the second operand is a constant and handle it appropriately.
431   if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
432     uint64_t Imm = CI->getSExtValue();
433 
434     // Transform "sdiv exact X, 8" -> "sra X, 3".
435     if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
436         cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
437       Imm = Log2_64(Imm);
438       ISDOpcode = ISD::SRA;
439     }
440 
441     // Transform "urem x, pow2" -> "and x, pow2-1".
442     if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
443         isPowerOf2_64(Imm)) {
444       --Imm;
445       ISDOpcode = ISD::AND;
446     }
447 
448     unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
449                                       Op0IsKill, Imm, VT.getSimpleVT());
450     if (!ResultReg)
451       return false;
452 
453     // We successfully emitted code for the given LLVM Instruction.
454     updateValueMap(I, ResultReg);
455     return true;
456   }
457 
458   // Check if the second operand is a constant float.
459   if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
460     unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
461                                      ISDOpcode, Op0, Op0IsKill, CF);
462     if (ResultReg) {
463       // We successfully emitted code for the given LLVM Instruction.
464       updateValueMap(I, ResultReg);
465       return true;
466     }
467   }
468 
469   unsigned Op1 = getRegForValue(I->getOperand(1));
470   if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
471     return false;
472   bool Op1IsKill = hasTrivialKill(I->getOperand(1));
473 
474   // Now we have both operands in registers. Emit the instruction.
475   unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
476                                    ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
477   if (!ResultReg)
478     // Target-specific code wasn't able to find a machine opcode for
479     // the given ISD opcode and type. Halt "fast" selection and bail.
480     return false;
481 
482   // We successfully emitted code for the given LLVM Instruction.
483   updateValueMap(I, ResultReg);
484   return true;
485 }
486 
487 bool FastISel::selectGetElementPtr(const User *I) {
488   unsigned N = getRegForValue(I->getOperand(0));
489   if (!N) // Unhandled operand. Halt "fast" selection and bail.
490     return false;
491   bool NIsKill = hasTrivialKill(I->getOperand(0));
492 
493   // Keep a running tab of the total offset to coalesce multiple N = N + Offset
494   // into a single N = N + TotalOffset.
495   uint64_t TotalOffs = 0;
496   // FIXME: What's a good SWAG number for MaxOffs?
497   uint64_t MaxOffs = 2048;
498   MVT VT = TLI.getPointerTy(DL);
499   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
500        GTI != E; ++GTI) {
501     const Value *Idx = GTI.getOperand();
502     if (auto *StTy = dyn_cast<StructType>(*GTI)) {
503       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
504       if (Field) {
505         // N = N + Offset
506         TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
507         if (TotalOffs >= MaxOffs) {
508           N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
509           if (!N) // Unhandled operand. Halt "fast" selection and bail.
510             return false;
511           NIsKill = true;
512           TotalOffs = 0;
513         }
514       }
515     } else {
516       Type *Ty = GTI.getIndexedType();
517 
518       // If this is a constant subscript, handle it quickly.
519       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
520         if (CI->isZero())
521           continue;
522         // N = N + Offset
523         uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
524         TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
525         if (TotalOffs >= MaxOffs) {
526           N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
527           if (!N) // Unhandled operand. Halt "fast" selection and bail.
528             return false;
529           NIsKill = true;
530           TotalOffs = 0;
531         }
532         continue;
533       }
534       if (TotalOffs) {
535         N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
536         if (!N) // Unhandled operand. Halt "fast" selection and bail.
537           return false;
538         NIsKill = true;
539         TotalOffs = 0;
540       }
541 
542       // N = N + Idx * ElementSize;
543       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
544       std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
545       unsigned IdxN = Pair.first;
546       bool IdxNIsKill = Pair.second;
547       if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
548         return false;
549 
550       if (ElementSize != 1) {
551         IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
552         if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
553           return false;
554         IdxNIsKill = true;
555       }
556       N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
557       if (!N) // Unhandled operand. Halt "fast" selection and bail.
558         return false;
559     }
560   }
561   if (TotalOffs) {
562     N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
563     if (!N) // Unhandled operand. Halt "fast" selection and bail.
564       return false;
565   }
566 
567   // We successfully emitted code for the given LLVM Instruction.
568   updateValueMap(I, N);
569   return true;
570 }
571 
572 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
573                                    const CallInst *CI, unsigned StartIdx) {
574   for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
575     Value *Val = CI->getArgOperand(i);
576     // Check for constants and encode them with a StackMaps::ConstantOp prefix.
577     if (const auto *C = dyn_cast<ConstantInt>(Val)) {
578       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
579       Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
580     } else if (isa<ConstantPointerNull>(Val)) {
581       Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
582       Ops.push_back(MachineOperand::CreateImm(0));
583     } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
584       // Values coming from a stack location also require a sepcial encoding,
585       // but that is added later on by the target specific frame index
586       // elimination implementation.
587       auto SI = FuncInfo.StaticAllocaMap.find(AI);
588       if (SI != FuncInfo.StaticAllocaMap.end())
589         Ops.push_back(MachineOperand::CreateFI(SI->second));
590       else
591         return false;
592     } else {
593       unsigned Reg = getRegForValue(Val);
594       if (!Reg)
595         return false;
596       Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
597     }
598   }
599   return true;
600 }
601 
602 bool FastISel::selectStackmap(const CallInst *I) {
603   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
604   //                                  [live variables...])
605   assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
606          "Stackmap cannot return a value.");
607 
608   // The stackmap intrinsic only records the live variables (the arguments
609   // passed to it) and emits NOPS (if requested). Unlike the patchpoint
610   // intrinsic, this won't be lowered to a function call. This means we don't
611   // have to worry about calling conventions and target-specific lowering code.
612   // Instead we perform the call lowering right here.
613   //
614   // CALLSEQ_START(0...)
615   // STACKMAP(id, nbytes, ...)
616   // CALLSEQ_END(0, 0)
617   //
618   SmallVector<MachineOperand, 32> Ops;
619 
620   // Add the <id> and <numBytes> constants.
621   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
622          "Expected a constant integer.");
623   const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
624   Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
625 
626   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
627          "Expected a constant integer.");
628   const auto *NumBytes =
629       cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
630   Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
631 
632   // Push live variables for the stack map (skipping the first two arguments
633   // <id> and <numBytes>).
634   if (!addStackMapLiveVars(Ops, I, 2))
635     return false;
636 
637   // We are not adding any register mask info here, because the stackmap doesn't
638   // clobber anything.
639 
640   // Add scratch registers as implicit def and early clobber.
641   CallingConv::ID CC = I->getCallingConv();
642   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
643   for (unsigned i = 0; ScratchRegs[i]; ++i)
644     Ops.push_back(MachineOperand::CreateReg(
645         ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
646         /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
647 
648   // Issue CALLSEQ_START
649   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
650   auto Builder =
651       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
652   const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
653   for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
654     Builder.addImm(0);
655 
656   // Issue STACKMAP.
657   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
658                                     TII.get(TargetOpcode::STACKMAP));
659   for (auto const &MO : Ops)
660     MIB.addOperand(MO);
661 
662   // Issue CALLSEQ_END
663   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
664   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
665       .addImm(0)
666       .addImm(0);
667 
668   // Inform the Frame Information that we have a stackmap in this function.
669   FuncInfo.MF->getFrameInfo()->setHasStackMap();
670 
671   return true;
672 }
673 
674 /// \brief Lower an argument list according to the target calling convention.
675 ///
676 /// This is a helper for lowering intrinsics that follow a target calling
677 /// convention or require stack pointer adjustment. Only a subset of the
678 /// intrinsic's operands need to participate in the calling convention.
679 bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
680                                  unsigned NumArgs, const Value *Callee,
681                                  bool ForceRetVoidTy, CallLoweringInfo &CLI) {
682   ArgListTy Args;
683   Args.reserve(NumArgs);
684 
685   // Populate the argument list.
686   // Attributes for args start at offset 1, after the return attribute.
687   ImmutableCallSite CS(CI);
688   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
689        ArgI != ArgE; ++ArgI) {
690     Value *V = CI->getOperand(ArgI);
691 
692     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
693 
694     ArgListEntry Entry;
695     Entry.Val = V;
696     Entry.Ty = V->getType();
697     Entry.setAttributes(&CS, AttrI);
698     Args.push_back(Entry);
699   }
700 
701   Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
702                                : CI->getType();
703   CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);
704 
705   return lowerCallTo(CLI);
706 }
707 
708 FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
709     const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
710     const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
711   SmallString<32> MangledName;
712   Mangler::getNameWithPrefix(MangledName, Target, DL);
713   MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
714   return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
715 }
716 
717 bool FastISel::selectPatchpoint(const CallInst *I) {
718   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
719   //                                                 i32 <numBytes>,
720   //                                                 i8* <target>,
721   //                                                 i32 <numArgs>,
722   //                                                 [Args...],
723   //                                                 [live variables...])
724   CallingConv::ID CC = I->getCallingConv();
725   bool IsAnyRegCC = CC == CallingConv::AnyReg;
726   bool HasDef = !I->getType()->isVoidTy();
727   Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
728 
729   // Get the real number of arguments participating in the call <numArgs>
730   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
731          "Expected a constant integer.");
732   const auto *NumArgsVal =
733       cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
734   unsigned NumArgs = NumArgsVal->getZExtValue();
735 
736   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
737   // This includes all meta-operands up to but not including CC.
738   unsigned NumMetaOpers = PatchPointOpers::CCPos;
739   assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
740          "Not enough arguments provided to the patchpoint intrinsic");
741 
742   // For AnyRegCC the arguments are lowered later on manually.
743   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
744   CallLoweringInfo CLI;
745   CLI.setIsPatchPoint();
746   if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
747     return false;
748 
749   assert(CLI.Call && "No call instruction specified.");
750 
751   SmallVector<MachineOperand, 32> Ops;
752 
753   // Add an explicit result reg if we use the anyreg calling convention.
754   if (IsAnyRegCC && HasDef) {
755     assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
756     CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
757     CLI.NumResultRegs = 1;
758     Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
759   }
760 
761   // Add the <id> and <numBytes> constants.
762   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
763          "Expected a constant integer.");
764   const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
765   Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));
766 
767   assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
768          "Expected a constant integer.");
769   const auto *NumBytes =
770       cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
771   Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
772 
773   // Add the call target.
774   if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
775     uint64_t CalleeConstAddr =
776       cast<ConstantInt>(C->getOperand(0))->getZExtValue();
777     Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
778   } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
779     if (C->getOpcode() == Instruction::IntToPtr) {
780       uint64_t CalleeConstAddr =
781         cast<ConstantInt>(C->getOperand(0))->getZExtValue();
782       Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
783     } else
784       llvm_unreachable("Unsupported ConstantExpr.");
785   } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
786     Ops.push_back(MachineOperand::CreateGA(GV, 0));
787   } else if (isa<ConstantPointerNull>(Callee))
788     Ops.push_back(MachineOperand::CreateImm(0));
789   else
790     llvm_unreachable("Unsupported callee address.");
791 
792   // Adjust <numArgs> to account for any arguments that have been passed on
793   // the stack instead.
794   unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
795   Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));
796 
797   // Add the calling convention
798   Ops.push_back(MachineOperand::CreateImm((unsigned)CC));
799 
800   // Add the arguments we omitted previously. The register allocator should
801   // place these in any free register.
802   if (IsAnyRegCC) {
803     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
804       unsigned Reg = getRegForValue(I->getArgOperand(i));
805       if (!Reg)
806         return false;
807       Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
808     }
809   }
810 
811   // Push the arguments from the call instruction.
812   for (auto Reg : CLI.OutRegs)
813     Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
814 
815   // Push live variables for the stack map.
816   if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
817     return false;
818 
819   // Push the register mask info.
820   Ops.push_back(MachineOperand::CreateRegMask(
821       TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
822 
823   // Add scratch registers as implicit def and early clobber.
824   const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
825   for (unsigned i = 0; ScratchRegs[i]; ++i)
826     Ops.push_back(MachineOperand::CreateReg(
827         ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
828         /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));
829 
830   // Add implicit defs (return values).
831   for (auto Reg : CLI.InRegs)
832     Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
833                                             /*IsImpl=*/true));
834 
835   // Insert the patchpoint instruction before the call generated by the target.
836   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
837                                     TII.get(TargetOpcode::PATCHPOINT));
838 
839   for (auto &MO : Ops)
840     MIB.addOperand(MO);
841 
842   MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
843 
844   // Delete the original call instruction.
845   CLI.Call->eraseFromParent();
846 
847   // Inform the Frame Information that we have a patchpoint in this function.
848   FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
849 
850   if (CLI.NumResultRegs)
851     updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
852   return true;
853 }
854 
855 /// Returns an AttributeSet representing the attributes applied to the return
856 /// value of the given call.
857 static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
858   SmallVector<Attribute::AttrKind, 2> Attrs;
859   if (CLI.RetSExt)
860     Attrs.push_back(Attribute::SExt);
861   if (CLI.RetZExt)
862     Attrs.push_back(Attribute::ZExt);
863   if (CLI.IsInReg)
864     Attrs.push_back(Attribute::InReg);
865 
866   return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
867                            Attrs);
868 }
869 
870 bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
871                            unsigned NumArgs) {
872   MCContext &Ctx = MF->getContext();
873   SmallString<32> MangledName;
874   Mangler::getNameWithPrefix(MangledName, SymName, DL);
875   MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
876   return lowerCallTo(CI, Sym, NumArgs);
877 }
878 
879 bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
880                            unsigned NumArgs) {
881   ImmutableCallSite CS(CI);
882 
883   FunctionType *FTy = CS.getFunctionType();
884   Type *RetTy = CS.getType();
885 
886   ArgListTy Args;
887   Args.reserve(NumArgs);
888 
889   // Populate the argument list.
890   // Attributes for args start at offset 1, after the return attribute.
891   for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
892     Value *V = CI->getOperand(ArgI);
893 
894     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
895 
896     ArgListEntry Entry;
897     Entry.Val = V;
898     Entry.Ty = V->getType();
899     Entry.setAttributes(&CS, ArgI + 1);
900     Args.push_back(Entry);
901   }
902 
903   CallLoweringInfo CLI;
904   CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
905 
906   return lowerCallTo(CLI);
907 }
908 
909 bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
910   // Handle the incoming return values from the call.
911   CLI.clearIns();
912   SmallVector<EVT, 4> RetTys;
913   ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
914 
915   SmallVector<ISD::OutputArg, 4> Outs;
916   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
917 
918   bool CanLowerReturn = TLI.CanLowerReturn(
919       CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
920 
921   // FIXME: sret demotion isn't supported yet - bail out.
922   if (!CanLowerReturn)
923     return false;
924 
925   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
926     EVT VT = RetTys[I];
927     MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
928     unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
929     for (unsigned i = 0; i != NumRegs; ++i) {
930       ISD::InputArg MyFlags;
931       MyFlags.VT = RegisterVT;
932       MyFlags.ArgVT = VT;
933       MyFlags.Used = CLI.IsReturnValueUsed;
934       if (CLI.RetSExt)
935         MyFlags.Flags.setSExt();
936       if (CLI.RetZExt)
937         MyFlags.Flags.setZExt();
938       if (CLI.IsInReg)
939         MyFlags.Flags.setInReg();
940       CLI.Ins.push_back(MyFlags);
941     }
942   }
943 
944   // Handle all of the outgoing arguments.
945   CLI.clearOuts();
946   for (auto &Arg : CLI.getArgs()) {
947     Type *FinalType = Arg.Ty;
948     if (Arg.IsByVal)
949       FinalType = cast<PointerType>(Arg.Ty)->getElementType();
950     bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
951         FinalType, CLI.CallConv, CLI.IsVarArg);
952 
953     ISD::ArgFlagsTy Flags;
954     if (Arg.IsZExt)
955       Flags.setZExt();
956     if (Arg.IsSExt)
957       Flags.setSExt();
958     if (Arg.IsInReg)
959       Flags.setInReg();
960     if (Arg.IsSRet)
961       Flags.setSRet();
962     if (Arg.IsSwiftSelf)
963       Flags.setSwiftSelf();
964     if (Arg.IsSwiftError)
965       Flags.setSwiftError();
966     if (Arg.IsByVal)
967       Flags.setByVal();
968     if (Arg.IsInAlloca) {
969       Flags.setInAlloca();
970       // Set the byval flag for CCAssignFn callbacks that don't know about
971       // inalloca. This way we can know how many bytes we should've allocated
972       // and how many bytes a callee cleanup function will pop.  If we port
973       // inalloca to more targets, we'll have to add custom inalloca handling in
974       // the various CC lowering callbacks.
975       Flags.setByVal();
976     }
977     if (Arg.IsByVal || Arg.IsInAlloca) {
978       PointerType *Ty = cast<PointerType>(Arg.Ty);
979       Type *ElementTy = Ty->getElementType();
980       unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
981       // For ByVal, alignment should come from FE. BE will guess if this info is
982       // not there, but there are cases it cannot get right.
983       unsigned FrameAlign = Arg.Alignment;
984       if (!FrameAlign)
985         FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
986       Flags.setByValSize(FrameSize);
987       Flags.setByValAlign(FrameAlign);
988     }
989     if (Arg.IsNest)
990       Flags.setNest();
991     if (NeedsRegBlock)
992       Flags.setInConsecutiveRegs();
993     unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
994     Flags.setOrigAlign(OriginalAlignment);
995 
996     CLI.OutVals.push_back(Arg.Val);
997     CLI.OutFlags.push_back(Flags);
998   }
999 
1000   if (!fastLowerCall(CLI))
1001     return false;
1002 
1003   // Set all unused physreg defs as dead.
1004   assert(CLI.Call && "No call instruction specified.");
1005   CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);
1006 
1007   if (CLI.NumResultRegs && CLI.CS)
1008     updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);
1009 
1010   return true;
1011 }
1012 
1013 bool FastISel::lowerCall(const CallInst *CI) {
1014   ImmutableCallSite CS(CI);
1015 
1016   FunctionType *FuncTy = CS.getFunctionType();
1017   Type *RetTy = CS.getType();
1018 
1019   ArgListTy Args;
1020   ArgListEntry Entry;
1021   Args.reserve(CS.arg_size());
1022 
1023   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1024        i != e; ++i) {
1025     Value *V = *i;
1026 
1027     // Skip empty types
1028     if (V->getType()->isEmptyTy())
1029       continue;
1030 
1031     Entry.Val = V;
1032     Entry.Ty = V->getType();
1033 
1034     // Skip the first return-type Attribute to get to params.
1035     Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
1036     Args.push_back(Entry);
1037   }
1038 
1039   // Check if target-independent constraints permit a tail call here.
1040   // Target-dependent constraints are checked within fastLowerCall.
1041   bool IsTailCall = CI->isTailCall();
1042   if (IsTailCall && !isInTailCallPosition(CS, TM))
1043     IsTailCall = false;
1044 
1045   CallLoweringInfo CLI;
1046   CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
1047       .setTailCall(IsTailCall);
1048 
1049   return lowerCallTo(CLI);
1050 }
1051 
1052 bool FastISel::selectCall(const User *I) {
1053   const CallInst *Call = cast<CallInst>(I);
1054 
1055   // Handle simple inline asms.
1056   if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
1057     // If the inline asm has side effects, then make sure that no local value
1058     // lives across by flushing the local value map.
1059     if (IA->hasSideEffects())
1060       flushLocalValueMap();
1061 
1062     // Don't attempt to handle constraints.
1063     if (!IA->getConstraintString().empty())
1064       return false;
1065 
1066     unsigned ExtraInfo = 0;
1067     if (IA->hasSideEffects())
1068       ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1069     if (IA->isAlignStack())
1070       ExtraInfo |= InlineAsm::Extra_IsAlignStack;
1071 
1072     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1073             TII.get(TargetOpcode::INLINEASM))
1074         .addExternalSymbol(IA->getAsmString().c_str())
1075         .addImm(ExtraInfo);
1076     return true;
1077   }
1078 
1079   MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
1080   ComputeUsesVAFloatArgument(*Call, &MMI);
1081 
1082   // Handle intrinsic function calls.
1083   if (const auto *II = dyn_cast<IntrinsicInst>(Call))
1084     return selectIntrinsicCall(II);
1085 
1086   // Usually, it does not make sense to initialize a value,
1087   // make an unrelated function call and use the value, because
1088   // it tends to be spilled on the stack. So, we move the pointer
1089   // to the last local value to the beginning of the block, so that
1090   // all the values which have already been materialized,
1091   // appear after the call. It also makes sense to skip intrinsics
1092   // since they tend to be inlined.
1093   flushLocalValueMap();
1094 
1095   return lowerCall(Call);
1096 }
1097 
1098 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1099   switch (II->getIntrinsicID()) {
1100   default:
1101     break;
1102   // At -O0 we don't care about the lifetime intrinsics.
1103   case Intrinsic::lifetime_start:
1104   case Intrinsic::lifetime_end:
1105   // The donothing intrinsic does, well, nothing.
1106   case Intrinsic::donothing:
1107     return true;
1108   case Intrinsic::dbg_declare: {
1109     const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1110     assert(DI->getVariable() && "Missing variable");
1111     if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1112       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1113       return true;
1114     }
1115 
1116     const Value *Address = DI->getAddress();
1117     if (!Address || isa<UndefValue>(Address)) {
1118       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1119       return true;
1120     }
1121 
1122     unsigned Offset = 0;
1123     Optional<MachineOperand> Op;
1124     if (const auto *Arg = dyn_cast<Argument>(Address))
1125       // Some arguments' frame index is recorded during argument lowering.
1126       Offset = FuncInfo.getArgumentFrameIndex(Arg);
1127     if (Offset)
1128       Op = MachineOperand::CreateFI(Offset);
1129     if (!Op)
1130       if (unsigned Reg = lookUpRegForValue(Address))
1131         Op = MachineOperand::CreateReg(Reg, false);
1132 
1133     // If we have a VLA that has a "use" in a metadata node that's then used
1134     // here but it has no other uses, then we have a problem. E.g.,
1135     //
1136     //   int foo (const int *x) {
1137     //     char a[*x];
1138     //     return 0;
1139     //   }
1140     //
1141     // If we assign 'a' a vreg and fast isel later on has to use the selection
1142     // DAG isel, it will want to copy the value to the vreg. However, there are
1143     // no uses, which goes counter to what selection DAG isel expects.
1144     if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1145         (!isa<AllocaInst>(Address) ||
1146          !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1147       Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1148                                      false);
1149 
1150     if (Op) {
1151       assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1152              "Expected inlined-at fields to agree");
1153       if (Op->isReg()) {
1154         Op->setIsDebug(true);
1155         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1156                 TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
1157                 DI->getVariable(), DI->getExpression());
1158       } else
1159         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1160                 TII.get(TargetOpcode::DBG_VALUE))
1161             .addOperand(*Op)
1162             .addImm(0)
1163             .addMetadata(DI->getVariable())
1164             .addMetadata(DI->getExpression());
1165     } else {
1166       // We can't yet handle anything else here because it would require
1167       // generating code, thus altering codegen because of debug info.
1168       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1169     }
1170     return true;
1171   }
1172   case Intrinsic::dbg_value: {
1173     // This form of DBG_VALUE is target-independent.
1174     const DbgValueInst *DI = cast<DbgValueInst>(II);
1175     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1176     const Value *V = DI->getValue();
1177     assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1178            "Expected inlined-at fields to agree");
1179     if (!V) {
1180       // Currently the optimizer can produce this; insert an undef to
1181       // help debugging.  Probably the optimizer should not do this.
1182       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1183           .addReg(0U)
1184           .addImm(DI->getOffset())
1185           .addMetadata(DI->getVariable())
1186           .addMetadata(DI->getExpression());
1187     } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1188       if (CI->getBitWidth() > 64)
1189         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1190             .addCImm(CI)
1191             .addImm(DI->getOffset())
1192             .addMetadata(DI->getVariable())
1193             .addMetadata(DI->getExpression());
1194       else
1195         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1196             .addImm(CI->getZExtValue())
1197             .addImm(DI->getOffset())
1198             .addMetadata(DI->getVariable())
1199             .addMetadata(DI->getExpression());
1200     } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1201       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1202           .addFPImm(CF)
1203           .addImm(DI->getOffset())
1204           .addMetadata(DI->getVariable())
1205           .addMetadata(DI->getExpression());
1206     } else if (unsigned Reg = lookUpRegForValue(V)) {
1207       // FIXME: This does not handle register-indirect values at offset 0.
1208       bool IsIndirect = DI->getOffset() != 0;
1209       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1210               DI->getOffset(), DI->getVariable(), DI->getExpression());
1211     } else {
1212       // We can't yet handle anything else here because it would require
1213       // generating code, thus altering codegen because of debug info.
1214       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1215     }
1216     return true;
1217   }
1218   case Intrinsic::objectsize: {
1219     ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
1220     unsigned long long Res = CI->isZero() ? -1ULL : 0;
1221     Constant *ResCI = ConstantInt::get(II->getType(), Res);
1222     unsigned ResultReg = getRegForValue(ResCI);
1223     if (!ResultReg)
1224       return false;
1225     updateValueMap(II, ResultReg);
1226     return true;
1227   }
1228   case Intrinsic::expect: {
1229     unsigned ResultReg = getRegForValue(II->getArgOperand(0));
1230     if (!ResultReg)
1231       return false;
1232     updateValueMap(II, ResultReg);
1233     return true;
1234   }
1235   case Intrinsic::experimental_stackmap:
1236     return selectStackmap(II);
1237   case Intrinsic::experimental_patchpoint_void:
1238   case Intrinsic::experimental_patchpoint_i64:
1239     return selectPatchpoint(II);
1240   }
1241 
1242   return fastLowerIntrinsicCall(II);
1243 }
1244 
1245 bool FastISel::selectCast(const User *I, unsigned Opcode) {
1246   EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1247   EVT DstVT = TLI.getValueType(DL, I->getType());
1248 
1249   if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1250       !DstVT.isSimple())
1251     // Unhandled type. Halt "fast" selection and bail.
1252     return false;
1253 
1254   // Check if the destination type is legal.
1255   if (!TLI.isTypeLegal(DstVT))
1256     return false;
1257 
1258   // Check if the source operand is legal.
1259   if (!TLI.isTypeLegal(SrcVT))
1260     return false;
1261 
1262   unsigned InputReg = getRegForValue(I->getOperand(0));
1263   if (!InputReg)
1264     // Unhandled operand.  Halt "fast" selection and bail.
1265     return false;
1266 
1267   bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1268 
1269   unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1270                                   Opcode, InputReg, InputRegIsKill);
1271   if (!ResultReg)
1272     return false;
1273 
1274   updateValueMap(I, ResultReg);
1275   return true;
1276 }
1277 
1278 bool FastISel::selectBitCast(const User *I) {
1279   // If the bitcast doesn't change the type, just use the operand value.
1280   if (I->getType() == I->getOperand(0)->getType()) {
1281     unsigned Reg = getRegForValue(I->getOperand(0));
1282     if (!Reg)
1283       return false;
1284     updateValueMap(I, Reg);
1285     return true;
1286   }
1287 
1288   // Bitcasts of other values become reg-reg copies or BITCAST operators.
1289   EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1290   EVT DstEVT = TLI.getValueType(DL, I->getType());
1291   if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1292       !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1293     // Unhandled type. Halt "fast" selection and bail.
1294     return false;
1295 
1296   MVT SrcVT = SrcEVT.getSimpleVT();
1297   MVT DstVT = DstEVT.getSimpleVT();
1298   unsigned Op0 = getRegForValue(I->getOperand(0));
1299   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1300     return false;
1301   bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1302 
1303   // First, try to perform the bitcast by inserting a reg-reg copy.
1304   unsigned ResultReg = 0;
1305   if (SrcVT == DstVT) {
1306     const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1307     const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1308     // Don't attempt a cross-class copy. It will likely fail.
1309     if (SrcClass == DstClass) {
1310       ResultReg = createResultReg(DstClass);
1311       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1312               TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1313     }
1314   }
1315 
1316   // If the reg-reg copy failed, select a BITCAST opcode.
1317   if (!ResultReg)
1318     ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1319 
1320   if (!ResultReg)
1321     return false;
1322 
1323   updateValueMap(I, ResultReg);
1324   return true;
1325 }
1326 
1327 // Return true if we should copy from swift error to the final vreg as specified
1328 // by SwiftErrorWorklist.
1329 static bool shouldCopySwiftErrorsToFinalVRegs(const TargetLowering &TLI,
1330                                               FunctionLoweringInfo &FuncInfo) {
1331   if (!TLI.supportSwiftError())
1332     return false;
1333   return FuncInfo.SwiftErrorWorklist.count(FuncInfo.MBB);
1334 }
1335 
1336 // Remove local value instructions starting from the instruction after
1337 // SavedLastLocalValue to the current function insert point.
1338 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1339 {
1340   MachineInstr *CurLastLocalValue = getLastLocalValue();
1341   if (CurLastLocalValue != SavedLastLocalValue) {
1342     // Find the first local value instruction to be deleted.
1343     // This is the instruction after SavedLastLocalValue if it is non-NULL.
1344     // Otherwise it's the first instruction in the block.
1345     MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1346     if (SavedLastLocalValue)
1347       ++FirstDeadInst;
1348     else
1349       FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1350     setLastLocalValue(SavedLastLocalValue);
1351     removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1352   }
1353 }
1354 
1355 bool FastISel::selectInstruction(const Instruction *I) {
1356   MachineInstr *SavedLastLocalValue = getLastLocalValue();
1357   // Just before the terminator instruction, insert instructions to
1358   // feed PHI nodes in successor blocks.
1359   if (isa<TerminatorInst>(I)) {
1360     // If we need to materialize any vreg from worklist, we bail out of
1361     // FastISel.
1362     if (shouldCopySwiftErrorsToFinalVRegs(TLI, FuncInfo))
1363       return false;
1364     if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1365       // PHI node handling may have generated local value instructions,
1366       // even though it failed to handle all PHI nodes.
1367       // We remove these instructions because SelectionDAGISel will generate
1368       // them again.
1369       removeDeadLocalValueCode(SavedLastLocalValue);
1370       return false;
1371     }
1372   }
1373 
1374   // FastISel does not handle any operand bundles except OB_funclet.
1375   if (ImmutableCallSite CS = ImmutableCallSite(I))
1376     for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1377       if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1378         return false;
1379 
1380   DbgLoc = I->getDebugLoc();
1381 
1382   SavedInsertPt = FuncInfo.InsertPt;
1383 
1384   if (const auto *Call = dyn_cast<CallInst>(I)) {
1385     const Function *F = Call->getCalledFunction();
1386     LibFunc::Func Func;
1387 
1388     // As a special case, don't handle calls to builtin library functions that
1389     // may be translated directly to target instructions.
1390     if (F && !F->hasLocalLinkage() && F->hasName() &&
1391         LibInfo->getLibFunc(F->getName(), Func) &&
1392         LibInfo->hasOptimizedCodeGen(Func))
1393       return false;
1394 
1395     // Don't handle Intrinsic::trap if a trap function is specified.
1396     if (F && F->getIntrinsicID() == Intrinsic::trap &&
1397         Call->hasFnAttr("trap-func-name"))
1398       return false;
1399   }
1400 
1401   // First, try doing target-independent selection.
1402   if (!SkipTargetIndependentISel) {
1403     if (selectOperator(I, I->getOpcode())) {
1404       ++NumFastIselSuccessIndependent;
1405       DbgLoc = DebugLoc();
1406       return true;
1407     }
1408     // Remove dead code.
1409     recomputeInsertPt();
1410     if (SavedInsertPt != FuncInfo.InsertPt)
1411       removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1412     SavedInsertPt = FuncInfo.InsertPt;
1413   }
1414   // Next, try calling the target to attempt to handle the instruction.
1415   if (fastSelectInstruction(I)) {
1416     ++NumFastIselSuccessTarget;
1417     DbgLoc = DebugLoc();
1418     return true;
1419   }
1420   // Remove dead code.
1421   recomputeInsertPt();
1422   if (SavedInsertPt != FuncInfo.InsertPt)
1423     removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1424 
1425   DbgLoc = DebugLoc();
1426   // Undo phi node updates, because they will be added again by SelectionDAG.
1427   if (isa<TerminatorInst>(I)) {
1428     // PHI node handling may have generated local value instructions.
1429     // We remove them because SelectionDAGISel will generate them again.
1430     removeDeadLocalValueCode(SavedLastLocalValue);
1431     FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1432   }
1433   return false;
1434 }
1435 
1436 /// Emit an unconditional branch to the given block, unless it is the immediate
1437 /// (fall-through) successor, and update the CFG.
1438 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
1439   if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1440       FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1441     // For more accurate line information if this is the only instruction
1442     // in the block then emit it, otherwise we have the unconditional
1443     // fall-through case, which needs no instructions.
1444   } else {
1445     // The unconditional branch case.
1446     TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
1447                      SmallVector<MachineOperand, 0>(), DbgLoc);
1448   }
1449   if (FuncInfo.BPI) {
1450     auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1451         FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1452     FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1453   } else
1454     FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1455 }
1456 
1457 void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1458                                 MachineBasicBlock *TrueMBB,
1459                                 MachineBasicBlock *FalseMBB) {
1460   // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
1461   // happen in degenerate IR and MachineIR forbids to have a block twice in the
1462   // successor/predecessor lists.
1463   if (TrueMBB != FalseMBB) {
1464     if (FuncInfo.BPI) {
1465       auto BranchProbability =
1466           FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1467       FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1468     } else
1469       FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1470   }
1471 
1472   fastEmitBranch(FalseMBB, DbgLoc);
1473 }
1474 
1475 /// Emit an FNeg operation.
1476 bool FastISel::selectFNeg(const User *I) {
1477   unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
1478   if (!OpReg)
1479     return false;
1480   bool OpRegIsKill = hasTrivialKill(I);
1481 
1482   // If the target has ISD::FNEG, use it.
1483   EVT VT = TLI.getValueType(DL, I->getType());
1484   unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1485                                   OpReg, OpRegIsKill);
1486   if (ResultReg) {
1487     updateValueMap(I, ResultReg);
1488     return true;
1489   }
1490 
1491   // Bitcast the value to integer, twiddle the sign bit with xor,
1492   // and then bitcast it back to floating-point.
1493   if (VT.getSizeInBits() > 64)
1494     return false;
1495   EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1496   if (!TLI.isTypeLegal(IntVT))
1497     return false;
1498 
1499   unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1500                                ISD::BITCAST, OpReg, OpRegIsKill);
1501   if (!IntReg)
1502     return false;
1503 
1504   unsigned IntResultReg = fastEmit_ri_(
1505       IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1506       UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1507   if (!IntResultReg)
1508     return false;
1509 
1510   ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1511                          IntResultReg, /*IsKill=*/true);
1512   if (!ResultReg)
1513     return false;
1514 
1515   updateValueMap(I, ResultReg);
1516   return true;
1517 }
1518 
1519 bool FastISel::selectExtractValue(const User *U) {
1520   const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1521   if (!EVI)
1522     return false;
1523 
1524   // Make sure we only try to handle extracts with a legal result.  But also
1525   // allow i1 because it's easy.
1526   EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1527   if (!RealVT.isSimple())
1528     return false;
1529   MVT VT = RealVT.getSimpleVT();
1530   if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1531     return false;
1532 
1533   const Value *Op0 = EVI->getOperand(0);
1534   Type *AggTy = Op0->getType();
1535 
1536   // Get the base result register.
1537   unsigned ResultReg;
1538   DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
1539   if (I != FuncInfo.ValueMap.end())
1540     ResultReg = I->second;
1541   else if (isa<Instruction>(Op0))
1542     ResultReg = FuncInfo.InitializeRegForValue(Op0);
1543   else
1544     return false; // fast-isel can't handle aggregate constants at the moment
1545 
1546   // Get the actual result register, which is an offset from the base register.
1547   unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1548 
1549   SmallVector<EVT, 4> AggValueVTs;
1550   ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1551 
1552   for (unsigned i = 0; i < VTIndex; i++)
1553     ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1554 
1555   updateValueMap(EVI, ResultReg);
1556   return true;
1557 }
1558 
1559 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1560   switch (Opcode) {
1561   case Instruction::Add:
1562     return selectBinaryOp(I, ISD::ADD);
1563   case Instruction::FAdd:
1564     return selectBinaryOp(I, ISD::FADD);
1565   case Instruction::Sub:
1566     return selectBinaryOp(I, ISD::SUB);
1567   case Instruction::FSub:
1568     // FNeg is currently represented in LLVM IR as a special case of FSub.
1569     if (BinaryOperator::isFNeg(I))
1570       return selectFNeg(I);
1571     return selectBinaryOp(I, ISD::FSUB);
1572   case Instruction::Mul:
1573     return selectBinaryOp(I, ISD::MUL);
1574   case Instruction::FMul:
1575     return selectBinaryOp(I, ISD::FMUL);
1576   case Instruction::SDiv:
1577     return selectBinaryOp(I, ISD::SDIV);
1578   case Instruction::UDiv:
1579     return selectBinaryOp(I, ISD::UDIV);
1580   case Instruction::FDiv:
1581     return selectBinaryOp(I, ISD::FDIV);
1582   case Instruction::SRem:
1583     return selectBinaryOp(I, ISD::SREM);
1584   case Instruction::URem:
1585     return selectBinaryOp(I, ISD::UREM);
1586   case Instruction::FRem:
1587     return selectBinaryOp(I, ISD::FREM);
1588   case Instruction::Shl:
1589     return selectBinaryOp(I, ISD::SHL);
1590   case Instruction::LShr:
1591     return selectBinaryOp(I, ISD::SRL);
1592   case Instruction::AShr:
1593     return selectBinaryOp(I, ISD::SRA);
1594   case Instruction::And:
1595     return selectBinaryOp(I, ISD::AND);
1596   case Instruction::Or:
1597     return selectBinaryOp(I, ISD::OR);
1598   case Instruction::Xor:
1599     return selectBinaryOp(I, ISD::XOR);
1600 
1601   case Instruction::GetElementPtr:
1602     return selectGetElementPtr(I);
1603 
1604   case Instruction::Br: {
1605     const BranchInst *BI = cast<BranchInst>(I);
1606 
1607     if (BI->isUnconditional()) {
1608       const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1609       MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1610       fastEmitBranch(MSucc, BI->getDebugLoc());
1611       return true;
1612     }
1613 
1614     // Conditional branches are not handed yet.
1615     // Halt "fast" selection and bail.
1616     return false;
1617   }
1618 
1619   case Instruction::Unreachable:
1620     if (TM.Options.TrapUnreachable)
1621       return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1622     else
1623       return true;
1624 
1625   case Instruction::Alloca:
1626     // FunctionLowering has the static-sized case covered.
1627     if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1628       return true;
1629 
1630     // Dynamic-sized alloca is not handled yet.
1631     return false;
1632 
1633   case Instruction::Call:
1634     return selectCall(I);
1635 
1636   case Instruction::BitCast:
1637     return selectBitCast(I);
1638 
1639   case Instruction::FPToSI:
1640     return selectCast(I, ISD::FP_TO_SINT);
1641   case Instruction::ZExt:
1642     return selectCast(I, ISD::ZERO_EXTEND);
1643   case Instruction::SExt:
1644     return selectCast(I, ISD::SIGN_EXTEND);
1645   case Instruction::Trunc:
1646     return selectCast(I, ISD::TRUNCATE);
1647   case Instruction::SIToFP:
1648     return selectCast(I, ISD::SINT_TO_FP);
1649 
1650   case Instruction::IntToPtr: // Deliberate fall-through.
1651   case Instruction::PtrToInt: {
1652     EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1653     EVT DstVT = TLI.getValueType(DL, I->getType());
1654     if (DstVT.bitsGT(SrcVT))
1655       return selectCast(I, ISD::ZERO_EXTEND);
1656     if (DstVT.bitsLT(SrcVT))
1657       return selectCast(I, ISD::TRUNCATE);
1658     unsigned Reg = getRegForValue(I->getOperand(0));
1659     if (!Reg)
1660       return false;
1661     updateValueMap(I, Reg);
1662     return true;
1663   }
1664 
1665   case Instruction::ExtractValue:
1666     return selectExtractValue(I);
1667 
1668   case Instruction::PHI:
1669     llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1670 
1671   default:
1672     // Unhandled instruction. Halt "fast" selection and bail.
1673     return false;
1674   }
1675 }
1676 
1677 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1678                    const TargetLibraryInfo *LibInfo,
1679                    bool SkipTargetIndependentISel)
1680     : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1681       MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1682       TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1683       TII(*MF->getSubtarget().getInstrInfo()),
1684       TLI(*MF->getSubtarget().getTargetLowering()),
1685       TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1686       SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1687 
1688 FastISel::~FastISel() {}
1689 
1690 bool FastISel::fastLowerArguments() { return false; }
1691 
1692 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1693 
1694 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1695   return false;
1696 }
1697 
1698 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1699 
1700 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1701                               bool /*Op0IsKill*/) {
1702   return 0;
1703 }
1704 
1705 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1706                                bool /*Op0IsKill*/, unsigned /*Op1*/,
1707                                bool /*Op1IsKill*/) {
1708   return 0;
1709 }
1710 
1711 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1712   return 0;
1713 }
1714 
1715 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1716                               const ConstantFP * /*FPImm*/) {
1717   return 0;
1718 }
1719 
1720 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1721                                bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1722   return 0;
1723 }
1724 
1725 unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
1726                                bool /*Op0IsKill*/,
1727                                const ConstantFP * /*FPImm*/) {
1728   return 0;
1729 }
1730 
1731 unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
1732                                 bool /*Op0IsKill*/, unsigned /*Op1*/,
1733                                 bool /*Op1IsKill*/, uint64_t /*Imm*/) {
1734   return 0;
1735 }
1736 
1737 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1738 /// instruction with an immediate operand using fastEmit_ri.
1739 /// If that fails, it materializes the immediate into a register and try
1740 /// fastEmit_rr instead.
1741 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1742                                 bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1743   // If this is a multiply by a power of two, emit this as a shift left.
1744   if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1745     Opcode = ISD::SHL;
1746     Imm = Log2_64(Imm);
1747   } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1748     // div x, 8 -> srl x, 3
1749     Opcode = ISD::SRL;
1750     Imm = Log2_64(Imm);
1751   }
1752 
1753   // Horrible hack (to be removed), check to make sure shift amounts are
1754   // in-range.
1755   if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1756       Imm >= VT.getSizeInBits())
1757     return 0;
1758 
1759   // First check if immediate type is legal. If not, we can't use the ri form.
1760   unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1761   if (ResultReg)
1762     return ResultReg;
1763   unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1764   bool IsImmKill = true;
1765   if (!MaterialReg) {
1766     // This is a bit ugly/slow, but failing here means falling out of
1767     // fast-isel, which would be very slow.
1768     IntegerType *ITy =
1769         IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1770     MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1771     if (!MaterialReg)
1772       return 0;
1773     // FIXME: If the materialized register here has no uses yet then this
1774     // will be the first use and we should be able to mark it as killed.
1775     // However, the local value area for materialising constant expressions
1776     // grows down, not up, which means that any constant expressions we generate
1777     // later which also use 'Imm' could be after this instruction and therefore
1778     // after this kill.
1779     IsImmKill = false;
1780   }
1781   return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
1782 }
1783 
1784 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
1785   return MRI.createVirtualRegister(RC);
1786 }
1787 
1788 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
1789                                             unsigned OpNum) {
1790   if (TargetRegisterInfo::isVirtualRegister(Op)) {
1791     const TargetRegisterClass *RegClass =
1792         TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
1793     if (!MRI.constrainRegClass(Op, RegClass)) {
1794       // If it's not legal to COPY between the register classes, something
1795       // has gone very wrong before we got here.
1796       unsigned NewOp = createResultReg(RegClass);
1797       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1798               TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
1799       return NewOp;
1800     }
1801   }
1802   return Op;
1803 }
1804 
1805 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
1806                                  const TargetRegisterClass *RC) {
1807   unsigned ResultReg = createResultReg(RC);
1808   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1809 
1810   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
1811   return ResultReg;
1812 }
1813 
1814 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
1815                                   const TargetRegisterClass *RC, unsigned Op0,
1816                                   bool Op0IsKill) {
1817   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1818 
1819   unsigned ResultReg = createResultReg(RC);
1820   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1821 
1822   if (II.getNumDefs() >= 1)
1823     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1824         .addReg(Op0, getKillRegState(Op0IsKill));
1825   else {
1826     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1827         .addReg(Op0, getKillRegState(Op0IsKill));
1828     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1829             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1830   }
1831 
1832   return ResultReg;
1833 }
1834 
1835 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
1836                                    const TargetRegisterClass *RC, unsigned Op0,
1837                                    bool Op0IsKill, unsigned Op1,
1838                                    bool Op1IsKill) {
1839   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1840 
1841   unsigned ResultReg = createResultReg(RC);
1842   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1843   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1844 
1845   if (II.getNumDefs() >= 1)
1846     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1847         .addReg(Op0, getKillRegState(Op0IsKill))
1848         .addReg(Op1, getKillRegState(Op1IsKill));
1849   else {
1850     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1851         .addReg(Op0, getKillRegState(Op0IsKill))
1852         .addReg(Op1, getKillRegState(Op1IsKill));
1853     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1854             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1855   }
1856   return ResultReg;
1857 }
1858 
1859 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
1860                                     const TargetRegisterClass *RC, unsigned Op0,
1861                                     bool Op0IsKill, unsigned Op1,
1862                                     bool Op1IsKill, unsigned Op2,
1863                                     bool Op2IsKill) {
1864   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1865 
1866   unsigned ResultReg = createResultReg(RC);
1867   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1868   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1869   Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
1870 
1871   if (II.getNumDefs() >= 1)
1872     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1873         .addReg(Op0, getKillRegState(Op0IsKill))
1874         .addReg(Op1, getKillRegState(Op1IsKill))
1875         .addReg(Op2, getKillRegState(Op2IsKill));
1876   else {
1877     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1878         .addReg(Op0, getKillRegState(Op0IsKill))
1879         .addReg(Op1, getKillRegState(Op1IsKill))
1880         .addReg(Op2, getKillRegState(Op2IsKill));
1881     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1882             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1883   }
1884   return ResultReg;
1885 }
1886 
1887 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
1888                                    const TargetRegisterClass *RC, unsigned Op0,
1889                                    bool Op0IsKill, uint64_t Imm) {
1890   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1891 
1892   unsigned ResultReg = createResultReg(RC);
1893   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1894 
1895   if (II.getNumDefs() >= 1)
1896     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1897         .addReg(Op0, getKillRegState(Op0IsKill))
1898         .addImm(Imm);
1899   else {
1900     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1901         .addReg(Op0, getKillRegState(Op0IsKill))
1902         .addImm(Imm);
1903     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1904             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1905   }
1906   return ResultReg;
1907 }
1908 
1909 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
1910                                     const TargetRegisterClass *RC, unsigned Op0,
1911                                     bool Op0IsKill, uint64_t Imm1,
1912                                     uint64_t Imm2) {
1913   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1914 
1915   unsigned ResultReg = createResultReg(RC);
1916   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1917 
1918   if (II.getNumDefs() >= 1)
1919     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1920         .addReg(Op0, getKillRegState(Op0IsKill))
1921         .addImm(Imm1)
1922         .addImm(Imm2);
1923   else {
1924     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1925         .addReg(Op0, getKillRegState(Op0IsKill))
1926         .addImm(Imm1)
1927         .addImm(Imm2);
1928     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1929             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1930   }
1931   return ResultReg;
1932 }
1933 
1934 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
1935                                   const TargetRegisterClass *RC,
1936                                   const ConstantFP *FPImm) {
1937   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1938 
1939   unsigned ResultReg = createResultReg(RC);
1940 
1941   if (II.getNumDefs() >= 1)
1942     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1943         .addFPImm(FPImm);
1944   else {
1945     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1946         .addFPImm(FPImm);
1947     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1948             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1949   }
1950   return ResultReg;
1951 }
1952 
1953 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
1954                                     const TargetRegisterClass *RC, unsigned Op0,
1955                                     bool Op0IsKill, unsigned Op1,
1956                                     bool Op1IsKill, uint64_t Imm) {
1957   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1958 
1959   unsigned ResultReg = createResultReg(RC);
1960   Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
1961   Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
1962 
1963   if (II.getNumDefs() >= 1)
1964     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1965         .addReg(Op0, getKillRegState(Op0IsKill))
1966         .addReg(Op1, getKillRegState(Op1IsKill))
1967         .addImm(Imm);
1968   else {
1969     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1970         .addReg(Op0, getKillRegState(Op0IsKill))
1971         .addReg(Op1, getKillRegState(Op1IsKill))
1972         .addImm(Imm);
1973     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1974             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1975   }
1976   return ResultReg;
1977 }
1978 
1979 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
1980                                   const TargetRegisterClass *RC, uint64_t Imm) {
1981   unsigned ResultReg = createResultReg(RC);
1982   const MCInstrDesc &II = TII.get(MachineInstOpcode);
1983 
1984   if (II.getNumDefs() >= 1)
1985     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
1986         .addImm(Imm);
1987   else {
1988     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
1989     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1990             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
1991   }
1992   return ResultReg;
1993 }
1994 
1995 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
1996                                               bool Op0IsKill, uint32_t Idx) {
1997   unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1998   assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
1999          "Cannot yet extract from physregs");
2000   const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2001   MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2002   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2003           ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2004   return ResultReg;
2005 }
2006 
2007 /// Emit MachineInstrs to compute the value of Op with all but the least
2008 /// significant bit set to zero.
2009 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2010   return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2011 }
2012 
2013 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2014 /// Emit code to ensure constants are copied into registers when needed.
2015 /// Remember the virtual registers that need to be added to the Machine PHI
2016 /// nodes as input.  We cannot just directly add them, because expansion
2017 /// might result in multiple MBB's for one BB.  As such, the start of the
2018 /// BB might correspond to a different MBB than the end.
2019 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2020   const TerminatorInst *TI = LLVMBB->getTerminator();
2021 
2022   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2023   FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2024 
2025   // Check successor nodes' PHI nodes that expect a constant to be available
2026   // from this block.
2027   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2028     const BasicBlock *SuccBB = TI->getSuccessor(succ);
2029     if (!isa<PHINode>(SuccBB->begin()))
2030       continue;
2031     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2032 
2033     // If this terminator has multiple identical successors (common for
2034     // switches), only handle each succ once.
2035     if (!SuccsHandled.insert(SuccMBB).second)
2036       continue;
2037 
2038     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2039 
2040     // At this point we know that there is a 1-1 correspondence between LLVM PHI
2041     // nodes and Machine PHI nodes, but the incoming operands have not been
2042     // emitted yet.
2043     for (BasicBlock::const_iterator I = SuccBB->begin();
2044          const auto *PN = dyn_cast<PHINode>(I); ++I) {
2045 
2046       // Ignore dead phi's.
2047       if (PN->use_empty())
2048         continue;
2049 
2050       // Only handle legal types. Two interesting things to note here. First,
2051       // by bailing out early, we may leave behind some dead instructions,
2052       // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2053       // own moves. Second, this check is necessary because FastISel doesn't
2054       // use CreateRegs to create registers, so it always creates
2055       // exactly one register for each non-void instruction.
2056       EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
2057       if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2058         // Handle integer promotions, though, because they're common and easy.
2059         if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2060           FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2061           return false;
2062         }
2063       }
2064 
2065       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
2066 
2067       // Set the DebugLoc for the copy. Prefer the location of the operand
2068       // if there is one; use the location of the PHI otherwise.
2069       DbgLoc = PN->getDebugLoc();
2070       if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2071         DbgLoc = Inst->getDebugLoc();
2072 
2073       unsigned Reg = getRegForValue(PHIOp);
2074       if (!Reg) {
2075         FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2076         return false;
2077       }
2078       FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
2079       DbgLoc = DebugLoc();
2080     }
2081   }
2082 
2083   return true;
2084 }
2085 
2086 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2087   assert(LI->hasOneUse() &&
2088          "tryToFoldLoad expected a LoadInst with a single use");
2089   // We know that the load has a single use, but don't know what it is.  If it
2090   // isn't one of the folded instructions, then we can't succeed here.  Handle
2091   // this by scanning the single-use users of the load until we get to FoldInst.
2092   unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2093 
2094   const Instruction *TheUser = LI->user_back();
2095   while (TheUser != FoldInst && // Scan up until we find FoldInst.
2096          // Stay in the right block.
2097          TheUser->getParent() == FoldInst->getParent() &&
2098          --MaxUsers) { // Don't scan too far.
2099     // If there are multiple or no uses of this instruction, then bail out.
2100     if (!TheUser->hasOneUse())
2101       return false;
2102 
2103     TheUser = TheUser->user_back();
2104   }
2105 
2106   // If we didn't find the fold instruction, then we failed to collapse the
2107   // sequence.
2108   if (TheUser != FoldInst)
2109     return false;
2110 
2111   // Don't try to fold volatile loads.  Target has to deal with alignment
2112   // constraints.
2113   if (LI->isVolatile())
2114     return false;
2115 
2116   // Figure out which vreg this is going into.  If there is no assigned vreg yet
2117   // then there actually was no reference to it.  Perhaps the load is referenced
2118   // by a dead instruction.
2119   unsigned LoadReg = getRegForValue(LI);
2120   if (!LoadReg)
2121     return false;
2122 
2123   // We can't fold if this vreg has no uses or more than one use.  Multiple uses
2124   // may mean that the instruction got lowered to multiple MIs, or the use of
2125   // the loaded value ended up being multiple operands of the result.
2126   if (!MRI.hasOneUse(LoadReg))
2127     return false;
2128 
2129   MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2130   MachineInstr *User = RI->getParent();
2131 
2132   // Set the insertion point properly.  Folding the load can cause generation of
2133   // other random instructions (like sign extends) for addressing modes; make
2134   // sure they get inserted in a logical place before the new instruction.
2135   FuncInfo.InsertPt = User;
2136   FuncInfo.MBB = User->getParent();
2137 
2138   // Ask the target to try folding the load.
2139   return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2140 }
2141 
2142 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2143   // Must be an add.
2144   if (!isa<AddOperator>(Add))
2145     return false;
2146   // Type size needs to match.
2147   if (DL.getTypeSizeInBits(GEP->getType()) !=
2148       DL.getTypeSizeInBits(Add->getType()))
2149     return false;
2150   // Must be in the same basic block.
2151   if (isa<Instruction>(Add) &&
2152       FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2153     return false;
2154   // Must have a constant operand.
2155   return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2156 }
2157 
2158 MachineMemOperand *
2159 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2160   const Value *Ptr;
2161   Type *ValTy;
2162   unsigned Alignment;
2163   unsigned Flags;
2164   bool IsVolatile;
2165 
2166   if (const auto *LI = dyn_cast<LoadInst>(I)) {
2167     Alignment = LI->getAlignment();
2168     IsVolatile = LI->isVolatile();
2169     Flags = MachineMemOperand::MOLoad;
2170     Ptr = LI->getPointerOperand();
2171     ValTy = LI->getType();
2172   } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2173     Alignment = SI->getAlignment();
2174     IsVolatile = SI->isVolatile();
2175     Flags = MachineMemOperand::MOStore;
2176     Ptr = SI->getPointerOperand();
2177     ValTy = SI->getValueOperand()->getType();
2178   } else
2179     return nullptr;
2180 
2181   bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2182   bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2183   const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2184 
2185   AAMDNodes AAInfo;
2186   I->getAAMetadata(AAInfo);
2187 
2188   if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2189     Alignment = DL.getABITypeAlignment(ValTy);
2190 
2191   unsigned Size = DL.getTypeStoreSize(ValTy);
2192 
2193   if (IsVolatile)
2194     Flags |= MachineMemOperand::MOVolatile;
2195   if (IsNonTemporal)
2196     Flags |= MachineMemOperand::MONonTemporal;
2197   if (IsInvariant)
2198     Flags |= MachineMemOperand::MOInvariant;
2199 
2200   return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2201                                            Alignment, AAInfo, Ranges);
2202 }
2203 
2204 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2205   // If both operands are the same, then try to optimize or fold the cmp.
2206   CmpInst::Predicate Predicate = CI->getPredicate();
2207   if (CI->getOperand(0) != CI->getOperand(1))
2208     return Predicate;
2209 
2210   switch (Predicate) {
2211   default: llvm_unreachable("Invalid predicate!");
2212   case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2213   case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
2214   case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
2215   case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
2216   case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
2217   case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
2218   case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
2219   case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
2220   case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
2221   case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
2222   case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
2223   case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2224   case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
2225   case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2226   case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
2227   case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;
2228 
2229   case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
2230   case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
2231   case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
2232   case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2233   case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
2234   case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2235   case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
2236   case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2237   case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
2238   case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
2239   }
2240 
2241   return Predicate;
2242 }
2243