1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
20 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
21 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/MachineSizeOpts.h"
27 #include "llvm/CodeGen/RegisterBankInfo.h"
28 #include "llvm/CodeGen/StackProtector.h"
29 #include "llvm/CodeGen/TargetInstrInfo.h"
30 #include "llvm/CodeGen/TargetLowering.h"
31 #include "llvm/CodeGen/TargetPassConfig.h"
32 #include "llvm/CodeGen/TargetRegisterInfo.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Transforms/Utils/SizeOpts.h"
36 
37 #define DEBUG_TYPE "globalisel-utils"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
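// Try to constrain Reg to RegClass in place. If the existing constraints are
// incompatible, return a fresh virtual register of the requested class
// instead; the caller (see constrainOperandRegClass below) is then expected
// to insert a COPY between the old and the new register.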
42 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
43                                    const TargetInstrInfo &TII,
44                                    const RegisterBankInfo &RBI, Register Reg,
45                                    const TargetRegisterClass &RegClass) {
46   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
47     return MRI.createVirtualRegister(&RegClass);
48 
49   return Reg;
50 }
51 
52 Register llvm::constrainOperandRegClass(
53     const MachineFunction &MF, const TargetRegisterInfo &TRI,
54     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
55     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
56     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
57   Register Reg = RegMO.getReg();
58   // Assume physical registers are properly constrained.
59   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
60 
  // Save the old register class to check whether the change notifications
  // will be required.
  // TODO: A better approach would be to pass the observers to
  // constrainRegToClass().
65   auto *OldRegClass = MRI.getRegClassOrNull(Reg);
66   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
69   if (ConstrainedReg != Reg) {
70     MachineBasicBlock::iterator InsertIt(&InsertPt);
71     MachineBasicBlock &MBB = *InsertPt.getParent();
72     // FIXME: The copy needs to have the classes constrained for its operands.
73     // Use operand's regbank to get the class for old register (Reg).
74     if (RegMO.isUse()) {
75       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
76               TII.get(TargetOpcode::COPY), ConstrainedReg)
77           .addReg(Reg);
78     } else {
79       assert(RegMO.isDef() && "Must be a definition");
80       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
81               TII.get(TargetOpcode::COPY), Reg)
82           .addReg(ConstrainedReg);
83     }
84     if (GISelChangeObserver *Observer = MF.getObserver()) {
85       Observer->changingInstr(*RegMO.getParent());
86     }
87     RegMO.setReg(ConstrainedReg);
88     if (GISelChangeObserver *Observer = MF.getObserver()) {
89       Observer->changedInstr(*RegMO.getParent());
90     }
91   } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
92     if (GISelChangeObserver *Observer = MF.getObserver()) {
93       if (!RegMO.isDef()) {
94         MachineInstr *RegDef = MRI.getVRegDef(Reg);
95         Observer->changedInstr(*RegDef);
96       }
97       Observer->changingAllUsesOfReg(MRI, Reg);
98       Observer->finishedChangingAllUsesOfReg();
99     }
100   }
101   return ConstrainedReg;
102 }
103 
104 Register llvm::constrainOperandRegClass(
105     const MachineFunction &MF, const TargetRegisterInfo &TRI,
106     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
107     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
108     MachineOperand &RegMO, unsigned OpIdx) {
109   Register Reg = RegMO.getReg();
110   // Assume physical registers are properly constrained.
111   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
112 
113   const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose
  // any register class constraints on some of their operands: if it's a use,
  // we can skip constraining as the instruction defining the register would
  // constrain it.
118 
119   if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper sub-class.
    // Operands can have multiple regbanks for a superclass that combines
    // different register types (e.g., AMDGPU's VGPR and AGPR). The regbank
    // ambiguity resolved by targets during regbankselect should not be
    // overridden.
124     if (const auto *SubRC = TRI.getCommonSubClass(
125             OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
126       OpRC = SubRC;
127 
128     OpRC = TRI.getAllocatableClass(OpRC);
129   }
130 
131   if (!OpRC) {
132     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
133            "Register class constraint is required unless either the "
134            "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here might not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule that
    // produces such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
145     return Reg;
146   }
147   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
148                                   RegMO);
149 }
150 
151 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
152                                             const TargetInstrInfo &TII,
153                                             const TargetRegisterInfo &TRI,
154                                             const RegisterBankInfo &RBI) {
155   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
156          "A selected instruction is expected");
157   MachineBasicBlock &MBB = *I.getParent();
158   MachineFunction &MF = *MBB.getParent();
159   MachineRegisterInfo &MRI = MF.getRegInfo();
160 
161   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
162     MachineOperand &MO = I.getOperand(OpI);
163 
164     // There's nothing to be done on non-register operands.
165     if (!MO.isReg())
166       continue;
167 
168     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
169     assert(MO.isReg() && "Unsupported non-reg operand");
170 
171     Register Reg = MO.getReg();
172     // Physical registers don't need to be constrained.
173     if (Register::isPhysicalRegister(Reg))
174       continue;
175 
176     // Register operands with a value of 0 (e.g. predicate operands) don't need
177     // to be constrained.
178     if (Reg == 0)
179       continue;
180 
181     // If the operand is a vreg, we should constrain its regclass, and only
182     // insert COPYs if that's impossible.
183     // constrainOperandRegClass does that for us.
184     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
185 
186     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
187     // done.
188     if (MO.isUse()) {
189       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
190       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
191         I.tieOperands(DefIdx, OpI);
192     }
193   }
194   return true;
195 }
196 
197 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
198                          MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
200   if (DstReg.isPhysical() || SrcReg.isPhysical())
201     return false;
202   // Give up if the types don't match.
203   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
204     return false;
205   // Replace if either DstReg has no constraints or the register
206   // constraints match.
207   return !MRI.getRegClassOrRegBank(DstReg) ||
208          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
209 }
210 
211 bool llvm::isTriviallyDead(const MachineInstr &MI,
212                            const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
214   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
215   // MachineInstr::isLabel?
216 
217   // Don't delete frame allocation labels.
218   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
219     return false;
220   // LIFETIME markers should be preserved even if they seem dead.
221   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
222       MI.getOpcode() == TargetOpcode::LIFETIME_END)
223     return false;
224 
225   // If we can move an instruction, we can remove it.  Otherwise, it has
226   // a side-effect of some sort.
227   bool SawStore = false;
228   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
229     return false;
230 
231   // Instructions without side-effects are dead iff they only define dead vregs.
232   for (auto &MO : MI.operands()) {
233     if (!MO.isReg() || !MO.isDef())
234       continue;
235 
236     Register Reg = MO.getReg();
237     if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
238       return false;
239   }
240   return true;
241 }
242 
243 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
244                                   MachineFunction &MF,
245                                   const TargetPassConfig &TPC,
246                                   MachineOptimizationRemarkEmitter &MORE,
247                                   MachineOptimizationRemarkMissed &R) {
248   bool IsFatal = Severity == DS_Error &&
249                  TPC.isGlobalISelAbortEnabled();
250   // Print the function name explicitly if we don't have a debug location (which
251   // makes the diagnostic less useful) or if we're going to emit a raw error.
252   if (!R.getLocation().isValid() || IsFatal)
253     R << (" (in function: " + MF.getName() + ")").str();
254 
255   if (IsFatal)
256     report_fatal_error(Twine(R.getMsg()));
257   else
258     MORE.emit(R);
259 }
260 
261 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
262                               MachineOptimizationRemarkEmitter &MORE,
263                               MachineOptimizationRemarkMissed &R) {
264   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
265 }
266 
267 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
268                               MachineOptimizationRemarkEmitter &MORE,
269                               MachineOptimizationRemarkMissed &R) {
270   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
271   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
272 }
273 
274 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
275                               MachineOptimizationRemarkEmitter &MORE,
276                               const char *PassName, StringRef Msg,
277                               const MachineInstr &MI) {
278   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
279                                     MI.getDebugLoc(), MI.getParent());
280   R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
282   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
283     R << ": " << ore::MNV("Inst", MI);
284   reportGISelFailure(MF, TPC, MORE, R);
285 }
286 
287 Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
288                                           const MachineRegisterInfo &MRI) {
289   Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
290       VReg, MRI, /*LookThroughInstrs*/ false);
291   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
292          "Value found while looking through instrs");
293   if (!ValAndVReg)
294     return None;
295   return ValAndVReg->Value;
296 }
297 
298 Optional<int64_t>
299 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
300   Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
301   if (Val && Val->getBitWidth() <= 64)
302     return Val->getSExtValue();
303   return None;
304 }
305 
306 namespace {
307 
308 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
309 typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
310 
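// Shared implementation of the constant look-through helpers below: walk up
// the def chain through COPY, G_INTTOPTR and integer extension/truncation
// instructions, recording each size change, then re-apply those size changes
// to the APInt extracted from the constant definition that ends the walk.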
311 Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
312     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
313     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
314     bool LookThroughAnyExt = false) {
315   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
316   MachineInstr *MI;
317 
318   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
319          LookThroughInstrs) {
320     switch (MI->getOpcode()) {
321     case TargetOpcode::G_ANYEXT:
322       if (!LookThroughAnyExt)
323         return None;
324       LLVM_FALLTHROUGH;
325     case TargetOpcode::G_TRUNC:
326     case TargetOpcode::G_SEXT:
327     case TargetOpcode::G_ZEXT:
328       SeenOpcodes.push_back(std::make_pair(
329           MI->getOpcode(),
330           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
331       VReg = MI->getOperand(1).getReg();
332       break;
333     case TargetOpcode::COPY:
334       VReg = MI->getOperand(1).getReg();
335       if (Register::isPhysicalRegister(VReg))
336         return None;
337       break;
338     case TargetOpcode::G_INTTOPTR:
339       VReg = MI->getOperand(1).getReg();
340       break;
341     default:
342       return None;
343     }
344   }
345   if (!MI || !IsConstantOpcode(MI))
346     return None;
347 
348   Optional<APInt> MaybeVal = getAPCstValue(MI);
349   if (!MaybeVal)
350     return None;
351   APInt &Val = *MaybeVal;
352   while (!SeenOpcodes.empty()) {
353     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
354     switch (OpcodeAndSize.first) {
355     case TargetOpcode::G_TRUNC:
356       Val = Val.trunc(OpcodeAndSize.second);
357       break;
358     case TargetOpcode::G_ANYEXT:
359     case TargetOpcode::G_SEXT:
360       Val = Val.sext(OpcodeAndSize.second);
361       break;
362     case TargetOpcode::G_ZEXT:
363       Val = Val.zext(OpcodeAndSize.second);
364       break;
365     }
366   }
367 
368   return ValueAndVReg{Val, VReg};
369 }
370 
371 bool isIConstant(const MachineInstr *MI) {
372   if (!MI)
373     return false;
374   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
375 }
376 
377 bool isFConstant(const MachineInstr *MI) {
378   if (!MI)
379     return false;
380   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
381 }
382 
383 bool isAnyConstant(const MachineInstr *MI) {
384   if (!MI)
385     return false;
386   unsigned Opc = MI->getOpcode();
387   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
388 }
389 
390 Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
391   const MachineOperand &CstVal = MI->getOperand(1);
392   if (CstVal.isCImm())
393     return CstVal.getCImm()->getValue();
394   return None;
395 }
396 
397 Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
398   const MachineOperand &CstVal = MI->getOperand(1);
399   if (CstVal.isCImm())
400     return CstVal.getCImm()->getValue();
401   if (CstVal.isFPImm())
402     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
403   return None;
404 }
405 
406 } // end anonymous namespace
407 
408 Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
409     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
410   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
411                                            getCImmAsAPInt, LookThroughInstrs);
412 }
413 
414 Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
415     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
416     bool LookThroughAnyExt) {
417   return getConstantVRegValWithLookThrough(
418       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
419       LookThroughAnyExt);
420 }
421 
422 Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
423     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
424   auto Reg = getConstantVRegValWithLookThrough(
425       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
426   if (!Reg)
427     return None;
428   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
429                         Reg->VReg};
430 }
431 
432 const ConstantFP *
433 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
434   MachineInstr *MI = MRI.getVRegDef(VReg);
435   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
436     return nullptr;
437   return MI->getOperand(1).getFPImm();
438 }
439 
440 Optional<DefinitionAndSourceRegister>
441 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
442   Register DefSrcReg = Reg;
443   auto *DefMI = MRI.getVRegDef(Reg);
444   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
445   if (!DstTy.isValid())
446     return None;
447   unsigned Opc = DefMI->getOpcode();
448   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
449     Register SrcReg = DefMI->getOperand(1).getReg();
450     auto SrcTy = MRI.getType(SrcReg);
451     if (!SrcTy.isValid())
452       break;
453     DefMI = MRI.getVRegDef(SrcReg);
454     DefSrcReg = SrcReg;
455     Opc = DefMI->getOpcode();
456   }
457   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
458 }
459 
460 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
461                                          const MachineRegisterInfo &MRI) {
462   Optional<DefinitionAndSourceRegister> DefSrcReg =
463       getDefSrcRegIgnoringCopies(Reg, MRI);
464   return DefSrcReg ? DefSrcReg->MI : nullptr;
465 }
466 
467 Register llvm::getSrcRegIgnoringCopies(Register Reg,
468                                        const MachineRegisterInfo &MRI) {
469   Optional<DefinitionAndSourceRegister> DefSrcReg =
470       getDefSrcRegIgnoringCopies(Reg, MRI);
471   return DefSrcReg ? DefSrcReg->Reg : Register();
472 }
473 
474 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
475                                  const MachineRegisterInfo &MRI) {
476   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
477   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
478 }
479 
480 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
481   if (Size == 32)
482     return APFloat(float(Val));
483   if (Size == 64)
484     return APFloat(Val);
485   if (Size != 16)
486     llvm_unreachable("Unsupported FPConstant size");
487   bool Ignored;
488   APFloat APF(Val);
489   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
490   return APF;
491 }
492 
493 Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
494                                         const Register Op2,
495                                         const MachineRegisterInfo &MRI) {
496   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
497   if (!MaybeOp2Cst)
498     return None;
499 
500   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
501   if (!MaybeOp1Cst)
502     return None;
503 
504   const APInt &C1 = MaybeOp1Cst->Value;
505   const APInt &C2 = MaybeOp2Cst->Value;
506   switch (Opcode) {
507   default:
508     break;
509   case TargetOpcode::G_ADD:
510   case TargetOpcode::G_PTR_ADD:
511     return C1 + C2;
512   case TargetOpcode::G_AND:
513     return C1 & C2;
514   case TargetOpcode::G_ASHR:
515     return C1.ashr(C2);
516   case TargetOpcode::G_LSHR:
517     return C1.lshr(C2);
518   case TargetOpcode::G_MUL:
519     return C1 * C2;
520   case TargetOpcode::G_OR:
521     return C1 | C2;
522   case TargetOpcode::G_SHL:
523     return C1 << C2;
524   case TargetOpcode::G_SUB:
525     return C1 - C2;
526   case TargetOpcode::G_XOR:
527     return C1 ^ C2;
528   case TargetOpcode::G_UDIV:
529     if (!C2.getBoolValue())
530       break;
531     return C1.udiv(C2);
532   case TargetOpcode::G_SDIV:
533     if (!C2.getBoolValue())
534       break;
535     return C1.sdiv(C2);
536   case TargetOpcode::G_UREM:
537     if (!C2.getBoolValue())
538       break;
539     return C1.urem(C2);
540   case TargetOpcode::G_SREM:
541     if (!C2.getBoolValue())
542       break;
543     return C1.srem(C2);
544   case TargetOpcode::G_SMIN:
545     return APIntOps::smin(C1, C2);
546   case TargetOpcode::G_SMAX:
547     return APIntOps::smax(C1, C2);
548   case TargetOpcode::G_UMIN:
549     return APIntOps::umin(C1, C2);
550   case TargetOpcode::G_UMAX:
551     return APIntOps::umax(C1, C2);
552   }
553 
554   return None;
555 }
556 
557 Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
558                                             const Register Op2,
559                                             const MachineRegisterInfo &MRI) {
560   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
561   if (!Op2Cst)
562     return None;
563 
564   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
565   if (!Op1Cst)
566     return None;
567 
568   APFloat C1 = Op1Cst->getValueAPF();
569   const APFloat &C2 = Op2Cst->getValueAPF();
570   switch (Opcode) {
571   case TargetOpcode::G_FADD:
572     C1.add(C2, APFloat::rmNearestTiesToEven);
573     return C1;
574   case TargetOpcode::G_FSUB:
575     C1.subtract(C2, APFloat::rmNearestTiesToEven);
576     return C1;
577   case TargetOpcode::G_FMUL:
578     C1.multiply(C2, APFloat::rmNearestTiesToEven);
579     return C1;
580   case TargetOpcode::G_FDIV:
581     C1.divide(C2, APFloat::rmNearestTiesToEven);
582     return C1;
583   case TargetOpcode::G_FREM:
584     C1.mod(C2);
585     return C1;
586   case TargetOpcode::G_FCOPYSIGN:
587     C1.copySign(C2);
588     return C1;
589   case TargetOpcode::G_FMINNUM:
590     return minnum(C1, C2);
591   case TargetOpcode::G_FMAXNUM:
592     return maxnum(C1, C2);
593   case TargetOpcode::G_FMINIMUM:
594     return minimum(C1, C2);
595   case TargetOpcode::G_FMAXIMUM:
596     return maximum(C1, C2);
597   case TargetOpcode::G_FMINNUM_IEEE:
598   case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling NaNs and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct sNaN handling.
603     break;
604   default:
605     break;
606   }
607 
608   return None;
609 }
610 
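// Element-wise constant fold of a binary op over two G_BUILD_VECTOR operands.
// Returns an empty Register if either operand is not a build vector of
// foldable constants; otherwise emits a new constant build vector via MIB.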
611 Register llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
612                                        const Register Op2,
613                                        const MachineRegisterInfo &MRI,
614                                        MachineIRBuilder &MIB) {
615   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
616   if (!SrcVec2)
617     return Register();
618 
619   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
620   if (!SrcVec1)
621     return Register();
622 
623   const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));
624 
625   SmallVector<Register, 16> FoldedElements;
626   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
627     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
628                                       SrcVec2->getSourceReg(Idx), MRI);
629     if (!MaybeCst)
630       return Register();
631     auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
632     FoldedElements.emplace_back(FoldedCstReg);
633   }
634   // Create the new vector constant.
635   auto CstVec =
636       MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
637   return CstVec.getReg(0);
638 }
639 
640 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
641                            bool SNaN) {
642   const MachineInstr *DefMI = MRI.getVRegDef(Val);
643   if (!DefMI)
644     return false;
645 
646   const TargetMachine& TM = DefMI->getMF()->getTarget();
647   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
648     return true;
649 
650   // If the value is a constant, we can obviously see if it is a NaN or not.
651   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
652     return !FPVal->getValueAPF().isNaN() ||
653            (SNaN && !FPVal->getValueAPF().isSignaling());
654   }
655 
656   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
657     for (const auto &Op : DefMI->uses())
658       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
659         return false;
660     return true;
661   }
662 
663   switch (DefMI->getOpcode()) {
664   default:
665     break;
666   case TargetOpcode::G_FMINNUM_IEEE:
667   case TargetOpcode::G_FMAXNUM_IEEE: {
668     if (SNaN)
669       return true;
670     // This can return a NaN if either operand is an sNaN, or if both operands
671     // are NaN.
672     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
673             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
674            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
675             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
676   }
677   case TargetOpcode::G_FMINNUM:
678   case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not to be a NaN, since it will be returned
    // if the other ends up being one.
681     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
682            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
683   }
684   }
685 
686   if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
689     switch (DefMI->getOpcode()) {
690     case TargetOpcode::G_FPEXT:
691     case TargetOpcode::G_FPTRUNC:
692     case TargetOpcode::G_FCANONICALIZE:
693       return true;
694     default:
695       return false;
696     }
697   }
698 
699   return false;
700 }
701 
702 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
703                                   const MachinePointerInfo &MPO) {
704   auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
705   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
706     MachineFrameInfo &MFI = MF.getFrameInfo();
707     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
708                            MPO.Offset);
709   }
710 
711   if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
712     const Module *M = MF.getFunction().getParent();
713     return V->getPointerAlignment(M->getDataLayout());
714   }
715 
716   return Align(1);
717 }
718 
719 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
720                                         const TargetInstrInfo &TII,
721                                         MCRegister PhysReg,
722                                         const TargetRegisterClass &RC,
723                                         const DebugLoc &DL, LLT RegTy) {
724   MachineBasicBlock &EntryMBB = MF.front();
725   MachineRegisterInfo &MRI = MF.getRegInfo();
726   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
727   if (LiveIn) {
728     MachineInstr *Def = MRI.getVRegDef(LiveIn);
729     if (Def) {
730       // FIXME: Should the verifier check this is in the entry block?
731       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
732       return LiveIn;
733     }
734 
    // It's possible the incoming argument register and copy were added
    // during lowering, but later deleted due to being/becoming dead. If this
    // happens, re-insert the copy.
738   } else {
739     // The live in register was not present, so add it.
740     LiveIn = MF.addLiveIn(PhysReg, &RC);
741     if (RegTy.isValid())
742       MRI.setType(LiveIn, RegTy);
743   }
744 
745   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
746     .addReg(PhysReg);
747   if (!EntryMBB.isLiveIn(PhysReg))
748     EntryMBB.addLiveIn(PhysReg);
749   return LiveIn;
750 }
751 
752 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
753                                         uint64_t Imm,
754                                         const MachineRegisterInfo &MRI) {
755   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
756   if (MaybeOp1Cst) {
757     switch (Opcode) {
758     default:
759       break;
760     case TargetOpcode::G_SEXT_INREG: {
761       LLT Ty = MRI.getType(Op1);
762       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
763     }
764     }
765   }
766   return None;
767 }
768 
769 Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
770                                                Register Src,
771                                                const MachineRegisterInfo &MRI) {
772   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
773   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
774     APFloat DstVal(getFltSemanticForLLT(DstTy));
775     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
776                             APFloat::rmNearestTiesToEven);
777     return DstVal;
778   }
779   return None;
780 }
781 
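// Constant fold a count-leading-zeros of Src. Returns the per-element leading
// zero counts if Src is a constant (or a G_BUILD_VECTOR of constants), and
// None otherwise.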
782 Optional<SmallVector<unsigned>>
783 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
784   LLT Ty = MRI.getType(Src);
785   SmallVector<unsigned> FoldedCTLZs;
786   auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
787     auto MaybeCst = getIConstantVRegVal(R, MRI);
788     if (!MaybeCst)
789       return None;
790     return MaybeCst->countLeadingZeros();
791   };
792   if (Ty.isVector()) {
793     // Try to constant fold each element.
794     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
795     if (!BV)
796       return None;
797     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
798       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
799         FoldedCTLZs.emplace_back(*MaybeFold);
800         continue;
801       }
802       return None;
803     }
804     return FoldedCTLZs;
805   }
806   if (auto MaybeCst = tryFoldScalar(Src)) {
807     FoldedCTLZs.emplace_back(*MaybeCst);
808     return FoldedCTLZs;
809   }
810   return None;
811 }
812 
813 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
814                                   GISelKnownBits *KB) {
815   Optional<DefinitionAndSourceRegister> DefSrcReg =
816       getDefSrcRegIgnoringCopies(Reg, MRI);
817   if (!DefSrcReg)
818     return false;
819 
820   const MachineInstr &MI = *DefSrcReg->MI;
821   const LLT Ty = MRI.getType(Reg);
822 
823   switch (MI.getOpcode()) {
824   case TargetOpcode::G_CONSTANT: {
825     unsigned BitWidth = Ty.getScalarSizeInBits();
826     const ConstantInt *CI = MI.getOperand(1).getCImm();
827     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
828   }
829   case TargetOpcode::G_SHL: {
830     // A left-shift of a constant one will have exactly one bit set because
831     // shifting the bit off the end is undefined.
832 
833     // TODO: Constant splat
834     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
835       if (*ConstLHS == 1)
836         return true;
837     }
838 
839     break;
840   }
841   case TargetOpcode::G_LSHR: {
842     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
843       if (ConstLHS->isSignMask())
844         return true;
845     }
846 
847     break;
848   }
849   case TargetOpcode::G_BUILD_VECTOR: {
850     // TODO: Probably should have a recursion depth guard since you could have
851     // bitcasted vector elements.
852     for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
853       if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
854         return false;
855 
856     return true;
857   }
858   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
861     const unsigned BitWidth = Ty.getScalarSizeInBits();
862     for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
863       auto Const = getIConstantVRegVal(MO.getReg(), MRI);
864       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
865         return false;
866     }
867 
868     return true;
869   }
870   default:
871     break;
872   }
873 
874   if (!KB)
875     return false;
876 
877   // More could be done here, though the above checks are enough
878   // to handle some common cases.
879 
880   // Fall back to computeKnownBits to catch other known cases.
881   KnownBits Known = KB->getKnownBits(Reg);
882   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
883 }
884 
885 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
886   AU.addPreserved<StackProtector>();
887 }
888 
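// Least common multiple of the two sizes in bits, computed as
// OrigSize * TargetSize / gcd(OrigSize, TargetSize).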
889 static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
890   unsigned Mul = OrigSize * TargetSize;
891   unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
892   return Mul / GCDSize;
893 }
894 
895 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
896   const unsigned OrigSize = OrigTy.getSizeInBits();
897   const unsigned TargetSize = TargetTy.getSizeInBits();
898 
899   if (OrigSize == TargetSize)
900     return OrigTy;
901 
902   if (OrigTy.isVector()) {
903     const LLT OrigElt = OrigTy.getElementType();
904 
905     if (TargetTy.isVector()) {
906       const LLT TargetElt = TargetTy.getElementType();
907 
908       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
909         int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
910                                             TargetTy.getNumElements());
911         // Prefer the original element type.
912         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
913         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
914                            OrigTy.getElementType());
915       }
916     } else {
917       if (OrigElt.getSizeInBits() == TargetSize)
918         return OrigTy;
919     }
920 
921     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
922     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
923   }
924 
925   if (TargetTy.isVector()) {
926     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
927     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
928   }
929 
930   unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
931 
932   // Preserve pointer types.
933   if (LCMSize == OrigSize)
934     return OrigTy;
935   if (LCMSize == TargetSize)
936     return TargetTy;
937 
938   return LLT::scalar(LCMSize);
939 }
940 
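// Unlike getLCMType, when both types are vectors with the same element size,
// this only rounds OrigTy's element count up to a multiple of TargetTy's
// element count, which may yield a type smaller than their LCM.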
941 LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
942   if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
943       (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
944     return getLCMType(OrigTy, TargetTy);
945 
946   unsigned OrigTyNumElts = OrigTy.getNumElements();
947   unsigned TargetTyNumElts = TargetTy.getNumElements();
948   if (OrigTyNumElts % TargetTyNumElts == 0)
949     return OrigTy;
950 
951   unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
952   return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
953                              OrigTy.getElementType());
954 }
955 
956 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
957   const unsigned OrigSize = OrigTy.getSizeInBits();
958   const unsigned TargetSize = TargetTy.getSizeInBits();
959 
960   if (OrigSize == TargetSize)
961     return OrigTy;
962 
963   if (OrigTy.isVector()) {
964     LLT OrigElt = OrigTy.getElementType();
965     if (TargetTy.isVector()) {
966       LLT TargetElt = TargetTy.getElementType();
967       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
968         int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
969                                         TargetTy.getNumElements());
970         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
971       }
972     } else {
973       // If the source is a vector of pointers, return a pointer element.
974       if (OrigElt.getSizeInBits() == TargetSize)
975         return OrigElt;
976     }
977 
978     unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
979     if (GCD == OrigElt.getSizeInBits())
980       return OrigElt;
981 
982     // If we can't produce the original element type, we have to use a smaller
983     // scalar.
984     if (GCD < OrigElt.getSizeInBits())
985       return LLT::scalar(GCD);
986     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
987   }
988 
989   if (TargetTy.isVector()) {
990     // Try to preserve the original element type.
991     LLT TargetElt = TargetTy.getElementType();
992     if (TargetElt.getSizeInBits() == OrigSize)
993       return OrigTy;
994   }
995 
996   unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
997   return LLT::scalar(GCD);
998 }
999 
1000 Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
1001   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1002          "Only G_SHUFFLE_VECTOR can have a splat index!");
1003   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1004   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1005 
1006   // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 to give callers the best chance to simplify.
1008   if (FirstDefinedIdx == Mask.end())
1009     return 0;
1010 
1011   // Make sure all remaining elements are either undef or the same
1012   // as the first non-undef value.
1013   int SplatValue = *FirstDefinedIdx;
1014   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1015              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1016     return None;
1017 
1018   return SplatValue;
1019 }
1020 
1021 static bool isBuildVectorOp(unsigned Opcode) {
1022   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1023          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1024 }
1025 
1026 namespace {
1027 
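// Return the constant value (and the vreg it was found on) shared by all
// sources of a G_BUILD_VECTOR / G_BUILD_VECTOR_TRUNC, or None if the sources
// are not all the same constant. Undef sources are skipped when AllowUndef is
// set.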
1028 Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1029                                            const MachineRegisterInfo &MRI,
1030                                            bool AllowUndef) {
1031   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1032   if (!MI)
1033     return None;
1034 
1035   if (!isBuildVectorOp(MI->getOpcode()))
1036     return None;
1037 
1038   Optional<ValueAndVReg> SplatValAndReg = None;
1039   for (MachineOperand &Op : MI->uses()) {
1040     Register Element = Op.getReg();
1041     auto ElementValAndReg =
1042         getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1043 
    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
1045     if (!ElementValAndReg) {
1046       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1047         continue;
1048       return None;
1049     }
1050 
1051     // Record splat value
1052     if (!SplatValAndReg)
1053       SplatValAndReg = ElementValAndReg;
1054 
    // Different constant than the one already recorded, not a constant splat.
1056     if (SplatValAndReg->Value != ElementValAndReg->Value)
1057       return None;
1058   }
1059 
1060   return SplatValAndReg;
1061 }
1062 
1063 } // end anonymous namespace
1064 
1065 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1066                                       const MachineRegisterInfo &MRI,
1067                                       int64_t SplatValue, bool AllowUndef) {
1068   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1069     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1070   return false;
1071 }
1072 
1073 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1074                                       const MachineRegisterInfo &MRI,
1075                                       int64_t SplatValue, bool AllowUndef) {
1076   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1077                                     AllowUndef);
1078 }
1079 
1080 Optional<int64_t>
1081 llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
1082                                   const MachineRegisterInfo &MRI) {
1083   if (auto SplatValAndReg =
1084           getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
1085     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1086   return None;
1087 }
1088 
1089 Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
1090                                                  const MachineRegisterInfo &MRI,
1091                                                  bool AllowUndef) {
1092   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1093     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1094   return None;
1095 }
1096 
1097 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1098                                  const MachineRegisterInfo &MRI,
1099                                  bool AllowUndef) {
1100   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1101 }
1102 
1103 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1104                                 const MachineRegisterInfo &MRI,
1105                                 bool AllowUndef) {
1106   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1107 }
1108 
1109 Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
1110                                              const MachineRegisterInfo &MRI) {
1111   unsigned Opc = MI.getOpcode();
1112   if (!isBuildVectorOp(Opc))
1113     return None;
1114   if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
1115     return RegOrConstant(*Splat);
1116   auto Reg = MI.getOperand(1).getReg();
1117   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1118              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1119     return None;
1120   return RegOrConstant(Reg);
1121 }
1122 
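// Return true if MI defines a constant scalar: G_CONSTANT, G_IMPLICIT_DEF,
// optionally G_FCONSTANT (AllowFP), and optionally opaque constants such as
// globals, frame indices, block addresses and jump tables
// (AllowOpaqueConstants).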
1123 static bool isConstantScalar(const MachineInstr &MI,
1124                              const MachineRegisterInfo &MRI,
1125                              bool AllowFP = true,
1126                              bool AllowOpaqueConstants = true) {
1127   switch (MI.getOpcode()) {
1128   case TargetOpcode::G_CONSTANT:
1129   case TargetOpcode::G_IMPLICIT_DEF:
1130     return true;
1131   case TargetOpcode::G_FCONSTANT:
1132     return AllowFP;
1133   case TargetOpcode::G_GLOBAL_VALUE:
1134   case TargetOpcode::G_FRAME_INDEX:
1135   case TargetOpcode::G_BLOCK_ADDR:
1136   case TargetOpcode::G_JUMP_TABLE:
1137     return AllowOpaqueConstants;
1138   default:
1139     return false;
1140   }
1141 }
1142 
1143 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1144                                       const MachineRegisterInfo &MRI) {
1145   Register Def = MI.getOperand(0).getReg();
1146   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1147     return true;
1148   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1149   if (!BV)
1150     return false;
1151   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1152     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1153         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1154       continue;
1155     return false;
1156   }
1157   return true;
1158 }
1159 
1160 bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1161                                       const MachineRegisterInfo &MRI,
1162                                       bool AllowFP, bool AllowOpaqueConstants) {
1163   if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1164     return true;
1165 
1166   if (!isBuildVectorOp(MI.getOpcode()))
1167     return false;
1168 
1169   const unsigned NumOps = MI.getNumOperands();
1170   for (unsigned I = 1; I != NumOps; ++I) {
1171     const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1172     if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1173       return false;
1174   }
1175 
1176   return true;
1177 }
1178 
1179 Optional<APInt>
1180 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1181                                       const MachineRegisterInfo &MRI) {
1182   Register Def = MI.getOperand(0).getReg();
1183   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1184     return C->Value;
1185   auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
1186   if (!MaybeCst)
1187     return None;
1188   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1189   return APInt(ScalarSize, *MaybeCst, true);
1190 }
1191 
1192 bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1193                              const MachineRegisterInfo &MRI, bool AllowUndefs) {
1194   switch (MI.getOpcode()) {
1195   case TargetOpcode::G_IMPLICIT_DEF:
1196     return AllowUndefs;
1197   case TargetOpcode::G_CONSTANT:
1198     return MI.getOperand(1).getCImm()->isNullValue();
1199   case TargetOpcode::G_FCONSTANT: {
1200     const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1201     return FPImm->isZero() && !FPImm->isNegative();
1202   }
1203   default:
1204     if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1205       return false;
1206     return isBuildVectorAllZeros(MI, MRI);
1207   }
1208 }
1209 
1210 bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1211                                    const MachineRegisterInfo &MRI,
1212                                    bool AllowUndefs) {
1213   switch (MI.getOpcode()) {
1214   case TargetOpcode::G_IMPLICIT_DEF:
1215     return AllowUndefs;
1216   case TargetOpcode::G_CONSTANT:
1217     return MI.getOperand(1).getCImm()->isAllOnesValue();
1218   default:
1219     if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1220       return false;
1221     return isBuildVectorAllOnes(MI, MRI);
1222   }
1223 }
1224 
1225 bool llvm::matchUnaryPredicate(
1226     const MachineRegisterInfo &MRI, Register Reg,
1227     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1228 
1229   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1230   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1231     return Match(nullptr);
1232 
1233   // TODO: Also handle fconstant
1234   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1235     return Match(Def->getOperand(1).getCImm());
1236 
1237   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1238     return false;
1239 
1240   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1241     Register SrcElt = Def->getOperand(I).getReg();
1242     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1243     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1244       if (!Match(nullptr))
1245         return false;
1246       continue;
1247     }
1248 
1249     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1250         !Match(SrcDef->getOperand(1).getCImm()))
1251       return false;
1252   }
1253 
1254   return true;
1255 }
1256 
1257 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1258                           bool IsFP) {
1259   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1260   case TargetLowering::UndefinedBooleanContent:
1261     return Val & 0x1;
1262   case TargetLowering::ZeroOrOneBooleanContent:
1263     return Val == 1;
1264   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1265     return Val == -1;
1266   }
1267   llvm_unreachable("Invalid boolean contents");
1268 }
1269 
1270 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1271                              bool IsFP) {
1272   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1273   case TargetLowering::UndefinedBooleanContent:
1274   case TargetLowering::ZeroOrOneBooleanContent:
1275     return 1;
1276   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1277     return -1;
1278   }
1279   llvm_unreachable("Invalid boolean contents");
1280 }
1281 
1282 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1283                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1284   const auto &F = MBB.getParent()->getFunction();
1285   return F.hasOptSize() || F.hasMinSize() ||
1286          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1287 }
1288 
1289 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1290                             LostDebugLocObserver *LocObserver,
1291                             SmallInstListTy &DeadInstChain) {
1292   for (MachineOperand &Op : MI.uses()) {
1293     if (Op.isReg() && Op.getReg().isVirtual())
1294       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1295   }
1296   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1297   DeadInstChain.remove(&MI);
1298   MI.eraseFromParent();
1299   if (LocObserver)
1300     LocObserver->checkpoint(false);
1301 }
1302 
1303 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1304                        MachineRegisterInfo &MRI,
1305                        LostDebugLocObserver *LocObserver) {
1306   SmallInstListTy DeadInstChain;
1307   for (MachineInstr *MI : DeadInstrs)
1308     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1309 
1310   while (!DeadInstChain.empty()) {
1311     MachineInstr *Inst = DeadInstChain.pop_back_val();
1312     if (!isTriviallyDead(*Inst, MRI))
1313       continue;
1314     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1315   }
1316 }
1317 
1318 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1319                       LostDebugLocObserver *LocObserver) {
1320   return eraseInstrs({&MI}, MRI, LocObserver);
1321 }
1322