//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
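  // If Reg cannot be constrained to RegClass in place, hand back a fresh
  // virtual register of that class instead; callers such as
  // constrainOperandRegClass() below are then expected to bridge the old and
  // new registers with a COPY.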
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  if (OpRC) {
    // Obtain the RC from incoming regbank if it is a proper sub-class.
    // Operands can have multiple regbanks for a superclass that combine
    // different register types (E.g., AMDGPU's VGPR and AGPR). The regbank
    // ambiguity resolved by targets during regbankselect should not be
    // overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

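// Shared worker for the look-through constant helpers below. It walks back
// through COPY / G_TRUNC / G_SEXT / G_ZEXT (and, optionally, G_ANYEXT and
// G_INTTOPTR) until it reaches a definition matching IsConstantOpcode, then
// replays the recorded truncations/extensions on the constant value.
//
// Illustrative GMIR (hypothetical example, not taken from a test):
//   %c:_(s32) = G_CONSTANT i32 42
//   %t:_(s16) = G_TRUNC %c(s32)
//   %v:_(s64) = G_SEXT %t(s16)
// Looking through %v yields the APInt 42 widened back to 64 bits, paired with
// the vreg of the underlying G_CONSTANT (%c).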
Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

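// Walk up through COPY instructions and generic optimization hints to find the
// underlying defining instruction and the earliest source vreg feeding Reg.
// The walk stops as soon as a source without a valid LLT is seen, so the
// returned register always has a type.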
Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

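// Fold an integer binary operation over two operands that are defined directly
// by G_CONSTANT/G_FCONSTANT (no look-through is requested here). Division and
// remainder by zero are deliberately left unfolded and yield None.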
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return None;
}

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return None;
}

Register llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI,
                                       MachineIRBuilder &MIB) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return Register();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return Register();

  const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));

  SmallVector<Register, 16> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return Register();
    auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
    FoldedElements.emplace_back(FoldedCstReg);
  }
  // Create the new vector constant.
  auto CstVec =
      MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
  return CstVec.getReg(0);
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
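      // G_SEXT_INREG sign-extends from bit Imm of the source: truncate the
      // constant to Imm bits, then sign-extend it back to the full scalar
      // width of the result type.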
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

Optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return None;
    return MaybeCst->countLeadingZeros();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return None;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return None;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
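    // Similarly, a logical right-shift of the sign mask (only the top bit set)
    // leaves exactly one bit set for any in-range shift amount.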
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

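// Worked examples, derived from the code below:
//   getLCMType(s32, s64)             -> s64
//   getLCMType(<2 x s32>, s64)       -> <2 x s32>
//   getLCMType(<3 x s32>, <2 x s32>) -> <6 x s32>
// In the scalar-vs-scalar case, if the LCM size already matches one of the
// inputs that type is returned as-is, which preserves pointer types.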
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

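// Unlike getLCMType, getCoverTy only needs a type that covers both inputs, so
// for same-element vectors it rounds the element count up instead of taking a
// full LCM. For example (derived from the code below), getCoverTy(<5 x s16>,
// <2 x s16>) yields <6 x s16>, whereas getLCMType would produce <10 x s16>.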
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getNumElements();
  unsigned TargetTyNumElts = TargetTy.getNumElements();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

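// Worked examples, derived from the code below:
//   getGCDType(<4 x s32>, <2 x s32>) -> <2 x s32>
//   getGCDType(<2 x s64>, s32)       -> s32
//   getGCDType(s64, s48)             -> s16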
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return None;

  if (!isBuildVectorOp(MI->getOpcode()))
    return None;

  Optional<ValueAndVReg> SplatValAndReg = None;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    auto ElementValAndReg =
        getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return None;
    }

    // Record splat value
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // Different constant than the one already recorded, not a constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return None;
  }

  return SplatValAndReg;
}

} // end anonymous namespace

bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}

bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return None;
}

Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
                                                 const MachineRegisterInfo &MRI,
                                                 bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return None;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}

Optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
  if (!MaybeCst)
    return None;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);
}

bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}

bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}

bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

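// Erase the given instructions, then iteratively erase any defining
// instructions that became trivially dead as a result. saveUsesAndErase()
// seeds the worklist with the definitions of each erased instruction's
// operands, and the loop below keeps draining it until nothing else dies.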
void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}