//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
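  // For example (a sketch of the rewrite below), when RegMO is a use,
  //   %2 = INST %0
  // becomes
  //   %1 = COPY %0    ; %1 has the required register class
  //   %2 = INST %1
  // and when RegMO is a def, the COPY is instead inserted after InsertPt,
  // copying the constrained register back into the original one.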
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper sub-class.
    // Operands can have multiple regbanks for a superclass that combines
    // different register types (e.g., AMDGPU's VGPR and AGPR). The regbank
    // ambiguity resolved by targets during regbankselect should not be
    // overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

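// Shared helper for the getXXXConstantVRegValWithLookThrough wrappers below.
// A sketch of the walk it performs: given
//   %c:_(s32) = G_CONSTANT i32 -1
//   %t:_(s16) = G_TRUNC %c
// looking through %t records (G_TRUNC, 16), reaches the G_CONSTANT, and then
// replays the truncation on the APInt, yielding the 16-bit value 0xFFFF.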
Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return None;
}

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return None;
}

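// Element-wise constant folding for vector operands: if both inputs are
// G_BUILD_VECTORs of foldable constants, each lane is folded with
// ConstantFoldBinOp and a fresh constant G_BUILD_VECTOR is emitted. E.g. (a
// sketch in GMIR terms), a G_ADD of <1, 2> and <3, 4> folds to <4, 6>.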
Register llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI,
                                       MachineIRBuilder &MIB) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return Register();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return Register();

  const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));

  SmallVector<Register, 16> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return Register();
    auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
    FoldedElements.emplace_back(FoldedCstReg);
  }
  // Create the new vector constant.
  auto CstVec =
      MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
  return CstVec.getReg(0);
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one operand needs to be known non-NaN, since it will be returned
    // if the other ends up being a NaN.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
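    // The alignment implied by the frame object is capped by the access
    // offset: e.g., an Align(8) object accessed at Offset == 4 only yields
    // Align(4) here.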
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
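      // E.g., with a 32-bit Op1 constant of 0xFF and Imm == 8, the value is
      // truncated to the low 8 bits and sign-extended back, folding to -1.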
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

Optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return None;
    return MaybeCst->countLeadingZeros();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return None;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return None;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.
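    // E.g., (G_SHL 1, %x) is 1 << %x, which has a single set bit whenever the
    // shift amount is in range.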

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants, since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

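// Returns a type whose bit-size is the least common multiple of the two
// inputs, preferring the original element type for vectors. A few sketched
// examples of the expected results (illustrative, not exhaustive):
//   getLCMType(s32, s64)             == s64
//   getLCMType(<2 x s32>, <3 x s32>) == <6 x s32>
//   getLCMType(<3 x s32>, s64)       == <6 x s32>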
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

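// Returns a type at least as large as OrigTy whose element count is a multiple
// of TargetTy's, so that an OrigTy value can be covered by whole TargetTy
// pieces. E.g. (a sketch), getCoverTy(<9 x s8>, <2 x s8>) == <10 x s8>.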
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getNumElements();
  unsigned TargetTyNumElts = TargetTy.getNumElements();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

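// Returns the largest type whose bit-size divides both inputs, preferring the
// original element type. Sketched examples (illustrative, not exhaustive):
//   getGCDType(s64, s32)             == s32
//   getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>
//   getGCDType(<3 x s32>, s64)       == s32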
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

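// E.g. (a sketch), a G_SHUFFLE_VECTOR with mask <1, -1, 1, 1> has splat
// index 1: the -1 lanes are undef and are ignored.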
Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 to give callers the best chance to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return None;

  if (!isBuildVectorOp(MI->getOpcode()))
    return None;

  Optional<ValueAndVReg> SplatValAndReg = None;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    auto ElementValAndReg =
        getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return None;
    }

    // Record the splat value.
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // If this constant differs from the one already recorded, this is not a
    // constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return None;
  }

  return SplatValAndReg;
}

} // end anonymous namespace

bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}

bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return None;
}

Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
                                                 const MachineRegisterInfo &MRI,
                                                 bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return None;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}

Optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
  if (!MaybeCst)
    return None;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, /*isSigned=*/true);
}

bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}

bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}

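// A hedged usage sketch (assuming the AllowUndefs default from the header):
// require every vector element (or the scalar itself) to be a non-zero
// G_CONSTANT:
//   matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     return C && !C->isNullValue();
//   });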
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

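// Erase each instruction in DeadInstrs, then keep erasing any defining
// instructions that became trivially dead as a result: a small worklist
// (DeadInstChain) seeded by saveUsesAndErase above.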
void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}