//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
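  // E.g. for a use whose register cannot take RegClass directly, the result
  // is (a sketch; vreg numbers are illustrative):
  //   %new:regclass = COPY %old
  // and RegMO is rewritten to use %new. For a def, the copy is inserted after
  // InsertPt and flows the other way: %old = COPY %new.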
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
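    // No new vreg was created: Reg itself was constrained in place, which
    // changes its defining instruction and every use, so notify the observer
    // about each of them.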
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose
  // any register class constraints on some of their operands: if it's a use,
  // we can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
113 
114   if (!RegClass) {
115     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
116            "Register class constraint is required unless either the "
117            "instruction is target independent or the operand is a use");
118     // FIXME: Just bailing out like this here could be not enough, unless we
119     // expect the users of this function to do the right thing for PHIs and
120     // COPY:
121     //   v1 = COPY v0
122     //   v2 = COPY v1
123     // v1 here may end up not being constrained at all. Please notice that to
124     // reproduce the issue we likely need a destination pattern of a selection
125     // rule producing such extra copies, not just an input GMIR with them as
126     // every existing target using selectImpl handles copies before calling it
127     // and they never reach this function.
128     return Reg;
129   }
130   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
131                                   RegMO);
132 }
133 
134 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
135                                             const TargetInstrInfo &TII,
136                                             const TargetRegisterInfo &TRI,
137                                             const RegisterBankInfo &RBI) {
138   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
139          "A selected instruction is expected");
140   MachineBasicBlock &MBB = *I.getParent();
141   MachineFunction &MF = *MBB.getParent();
142   MachineRegisterInfo &MRI = MF.getRegInfo();
143 
144   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
145     MachineOperand &MO = I.getOperand(OpI);
146 
147     // There's nothing to be done on non-register operands.
148     if (!MO.isReg())
149       continue;
150 
151     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
152     assert(MO.isReg() && "Unsupported non-reg operand");
153 
154     Register Reg = MO.getReg();
155     // Physical registers don't need to be constrained.
156     if (Register::isPhysicalRegister(Reg))
157       continue;
158 
159     // Register operands with a value of 0 (e.g. predicate operands) don't need
160     // to be constrained.
161     if (Reg == 0)
162       continue;
163 
164     // If the operand is a vreg, we should constrain its regclass, and only
165     // insert COPYs if that's impossible.
166     // constrainOperandRegClass does that for us.
167     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
168 
169     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
170     // done.
171     if (MO.isUse()) {
172       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
173       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
174         I.tieOperands(DefIdx, OpI);
175     }
176   }
177   return true;
178 }
179 
180 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
181                          MachineRegisterInfo &MRI) {
182   // Give up if either DstReg or SrcReg  is a physical register.
183   if (DstReg.isPhysical() || SrcReg.isPhysical())
184     return false;
185   // Give up if the types don't match.
186   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
187     return false;
188   // Replace if either DstReg has no constraints or the register
189   // constraints match.
190   return !MRI.getRegClassOrRegBank(DstReg) ||
191          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
192 }
193 
194 bool llvm::isTriviallyDead(const MachineInstr &MI,
195                            const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant, bool LookThroughAnyExt) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
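  // Walk up the def chain, looking through copies, extensions and truncates.
  // Each size-changing opcode is recorded together with its destination width
  // so the casts can be replayed on the constant afterwards.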
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
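  // Replay the recorded casts on the constant, innermost first, to
  // reconstruct the value the original vreg would hold.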
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = *MaybeOp1Cst;
  const APInt &C2 = *MaybeOp2Cst;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // These FP operations quiet a signaling NaN, so their result is never an
    // sNaN. For now, just handle the ones inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
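    // The alignment known at byte Offset into the object is the largest power
    // of two dividing both the object's base alignment and the offset.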
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB &&
             "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
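    // Likewise, a logical right-shift of the sign mask (only the high bit
    // set) always produces exactly one set bit.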
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  default:
    break;
  }

  // TODO: Are all operands of a build vector constant powers of two?
  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

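// Returns the least common multiple of the two sizes, using
// lcm(a, b) = a * b / gcd(a, b).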
static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

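// A sketch of the results this computes (derived from the cases below):
//   getLCMType(<2 x s32>, <3 x s32>) == <6 x s32>
//   getLCMType(s32, s64)             == s64
//   getLCMType(<2 x s32>, s64)       == <2 x s32>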
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

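// A sketch of the results this computes (derived from the cases below):
//   getGCDType(<4 x s32>, <3 x s32>) == s32
//   getGCDType(<4 x s32>, s64)       == <2 x s32>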
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}