1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/IR/Constants.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/MDBuilder.h"
20 #include "llvm/IR/Operator.h"
21 #include "llvm/IR/Type.h"
22 using namespace llvm;
23 
/// Construct an instruction with the given type and opcode. If \p InsertBefore
/// is non-null, the new instruction is linked into InsertBefore's basic block
/// immediately before it; otherwise the instruction is left unlinked (Parent
/// stays null) and must be inserted into a block explicitly later.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    // The ilist insert also fires the sym-tab traits callback that sets Parent.
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}
35 
/// Construct an instruction with the given type and opcode and append it to
/// the end of \p InsertAtEnd, which must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}
44 
/// Destroy an instruction. It must already have been unlinked from its basic
/// block (Parent == null); metadata references to it are RAUW'd to undef.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}
60 
61 
// Called by the SymbolTableListTraits machinery when this instruction is
// linked into or unlinked from a basic block; do not call directly.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

// Walk the containment chain: Instruction -> BasicBlock -> Module.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

// Walk the containment chain: Instruction -> BasicBlock -> Function.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

// Unlink this instruction from its containing block without deleting it; the
// caller takes ownership.
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

// Unlink and delete this instruction, returning an iterator to the
// instruction that followed it.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
81 
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

/// Unlink this instruction and reinsert it right after MovePos (i.e. before
/// the iterator one past MovePos).
void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

/// Unlink this instruction and reinsert it into \p BB before iterator \p I.
/// Implemented as an ilist splice, so no memory is allocated or freed and
/// this works whether or not the source and destination blocks are the same.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
110 
/// Return true if this instruction strictly precedes \p Other in their
/// (shared) basic block. Uses cached per-block instruction numbers; if the
/// ordering has been invalidated by block mutation, it is lazily recomputed
/// here before comparing.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
119 
120 bool Instruction::isOnlyUserOfAnyOperand() {
121   return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
122 }
123 
// Convenience wrappers for the nuw/nsw/exact bits. The casts assert that the
// instruction is of a kind that actually carries the flag (e.g. add/sub/mul/
// shl for wrap flags, udiv/sdiv/lshr/ashr for exact).
void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
143 
/// Clear every flag on this instruction whose presence can turn a well-defined
/// result into poison: nuw/nsw on add/sub/mul/shl, exact on division/shift
/// right, and inbounds on GEP. Opcodes without such flags are left untouched.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // TODO: FastMathFlags!

  // Sanity check: after dropping, the generic query must agree that no
  // poison-generating flags remain.
  assert(!cast<Operator>(this)->hasPoisonGeneratingFlags() &&
         "must be kept in sync");
}
170 
/// Drop metadata not in \p KnownIDs, and for call-like instructions also drop
/// parameter/return attributes (e.g. noundef, nonnull) whose violation is UB,
/// making the instruction safe to speculate or hoist.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttrBuilder UBImplyingAttributes = AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
188 
// Query the 'exact' flag; asserts the opcode supports it (udiv/sdiv/lshr/ashr).
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}
192 
// Fast-math flag setters. Each asserts that this instruction is an
// FPMathOperator (an FP-typed operation that may carry fast-math flags)
// before forwarding to the operator implementation.
void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

// Replace all fast-math flags at once.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

// OR the given flags into the current set (see FPMathOperator::copyFastMathFlags).
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
242 
// Fast-math flag getters; each asserts this is an FPMathOperator.
bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

// Copy all fast-math flags from another instruction, which must itself be an
// FPMathOperator (asserted inside getFastMathFlags).
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
291 
/// Copy optional IR flags (nuw/nsw, exact, fast-math, inbounds) from \p V onto
/// this instruction, for each category only when both sides support it. Note
/// the GEP case ORs inbounds (keeps it if either side had it), unlike
/// andIRFlags below which takes the intersection.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}
315 
/// Intersect the optional IR flags of this instruction with those of \p V:
/// a flag survives only if both instructions had it. Used when an instruction
/// stands in for two original ones (e.g. after CSE/hoisting).
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      // Keep only the fast-math flags common to both operations.
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
340 
/// Return the textual IR mnemonic for the given opcode value, e.g. "add" for
/// Instruction::Add. Unknown values yield a placeholder string rather than
/// asserting, so this is safe to call on untrusted opcode numbers.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
425 
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// "Special state" is everything beyond opcode, type, and operands: volatility,
/// atomic orderings, alignment, call attributes, shuffle masks, etc. Operand
/// equality is checked separately by the callers. Both instructions must have
/// the same opcode (asserted), so the cast<>s on I2 are safe.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  // Opcodes with no extra state compare equal by construction.
  return true;
}
492 
493 bool Instruction::isIdenticalTo(const Instruction *I) const {
494   return isIdenticalToWhenDefined(I) &&
495          SubclassOptionalData == I->SubclassOptionalData;
496 }
497 
/// Return true if this instruction is identical to \p I ignoring optional
/// flags that only affect poison/undef behavior: same opcode, same type, same
/// operands (by pointer), and same special state. PHI nodes additionally
/// require matching incoming-block lists.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
522 
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Like isIdenticalTo but compares operand *types* rather than operand values,
// so two instructions computing the same operation on different inputs match.
// Flags: CompareIgnoringAlignment relaxes alignment checks;
// CompareUsingScalarTypes compares scalar element types (vector-width
// agnostic).
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
548 
549 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
550   for (const Use &U : uses()) {
551     // PHI nodes uses values in the corresponding predecessor block.  For other
552     // instructions, just check to see whether the parent of the use matches up.
553     const Instruction *I = cast<Instruction>(U.getUser());
554     const PHINode *PN = dyn_cast<PHINode>(I);
555     if (!PN) {
556       if (I->getParent() != BB)
557         return true;
558       continue;
559     }
560 
561     if (PN->getIncomingBlock(U) != BB)
562       return true;
563   }
564   return false;
565 }
566 
/// Return true if this instruction may read memory. Conservative: calls are
/// assumed to read unless marked otherwise, and non-unordered (atomic or
/// volatile) stores count as reads because they order against other accesses.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->doesNotReadMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}
586 
/// Return true if this instruction may modify memory. Mirror image of
/// mayReadFromMemory: calls are assumed to write unless readonly/readnone,
/// and non-unordered loads count as writes for ordering purposes.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
606 
/// Return true if this is an atomic operation: cmpxchg, atomicrmw, fence, or
/// a load/store whose ordering is anything other than NotAtomic.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
621 
622 bool Instruction::hasAtomicLoad() const {
623   assert(isAtomic());
624   switch (getOpcode()) {
625   default:
626     return false;
627   case Instruction::AtomicCmpXchg:
628   case Instruction::AtomicRMW:
629   case Instruction::Load:
630     return true;
631   }
632 }
633 
634 bool Instruction::hasAtomicStore() const {
635   assert(isAtomic());
636   switch (getOpcode()) {
637   default:
638     return false;
639   case Instruction::AtomicCmpXchg:
640   case Instruction::AtomicRMW:
641   case Instruction::Store:
642     return true;
643   }
644 }
645 
/// Return true if this instruction has a volatile memory access: volatile
/// load/store/cmpxchg/atomicrmw, or one of the few intrinsics that carry a
/// volatile flag (mem intrinsics and the matrix column-major load/store,
/// whose volatile flag is an immediate constant argument).
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        // Arg 2 is the i1 'volatile' immediate.
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        // Arg 3 is the i1 'volatile' immediate.
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
675 
676 bool Instruction::mayThrow() const {
677   if (const CallInst *CI = dyn_cast<CallInst>(this))
678     return !CI->doesNotThrow();
679   if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
680     return CRI->unwindsToCaller();
681   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
682     return CatchSwitch->unwindsToCaller();
683   return isa<ResumeInst>(this);
684 }
685 
/// Return true if removing or reordering this instruction could change
/// observable behavior: it may write memory, may throw, or may not return
/// (e.g. an infinite loop or abort inside a callee).
bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

/// Return true if this instruction may be removed when its result is unused:
/// not a terminator, and not a call with side effects. Non-call instructions
/// are assumed removable even if they read memory.
bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}
694 
/// Return true if this instruction is guaranteed to transfer control to its
/// successor (i.e. it cannot hang, abort, or otherwise fail to complete).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());

  // All other instructions complete in finite time.
  return true;
}
708 
/// Return true if this is a llvm.lifetime.start or llvm.lifetime.end
/// intrinsic call.
bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

/// Return true if this is a llvm.launder.invariant.group or
/// llvm.strip.invariant.group intrinsic call.
bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

/// Return true if this is a debug-info intrinsic or a pseudo-probe — both are
/// "invisible" instructions skipped by most analyses.
bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}
729 
/// Return the next instruction in this block that is not a debug intrinsic
/// (and, if \p SkipPseudoOp, not a pseudo-probe), or null if none remains.
const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

/// Return the previous instruction in this block that is not a debug
/// intrinsic (and, if \p SkipPseudoOp, not a pseudo-probe), or null if none.
const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}
745 
/// Return true if this instruction is associative. Integer opcodes are
/// decided by the static isAssociative(Opcode); fadd/fmul additionally
/// qualify when they carry both the reassoc and nsz fast-math flags.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

/// Return true if this instruction is commutative. Intrinsic calls answer
/// for themselves; other opcodes defer to the static isCommutative(Opcode).
bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}
767 
// Successor accessors. The switch bodies are generated from Instruction.def:
// each terminator opcode dispatches (via static_cast, the opcode guarantees
// the dynamic type) to the concrete class's implementation. Calling any of
// these on a non-terminator is a fatal error.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

/// Replace every successor edge that currently targets \p OldBB so that it
/// targets \p NewBB instead (all occurrences, e.g. multiple switch cases).
void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}
810 
// Fallback for subclasses that forgot to override cloneImpl; clone() below
// always dispatches to the concrete subclass, so reaching this is a bug.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
814 
/// Swap the two branch weights in this instruction's !prof metadata, used when
/// the successors of a two-way branch are swapped. Only the 3-operand
/// "branch_weights" form (name + two weights) is handled; anything else is
/// left untouched.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
831 
832 void Instruction::copyMetadata(const Instruction &SrcInst,
833                                ArrayRef<unsigned> WL) {
834   if (!SrcInst.hasMetadata())
835     return;
836 
837   DenseSet<unsigned> WLS;
838   for (unsigned M : WL)
839     WLS.insert(M);
840 
841   // Otherwise, enumerate and copy over metadata from the old instruction to the
842   // new one.
843   SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
844   SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
845   for (const auto &MD : TheMDs) {
846     if (WL.empty() || WLS.count(MD.first))
847       setMetadata(MD.first, MD.second);
848   }
849   if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
850     setDebugLoc(SrcInst.getDebugLoc());
851 }
852 
/// Create an unlinked copy of this instruction: dispatch to the concrete
/// subclass's cloneImpl (via the Instruction.def opcode table), then copy the
/// optional flag bits and all metadata. The clone has the same operands but
/// no parent block and no name.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
870