//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;

/// Construct an instruction of value-kind Value::InstructionVal + \p it (the
/// opcode) with \p NumOps operands stored at \p Ops. If \p InsertBefore is
/// non-null, the new instruction is linked into that instruction's basic
/// block immediately before it; otherwise it starts out unlinked
/// (Parent == nullptr).
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

/// As above, but appends the new instruction to the end of \p InsertAtEnd,
/// which must be non-null.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

/// An instruction must be unlinked from its basic block before destruction.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
}


// Record the containing basic block; called by the ilist machinery when this
// instruction is linked into / unlinked from a block.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

/// Convenience accessor: the module containing this instruction's block.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

/// Convenience accessor: the function containing this instruction's block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

/// Unlink this instruction from its parent block's instruction list. The
/// instruction is NOT deleted (contrast with eraseFromParent).
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

/// Unlink this instruction from its parent block and delete it. Returns an
/// iterator to the instruction that followed it.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

/// Unlink this instruction and reinsert it immediately after MovePos, in
/// MovePos's basic block.
void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

/// Unlink this instruction and reinsert it into \p BB before iterator \p I
/// (which must be BB.end() or an iterator into BB). Implemented as an ilist
/// splice, so no nodes are allocated or destroyed.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}

/// Return true if this instruction precedes \p Other within the same basic
/// block. Both instructions must have the same non-null parent. Uses the
/// block's cached per-instruction Order numbers, lazily renumbering the block
/// if the cached ordering has been invalidated.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}

/// Return true if any operand of this instruction has exactly one user
/// (which, note, is not necessarily this instruction).
bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

// The following flag accessors simply delegate to the matching Operator
// subclass; the cast<> asserts that the opcode actually carries the flag.

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

/// Clear every flag on this instruction that can turn a well-defined result
/// into poison: nuw/nsw on add/sub/mul/shl, exact on div/shift-right,
/// inbounds on GEP, and nnan/ninf on FP math ops.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // FP ops may additionally carry nnan/ninf, which also generate poison.
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  // Must have cleared everything hasPoisonGeneratingFlags() reports.
  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}

/// Drop metadata not in \p KnownIDs, and for calls additionally drop any
/// parameter/return attributes whose violation would be UB, so the
/// instruction can be safely hoisted or sunk.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

// Fast-math-flag mutators: each asserts the instruction is an FP math op
// before delegating to FPMathOperator.

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

/// Replace the whole fast-math-flag set with \p FMF.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

/// Copy the fast-math-flag set \p FMF onto this instruction (see
/// FPMathOperator::copyFastMathFlags for how it differs from setFastMathFlags).
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

// Fast-math-flag getters: same delegation pattern as the setters above.

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

/// Copy another instruction's complete fast-math-flag set onto this one.
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

/// Copy all IR-level operator flags (wrap, exact, fast-math, inbounds) that
/// both \p V and this instruction support. Each flag category is copied only
/// when this instruction's opcode can carry it.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // Note: inbounds is combined with OR here (keep it if either side has it);
  // andIRFlags below uses AND (keep it only if both sides have it).
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}

/// Intersect this instruction's operator flags with \p V's: each flag remains
/// set only if both instructions had it. Used when two instructions are being
/// merged into one.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  // Fast-math flags support bitwise intersection directly.
  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}

/// Return the textual (assembly) mnemonic for the given opcode.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
/// "Special state" is everything that is not the opcode, type, or operand
/// list: e.g. alignment/volatility/ordering on memory ops, predicates on
/// compares, attributes and bundles on calls. Instructions with no special
/// state compare equal. Both instructions must share an opcode.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  return true;
}

/// Strict identity: isIdenticalToWhenDefined plus equality of the optional
/// subclass data (e.g. nuw/nsw/exact/fast-math bits).
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

/// Identity ignoring the optional poison-relevant flag bits: same opcode,
/// type, operands (compared by pointer equality), and special state. For PHIs
/// the incoming blocks must match as well, since PHI semantics depend on the
/// operand/block pairing.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
/// Return true if this instruction performs the same operation as \p I: same
/// opcode, same (or same-scalar) types for result and operands, and same
/// special state. Unlike isIdenticalTo, the operand *values* need not match.
/// \p flags is a bitmask of CompareIgnoringAlignment / CompareUsingScalarTypes.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

/// Return true if any user of this instruction lives outside \p BB. For PHI
/// users the relevant block is the incoming block of the use, not the PHI's
/// own block.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes uses values in the corresponding predecessor block.  For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

/// Conservatively determine whether this instruction may read memory.
/// Calls defer to their memory attributes; non-unordered (atomic ordered or
/// volatile) stores are treated as reads too, since they order against loads.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

/// Conservatively determine whether this instruction may write to memory.
/// Mirror image of mayReadFromMemory: non-unordered loads count as writes.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

/// Return true if this is an atomic operation: cmpxchg/atomicrmw/fence
/// always, load/store only when they carry an atomic ordering.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

/// For an instruction known to be atomic (asserted), return true if it
/// performs a load (cmpxchg and atomicrmw both read their location).
bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

/// For an instruction known to be atomic (asserted), return true if it
/// performs a store (cmpxchg and atomicrmw both may write their location).
bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

/// Return true if this instruction has a volatile memory access: volatile
/// load/store/atomicrmw/cmpxchg, or one of the few intrinsics that take a
/// volatile flag (memory intrinsics, matrix column-major load/store).
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        // Arg 2 is the i1 volatile flag.
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        // Arg 3 is the i1 volatile flag.
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

/// Return true if this instruction may raise an exception: a call without
/// nounwind, or a cleanupret/catchswitch that unwinds to the caller, or a
/// resume.
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

/// An instruction has side effects if it writes memory, may throw, or may
/// not return (infinite loop / abort inside a callee).
bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

/// Return true if deleting this (unused) instruction cannot change observable
/// behavior: not a terminator, and not a side-effecting call.
bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator();
}

/// Return true if this instruction is guaranteed to transfer control to its
/// successor (i.e. it cannot loop forever, abort, or block indefinitely).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  // Everything else transfers control to the next instruction.
  return true;
}

/// Return true if this is a llvm.lifetime.start/end intrinsic call.
bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

/// Return true if this is a llvm.launder/strip.invariant.group intrinsic call.
bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

/// Return true if this is a debug-info intrinsic or a pseudo-probe intrinsic
/// (instructions that carry no runtime semantics).
bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

/// Return the next instruction in this block that is not a debug intrinsic
/// (and, if \p SkipPseudoOp, not a pseudo probe), or null at end of block.
const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

/// Return the previous instruction in this block that is not a debug
/// intrinsic (and, if \p SkipPseudoOp, not a pseudo probe), or null at start.
const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

/// Return true if this instruction is associative. FP add/mul additionally
/// require the reassoc and nsz fast-math flags on this instance.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

/// Return true if this instruction is commutative; intrinsic calls ask the
/// intrinsic itself, everything else is decided per opcode.
bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

// The three successor accessors below dispatch on the opcode via the
// HANDLE_TERM_INST table in Instruction.def; calling them on a
// non-terminator is a programming error.

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

/// Replace every successor edge that targets \p OldBB with \p NewBB.
void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

// Fallback for subclasses that did not override cloneImpl; clone() below
// always dispatches to the concrete subclass, so reaching this is a bug.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

/// Swap the two weight operands of a "branch_weights" !prof annotation
/// (used when the branch's successors are swapped). No-ops on any other
/// metadata shape.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

/// Copy metadata (including the debug location) from \p SrcInst onto this
/// instruction. If \p WL is non-empty it acts as a whitelist: only metadata
/// kinds listed in it are copied.
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  // The debug location is stored separately from other metadata.
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

/// Create an unlinked copy of this instruction: dispatches to the concrete
/// subclass's cloneImpl via the HANDLE_INST table, then copies the optional
/// flag bits and all metadata. The clone has no parent and no name.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}