//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // Append this instruction to the end of the basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}


// Out of line virtual method, so the vtable, etc. has a home.
Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");
  if (hasMetadataHashEntry())
    clearMetadataHashEntries();
}


void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

Module *Instruction::getModule() {
  return getParent()->getModule();
}

Function *Instruction::getFunction() { return getParent()->getParent(); }

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}
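// Illustrative usage sketch (X, Y, and InsertPt are assumed, pre-existing
// Values/Instructions in the same function; nothing here is defined in this
// file):
//
//   Instruction *Add = BinaryOperator::CreateAdd(X, Y); // created unlinked
//   Add->insertBefore(InsertPt);  // Add now lives in InsertPt's basic block
//
// moveBefore(), defined next, follows the same pattern except that it first
// unlinks the instruction from whatever block currently contains it.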
/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setHasUnsafeAlgebra(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::hasUnsafeAlgebra() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
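// Illustrative usage sketch (FDiv is an assumed, pre-existing floating-point
// division instruction):
//
//   FastMathFlags FMF;
//   FMF.setAllowReciprocal();
//   FDiv->setFastMathFlags(FMF); // asserts that FDiv is an FPMathOperator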
fast-math flag on invalid op"); 191 return cast<FPMathOperator>(this)->getFastMathFlags(); 192 } 193 194 void Instruction::copyFastMathFlags(const Instruction *I) { 195 copyFastMathFlags(I->getFastMathFlags()); 196 } 197 198 void Instruction::copyIRFlags(const Value *V) { 199 // Copy the wrapping flags. 200 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) { 201 if (isa<OverflowingBinaryOperator>(this)) { 202 setHasNoSignedWrap(OB->hasNoSignedWrap()); 203 setHasNoUnsignedWrap(OB->hasNoUnsignedWrap()); 204 } 205 } 206 207 // Copy the exact flag. 208 if (auto *PE = dyn_cast<PossiblyExactOperator>(V)) 209 if (isa<PossiblyExactOperator>(this)) 210 setIsExact(PE->isExact()); 211 212 // Copy the fast-math flags. 213 if (auto *FP = dyn_cast<FPMathOperator>(V)) 214 if (isa<FPMathOperator>(this)) 215 copyFastMathFlags(FP->getFastMathFlags()); 216 217 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V)) 218 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this)) 219 DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds()); 220 } 221 222 void Instruction::andIRFlags(const Value *V) { 223 if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) { 224 if (isa<OverflowingBinaryOperator>(this)) { 225 setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap()); 226 setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap()); 227 } 228 } 229 230 if (auto *PE = dyn_cast<PossiblyExactOperator>(V)) 231 if (isa<PossiblyExactOperator>(this)) 232 setIsExact(isExact() & PE->isExact()); 233 234 if (auto *FP = dyn_cast<FPMathOperator>(V)) { 235 if (isa<FPMathOperator>(this)) { 236 FastMathFlags FM = getFastMathFlags(); 237 FM &= FP->getFastMathFlags(); 238 copyFastMathFlags(FM); 239 } 240 } 241 242 if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V)) 243 if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this)) 244 DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds()); 245 } 246 247 const char *Instruction::getOpcodeName(unsigned OpCode) { 248 switch (OpCode) { 249 // Terminators 250 case Ret: return "ret"; 251 case Br: return "br"; 252 case Switch: return "switch"; 253 case IndirectBr: return "indirectbr"; 254 case Invoke: return "invoke"; 255 case Resume: return "resume"; 256 case Unreachable: return "unreachable"; 257 case CleanupRet: return "cleanupret"; 258 case CatchRet: return "catchret"; 259 case CatchPad: return "catchpad"; 260 case CatchSwitch: return "catchswitch"; 261 262 // Standard binary operators... 263 case Add: return "add"; 264 case FAdd: return "fadd"; 265 case Sub: return "sub"; 266 case FSub: return "fsub"; 267 case Mul: return "mul"; 268 case FMul: return "fmul"; 269 case UDiv: return "udiv"; 270 case SDiv: return "sdiv"; 271 case FDiv: return "fdiv"; 272 case URem: return "urem"; 273 case SRem: return "srem"; 274 case FRem: return "frem"; 275 276 // Logical operators... 277 case And: return "and"; 278 case Or : return "or"; 279 case Xor: return "xor"; 280 281 // Memory instructions... 282 case Alloca: return "alloca"; 283 case Load: return "load"; 284 case Store: return "store"; 285 case AtomicCmpXchg: return "cmpxchg"; 286 case AtomicRMW: return "atomicrmw"; 287 case Fence: return "fence"; 288 case GetElementPtr: return "getelementptr"; 289 290 // Convert instructions... 
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:         return "ret";
  case Br:          return "br";
  case Switch:      return "switch";
  case IndirectBr:  return "indirectbr";
  case Invoke:      return "invoke";
  case Resume:      return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet:  return "cleanupret";
  case CatchRet:    return "catchret";
  case CatchPad:    return "catchpad";
  case CatchSwitch: return "catchswitch";

  // Standard binary operators...
  case Add:  return "add";
  case FAdd: return "fadd";
  case Sub:  return "sub";
  case FSub: return "fsub";
  case Mul:  return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();

  return true;
}
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
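// Illustrative usage sketch (A and B are assumed to be two existing
// instructions being considered for common-subexpression elimination):
//
//   if (A->isIdenticalTo(B))
//     ... // A and B compute the same value with the same optional flags
//   if (A->isSameOperationAs(B, Instruction::CompareIgnoringAlignment))
//     ... // same opcode and operand types, alignment differences ignored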
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block.  For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotAccessMemory();
  case Instruction::Invoke:
    return !cast<InvokeInst>(this)->doesNotAccessMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
    return !cast<CallInst>(this)->onlyReadsMemory();
  case Instruction::Invoke:
    return !cast<InvokeInst>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

/// Return true if the instruction is associative:
///
///   Associative operators satisfy: x op (y op z) === (x op y) op z
///
/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
///
bool Instruction::isAssociative(unsigned Opcode) {
  return Opcode == And || Opcode == Or || Opcode == Xor ||
         Opcode == Add || Opcode == Mul;
}

bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
  default:
    return false;
  }
}
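// Illustrative usage sketch (I is an assumed, pre-existing instruction that a
// pass would like to hoist or sink):
//
//   if (I->mayWriteToMemory() || I->mayThrow() || I->isAtomic())
//     return false; // conservatively refuse to move I across other code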
/// Return true if the instruction is commutative:
///
///   Commutative operators satisfy: (x op y) === (y op x)
///
/// In LLVM, these are the associative operators, plus the equality comparisons
/// (icmp/fcmp eq and ne), when applied to any type.
///
bool Instruction::isCommutative(unsigned op) {
  switch (op) {
  case Add:
  case FAdd:
  case Mul:
  case FMul:
  case And:
  case Or:
  case Xor:
    return true;
  default:
    return false;
  }
}

/// Return true if the instruction is idempotent:
///
///   Idempotent operators satisfy: x op x === x
///
/// In LLVM, the And and Or operators are idempotent.
///
bool Instruction::isIdempotent(unsigned Opcode) {
  return Opcode == And || Opcode == Or;
}

/// Return true if the instruction is nilpotent:
///
///   Nilpotent operators satisfy: x op x === Id,
///
/// where Id is the identity for the operator, i.e. a constant such that
///   x op Id === x and Id op x === x for all x.
///
/// In LLVM, the Xor operator is nilpotent.
///
bool Instruction::isNilpotent(unsigned Opcode) {
  return Opcode == Xor;
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name; the other two are the branch weights.
  // Build a new MDNode with the weights swapped.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Enumerate and copy over metadata from the old instruction to the new one.
  // An empty whitelist means "copy everything".
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
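// Illustrative usage sketch (I is an assumed, pre-existing instruction):
//
//   Instruction *NewI = I->clone(); // same operands, flags, and metadata,
//                                   // but unlinked and with no uses
//   NewI->insertAfter(I);           // place the copy right after the original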