//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
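    // Illustrative example (assumed IR, for documentation only): a vector
    // select pairs an <n x i1> condition with two value vectors of matching
    // length, e.g. %r = select <4 x i1> %c, <4 x float> %a, <4 x float> %b,
    // which is what the checks below enforce.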
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
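      // Illustrative example (assumed IR, for documentation only): in
      //   %p = phi i32 [ %p, %loop ], [ %x, %entry ]
      // the first incoming value is %p itself, so %x becomes the merged value.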
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
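/// (Worked example, for illustration only: with 3 existing operands and a
/// request to grow by 1, ReservedSpace becomes (max(3, 1) + 1 / 2) * 2 == 6.)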
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledValue();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    if (CI->isInlineAsm())
      return false;
  return true;
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
      !NullPointerIsDefined(getCaller(),
                            getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;

  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}

/// Determine whether the argument or parameter has the given attribute.
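/// (Illustrative sketch, assuming some existing CallBase *CB: the query below
/// consults the call-site attribute list first and then the callee, so
/// CB->paramHasAttr(0, Attribute::NonNull) is true when either the call site
/// or the called function marks parameter 0 as nonnull.)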
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise, fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a fractional value below, and to avoid using floating point
  /// numbers we use an integral value multiplied by this constant.
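  // (Worked example, for illustration only: with NumberScaling == 1024, an
  // average of 2.5 operands per bundle is represented as the integer 2560,
  // so the probe index computed below stays in integer arithmetic.)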
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" metadata for a call instruction is
// transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
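      // (Worked example, for illustration only: a recorded value-profile
      // count of 100 scaled by S/T == 1/4 becomes 100 * 1 / 4 == 25 below.)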
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block; that is
/// the responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block; that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
                                   II->getNormalDest(), II->getUnwindDest(),
                                   Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI,
                               ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
                                    CBI->getCalledValue(),
                                    CBI->getDefaultDest(),
                                    CBI->getIndirectDests(),
                                    Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertBefore) {
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(MaybeAlign(Align));
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

void AllocaInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             encode(Align));
  if (Align)
    assert(getAlignment() == Align->value() &&
           "Alignment representation error!");
  else
    assert(getAlignment() == 0 && "Alignment representation error!");
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
1328 const BasicBlock *Parent = getParent(); 1329 return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca(); 1330 } 1331 1332 //===----------------------------------------------------------------------===// 1333 // LoadInst Implementation 1334 //===----------------------------------------------------------------------===// 1335 1336 void LoadInst::AssertOK() { 1337 assert(getOperand(0)->getType()->isPointerTy() && 1338 "Ptr must have pointer type."); 1339 assert(!(isAtomic() && getAlignment() == 0) && 1340 "Alignment required for atomic load"); 1341 } 1342 1343 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, 1344 Instruction *InsertBef) 1345 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {} 1346 1347 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, 1348 BasicBlock *InsertAE) 1349 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {} 1350 1351 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1352 Instruction *InsertBef) 1353 : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {} 1354 1355 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1356 BasicBlock *InsertAE) 1357 : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {} 1358 1359 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1360 MaybeAlign Align, Instruction *InsertBef) 1361 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, 1362 SyncScope::System, InsertBef) {} 1363 1364 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1365 MaybeAlign Align, BasicBlock *InsertAE) 1366 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, 1367 SyncScope::System, InsertAE) {} 1368 1369 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1370 MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID, 1371 Instruction *InsertBef) 1372 : UnaryInstruction(Ty, Load, Ptr, InsertBef) { 1373 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType()); 1374 setVolatile(isVolatile); 1375 setAlignment(MaybeAlign(Align)); 1376 setAtomic(Order, SSID); 1377 AssertOK(); 1378 setName(Name); 1379 } 1380 1381 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, 1382 MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID, 1383 BasicBlock *InsertAE) 1384 : UnaryInstruction(Ty, Load, Ptr, InsertAE) { 1385 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType()); 1386 setVolatile(isVolatile); 1387 setAlignment(Align); 1388 setAtomic(Order, SSID); 1389 AssertOK(); 1390 setName(Name); 1391 } 1392 1393 void LoadInst::setAlignment(MaybeAlign Align) { 1394 assert((!Align || *Align <= MaximumAlignment) && 1395 "Alignment is greater than MaximumAlignment!"); 1396 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | 1397 (encode(Align) << 1)); 1398 assert(getAlign() == Align && "Alignment representation error!"); 1399 } 1400 1401 //===----------------------------------------------------------------------===// 1402 // StoreInst Implementation 1403 //===----------------------------------------------------------------------===// 1404 1405 void StoreInst::AssertOK() { 1406 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!"); 1407 assert(getOperand(1)->getType()->isPointerTy() && 1408 "Ptr must have pointer type!"); 1409 assert(getOperand(0)->getType() == 1410 cast<PointerType>(getOperand(1)->getType())->getElementType() 1411 && "Ptr must be a 
pointer to Val type!"); 1412 assert(!(isAtomic() && getAlignment() == 0) && 1413 "Alignment required for atomic store"); 1414 } 1415 1416 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore) 1417 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {} 1418 1419 StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd) 1420 : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {} 1421 1422 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1423 Instruction *InsertBefore) 1424 : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {} 1425 1426 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1427 BasicBlock *InsertAtEnd) 1428 : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {} 1429 1430 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1431 Instruction *InsertBefore) 1432 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1433 SyncScope::System, InsertBefore) {} 1434 1435 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1436 BasicBlock *InsertAtEnd) 1437 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1438 SyncScope::System, InsertAtEnd) {} 1439 1440 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1441 AtomicOrdering Order, SyncScope::ID SSID, 1442 Instruction *InsertBefore) 1443 : Instruction(Type::getVoidTy(val->getContext()), Store, 1444 OperandTraits<StoreInst>::op_begin(this), 1445 OperandTraits<StoreInst>::operands(this), InsertBefore) { 1446 Op<0>() = val; 1447 Op<1>() = addr; 1448 setVolatile(isVolatile); 1449 setAlignment(Align); 1450 setAtomic(Order, SSID); 1451 AssertOK(); 1452 } 1453 1454 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align, 1455 AtomicOrdering Order, SyncScope::ID SSID, 1456 BasicBlock *InsertAtEnd) 1457 : Instruction(Type::getVoidTy(val->getContext()), Store, 1458 OperandTraits<StoreInst>::op_begin(this), 1459 OperandTraits<StoreInst>::operands(this), InsertAtEnd) { 1460 Op<0>() = val; 1461 Op<1>() = addr; 1462 setVolatile(isVolatile); 1463 setAlignment(Align); 1464 setAtomic(Order, SSID); 1465 AssertOK(); 1466 } 1467 1468 void StoreInst::setAlignment(MaybeAlign Alignment) { 1469 assert((!Alignment || *Alignment <= MaximumAlignment) && 1470 "Alignment is greater than MaximumAlignment!"); 1471 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | 1472 (encode(Alignment) << 1)); 1473 assert(getAlign() == Alignment && "Alignment representation error!"); 1474 } 1475 1476 //===----------------------------------------------------------------------===// 1477 // AtomicCmpXchgInst Implementation 1478 //===----------------------------------------------------------------------===// 1479 1480 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, 1481 AtomicOrdering SuccessOrdering, 1482 AtomicOrdering FailureOrdering, 1483 SyncScope::ID SSID) { 1484 Op<0>() = Ptr; 1485 Op<1>() = Cmp; 1486 Op<2>() = NewVal; 1487 setSuccessOrdering(SuccessOrdering); 1488 setFailureOrdering(FailureOrdering); 1489 setSyncScopeID(SSID); 1490 1491 assert(getOperand(0) && getOperand(1) && getOperand(2) && 1492 "All operands must be non-null!"); 1493 assert(getOperand(0)->getType()->isPointerTy() && 1494 "Ptr must have pointer type!"); 1495 assert(getOperand(1)->getType() == 1496 cast<PointerType>(getOperand(0)->getType())->getElementType() 1497 && "Ptr must be a pointer to Cmp type!"); 1498 
assert(getOperand(2)->getType() == 1499 cast<PointerType>(getOperand(0)->getType())->getElementType() 1500 && "Ptr must be a pointer to NewVal type!"); 1501 assert(SuccessOrdering != AtomicOrdering::NotAtomic && 1502 "AtomicCmpXchg instructions must be atomic!"); 1503 assert(FailureOrdering != AtomicOrdering::NotAtomic && 1504 "AtomicCmpXchg instructions must be atomic!"); 1505 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) && 1506 "AtomicCmpXchg failure argument shall be no stronger than the success " 1507 "argument"); 1508 assert(FailureOrdering != AtomicOrdering::Release && 1509 FailureOrdering != AtomicOrdering::AcquireRelease && 1510 "AtomicCmpXchg failure ordering cannot include release semantics"); 1511 } 1512 1513 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1514 AtomicOrdering SuccessOrdering, 1515 AtomicOrdering FailureOrdering, 1516 SyncScope::ID SSID, 1517 Instruction *InsertBefore) 1518 : Instruction( 1519 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1520 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1521 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) { 1522 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1523 } 1524 1525 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1526 AtomicOrdering SuccessOrdering, 1527 AtomicOrdering FailureOrdering, 1528 SyncScope::ID SSID, 1529 BasicBlock *InsertAtEnd) 1530 : Instruction( 1531 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1532 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1533 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) { 1534 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1535 } 1536 1537 //===----------------------------------------------------------------------===// 1538 // AtomicRMWInst Implementation 1539 //===----------------------------------------------------------------------===// 1540 1541 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, 1542 AtomicOrdering Ordering, 1543 SyncScope::ID SSID) { 1544 Op<0>() = Ptr; 1545 Op<1>() = Val; 1546 setOperation(Operation); 1547 setOrdering(Ordering); 1548 setSyncScopeID(SSID); 1549 1550 assert(getOperand(0) && getOperand(1) && 1551 "All operands must be non-null!"); 1552 assert(getOperand(0)->getType()->isPointerTy() && 1553 "Ptr must have pointer type!"); 1554 assert(getOperand(1)->getType() == 1555 cast<PointerType>(getOperand(0)->getType())->getElementType() 1556 && "Ptr must be a pointer to Val type!"); 1557 assert(Ordering != AtomicOrdering::NotAtomic && 1558 "AtomicRMW instructions must be atomic!"); 1559 } 1560 1561 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1562 AtomicOrdering Ordering, 1563 SyncScope::ID SSID, 1564 Instruction *InsertBefore) 1565 : Instruction(Val->getType(), AtomicRMW, 1566 OperandTraits<AtomicRMWInst>::op_begin(this), 1567 OperandTraits<AtomicRMWInst>::operands(this), 1568 InsertBefore) { 1569 Init(Operation, Ptr, Val, Ordering, SSID); 1570 } 1571 1572 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1573 AtomicOrdering Ordering, 1574 SyncScope::ID SSID, 1575 BasicBlock *InsertAtEnd) 1576 : Instruction(Val->getType(), AtomicRMW, 1577 OperandTraits<AtomicRMWInst>::op_begin(this), 1578 OperandTraits<AtomicRMWInst>::operands(this), 1579 InsertAtEnd) { 1580 Init(Operation, Ptr, Val, Ordering, SSID); 1581 } 1582 1583 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1584 
switch (Op) { 1585 case AtomicRMWInst::Xchg: 1586 return "xchg"; 1587 case AtomicRMWInst::Add: 1588 return "add"; 1589 case AtomicRMWInst::Sub: 1590 return "sub"; 1591 case AtomicRMWInst::And: 1592 return "and"; 1593 case AtomicRMWInst::Nand: 1594 return "nand"; 1595 case AtomicRMWInst::Or: 1596 return "or"; 1597 case AtomicRMWInst::Xor: 1598 return "xor"; 1599 case AtomicRMWInst::Max: 1600 return "max"; 1601 case AtomicRMWInst::Min: 1602 return "min"; 1603 case AtomicRMWInst::UMax: 1604 return "umax"; 1605 case AtomicRMWInst::UMin: 1606 return "umin"; 1607 case AtomicRMWInst::FAdd: 1608 return "fadd"; 1609 case AtomicRMWInst::FSub: 1610 return "fsub"; 1611 case AtomicRMWInst::BAD_BINOP: 1612 return "<invalid operation>"; 1613 } 1614 1615 llvm_unreachable("invalid atomicrmw operation"); 1616 } 1617 1618 //===----------------------------------------------------------------------===// 1619 // FenceInst Implementation 1620 //===----------------------------------------------------------------------===// 1621 1622 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1623 SyncScope::ID SSID, 1624 Instruction *InsertBefore) 1625 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { 1626 setOrdering(Ordering); 1627 setSyncScopeID(SSID); 1628 } 1629 1630 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1631 SyncScope::ID SSID, 1632 BasicBlock *InsertAtEnd) 1633 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { 1634 setOrdering(Ordering); 1635 setSyncScopeID(SSID); 1636 } 1637 1638 //===----------------------------------------------------------------------===// 1639 // GetElementPtrInst Implementation 1640 //===----------------------------------------------------------------------===// 1641 1642 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, 1643 const Twine &Name) { 1644 assert(getNumOperands() == 1 + IdxList.size() && 1645 "NumOperands not initialized?"); 1646 Op<0>() = Ptr; 1647 llvm::copy(IdxList, op_begin() + 1); 1648 setName(Name); 1649 } 1650 1651 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) 1652 : Instruction(GEPI.getType(), GetElementPtr, 1653 OperandTraits<GetElementPtrInst>::op_end(this) - 1654 GEPI.getNumOperands(), 1655 GEPI.getNumOperands()), 1656 SourceElementType(GEPI.SourceElementType), 1657 ResultElementType(GEPI.ResultElementType) { 1658 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); 1659 SubclassOptionalData = GEPI.SubclassOptionalData; 1660 } 1661 1662 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) { 1663 if (auto Struct = dyn_cast<StructType>(Ty)) { 1664 if (!Struct->indexValid(Idx)) 1665 return nullptr; 1666 return Struct->getTypeAtIndex(Idx); 1667 } 1668 if (!Idx->getType()->isIntOrIntVectorTy()) 1669 return nullptr; 1670 if (auto Array = dyn_cast<ArrayType>(Ty)) 1671 return Array->getElementType(); 1672 if (auto Vector = dyn_cast<VectorType>(Ty)) 1673 return Vector->getElementType(); 1674 return nullptr; 1675 } 1676 1677 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) { 1678 if (auto Struct = dyn_cast<StructType>(Ty)) { 1679 if (Idx >= Struct->getNumElements()) 1680 return nullptr; 1681 return Struct->getElementType(Idx); 1682 } 1683 if (auto Array = dyn_cast<ArrayType>(Ty)) 1684 return Array->getElementType(); 1685 if (auto Vector = dyn_cast<VectorType>(Ty)) 1686 return Vector->getElementType(); 1687 return nullptr; 1688 } 1689 1690 template <typename IndexTy> 1691 static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) { 
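// Illustrative walk (hypothetical types, not from any particular caller):
//   source element type: { i32, [4 x i8] }
//   index list:          i64 0, i32 1, i64 2
// The first index only steps through the pointer operand and never changes
// the indexed type, which is why the loop below starts at slice(1); index 1
// then selects [4 x i8] from the struct and index 2 selects i8 from the
// array, so the indexed type is i8.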
1692 if (IdxList.empty()) 1693 return Ty; 1694 for (IndexTy V : IdxList.slice(1)) { 1695 Ty = GetElementPtrInst::getTypeAtIndex(Ty, V); 1696 if (!Ty) 1697 return Ty; 1698 } 1699 return Ty; 1700 } 1701 1702 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1703 return getIndexedTypeInternal(Ty, IdxList); 1704 } 1705 1706 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1707 ArrayRef<Constant *> IdxList) { 1708 return getIndexedTypeInternal(Ty, IdxList); 1709 } 1710 1711 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1712 return getIndexedTypeInternal(Ty, IdxList); 1713 } 1714 1715 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1716 /// zeros. If so, the result pointer and the first operand have the same 1717 /// value, just potentially different types. 1718 bool GetElementPtrInst::hasAllZeroIndices() const { 1719 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1720 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1721 if (!CI->isZero()) return false; 1722 } else { 1723 return false; 1724 } 1725 } 1726 return true; 1727 } 1728 1729 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1730 /// constant integers. If so, the result pointer and the first operand have 1731 /// a constant offset between them. 1732 bool GetElementPtrInst::hasAllConstantIndices() const { 1733 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1734 if (!isa<ConstantInt>(getOperand(i))) 1735 return false; 1736 } 1737 return true; 1738 } 1739 1740 void GetElementPtrInst::setIsInBounds(bool B) { 1741 cast<GEPOperator>(this)->setIsInBounds(B); 1742 } 1743 1744 bool GetElementPtrInst::isInBounds() const { 1745 return cast<GEPOperator>(this)->isInBounds(); 1746 } 1747 1748 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1749 APInt &Offset) const { 1750 // Delegate to the generic GEPOperator implementation. 
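// Illustrative result (hypothetical operands, assuming 64-bit pointers): for
//   getelementptr { i32, i32 }, { i32, i32 }* %p, i64 1, i32 1
// the accumulated constant offset is 1 * 8 + 4 = 12 bytes.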
1751 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1752 } 1753 1754 //===----------------------------------------------------------------------===// 1755 // ExtractElementInst Implementation 1756 //===----------------------------------------------------------------------===// 1757 1758 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1759 const Twine &Name, 1760 Instruction *InsertBef) 1761 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1762 ExtractElement, 1763 OperandTraits<ExtractElementInst>::op_begin(this), 1764 2, InsertBef) { 1765 assert(isValidOperands(Val, Index) && 1766 "Invalid extractelement instruction operands!"); 1767 Op<0>() = Val; 1768 Op<1>() = Index; 1769 setName(Name); 1770 } 1771 1772 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1773 const Twine &Name, 1774 BasicBlock *InsertAE) 1775 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1776 ExtractElement, 1777 OperandTraits<ExtractElementInst>::op_begin(this), 1778 2, InsertAE) { 1779 assert(isValidOperands(Val, Index) && 1780 "Invalid extractelement instruction operands!"); 1781 1782 Op<0>() = Val; 1783 Op<1>() = Index; 1784 setName(Name); 1785 } 1786 1787 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1788 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1789 return false; 1790 return true; 1791 } 1792 1793 //===----------------------------------------------------------------------===// 1794 // InsertElementInst Implementation 1795 //===----------------------------------------------------------------------===// 1796 1797 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1798 const Twine &Name, 1799 Instruction *InsertBef) 1800 : Instruction(Vec->getType(), InsertElement, 1801 OperandTraits<InsertElementInst>::op_begin(this), 1802 3, InsertBef) { 1803 assert(isValidOperands(Vec, Elt, Index) && 1804 "Invalid insertelement instruction operands!"); 1805 Op<0>() = Vec; 1806 Op<1>() = Elt; 1807 Op<2>() = Index; 1808 setName(Name); 1809 } 1810 1811 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1812 const Twine &Name, 1813 BasicBlock *InsertAE) 1814 : Instruction(Vec->getType(), InsertElement, 1815 OperandTraits<InsertElementInst>::op_begin(this), 1816 3, InsertAE) { 1817 assert(isValidOperands(Vec, Elt, Index) && 1818 "Invalid insertelement instruction operands!"); 1819 1820 Op<0>() = Vec; 1821 Op<1>() = Elt; 1822 Op<2>() = Index; 1823 setName(Name); 1824 } 1825 1826 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1827 const Value *Index) { 1828 if (!Vec->getType()->isVectorTy()) 1829 return false; // First operand of insertelement must be vector type. 1830 1831 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1832 return false;// Second operand of insertelement must be vector element type. 1833 1834 if (!Index->getType()->isIntegerTy()) 1835 return false; // Third operand of insertelement must be i32. 
1836 return true; 1837 } 1838 1839 //===----------------------------------------------------------------------===// 1840 // ShuffleVectorInst Implementation 1841 //===----------------------------------------------------------------------===// 1842 1843 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1844 const Twine &Name, 1845 Instruction *InsertBefore) 1846 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1847 cast<VectorType>(Mask->getType())->getElementCount()), 1848 ShuffleVector, 1849 OperandTraits<ShuffleVectorInst>::op_begin(this), 1850 OperandTraits<ShuffleVectorInst>::operands(this), 1851 InsertBefore) { 1852 assert(isValidOperands(V1, V2, Mask) && 1853 "Invalid shuffle vector instruction operands!"); 1854 Op<0>() = V1; 1855 Op<1>() = V2; 1856 Op<2>() = Mask; 1857 setName(Name); 1858 } 1859 1860 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1861 const Twine &Name, 1862 BasicBlock *InsertAtEnd) 1863 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1864 cast<VectorType>(Mask->getType())->getElementCount()), 1865 ShuffleVector, 1866 OperandTraits<ShuffleVectorInst>::op_begin(this), 1867 OperandTraits<ShuffleVectorInst>::operands(this), 1868 InsertAtEnd) { 1869 assert(isValidOperands(V1, V2, Mask) && 1870 "Invalid shuffle vector instruction operands!"); 1871 1872 Op<0>() = V1; 1873 Op<1>() = V2; 1874 Op<2>() = Mask; 1875 setName(Name); 1876 } 1877 1878 void ShuffleVectorInst::commute() { 1879 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1880 int NumMaskElts = getMask()->getType()->getVectorNumElements(); 1881 SmallVector<Constant*, 16> NewMask(NumMaskElts); 1882 Type *Int32Ty = Type::getInt32Ty(getContext()); 1883 for (int i = 0; i != NumMaskElts; ++i) { 1884 int MaskElt = getMaskValue(i); 1885 if (MaskElt == -1) { 1886 NewMask[i] = UndefValue::get(Int32Ty); 1887 continue; 1888 } 1889 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask"); 1890 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts; 1891 NewMask[i] = ConstantInt::get(Int32Ty, MaskElt); 1892 } 1893 Op<2>() = ConstantVector::get(NewMask); 1894 Op<0>().swap(Op<1>()); 1895 } 1896 1897 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1898 const Value *Mask) { 1899 // V1 and V2 must be vectors of the same type. 1900 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1901 return false; 1902 1903 // Mask must be vector of i32. 1904 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1905 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1906 return false; 1907 1908 // Check to see if Mask is valid. 
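// Illustrative masks that the checks below accept for <4 x T> operands
// (values are hypothetical):
//   <i32 0, i32 5, i32 2, i32 7>  ; selects elements from both V1 and V2
//   zeroinitializer               ; splat of element 0 of V1
//   undef                         ; completely undefined shuffle
// Any element value >= 8 (twice the source length) would be rejected.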
1909 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1910 return true; 1911 1912 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1913 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1914 for (Value *Op : MV->operands()) { 1915 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1916 if (CI->uge(V1Size*2)) 1917 return false; 1918 } else if (!isa<UndefValue>(Op)) { 1919 return false; 1920 } 1921 } 1922 return true; 1923 } 1924 1925 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1926 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1927 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1928 if (CDS->getElementAsInteger(i) >= V1Size*2) 1929 return false; 1930 return true; 1931 } 1932 1933 // The bitcode reader can create a place holder for a forward reference 1934 // used as the shuffle mask. When this occurs, the shuffle mask will 1935 // fall into this case and fail. To avoid this error, do this bit of 1936 // ugliness to allow such a mask pass. 1937 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1938 if (CE->getOpcode() == Instruction::UserOp1) 1939 return true; 1940 1941 return false; 1942 } 1943 1944 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) { 1945 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1946 assert(!Mask->getType()->getVectorElementCount().Scalable && 1947 "Length of scalable vectors unknown at compile time"); 1948 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1949 return CDS->getElementAsInteger(i); 1950 Constant *C = Mask->getAggregateElement(i); 1951 if (isa<UndefValue>(C)) 1952 return -1; 1953 return cast<ConstantInt>(C)->getZExtValue(); 1954 } 1955 1956 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 1957 SmallVectorImpl<int> &Result) { 1958 assert(!Mask->getType()->getVectorElementCount().Scalable && 1959 "Length of scalable vectors unknown at compile time"); 1960 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1961 1962 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1963 for (unsigned i = 0; i != NumElts; ++i) 1964 Result.push_back(CDS->getElementAsInteger(i)); 1965 return; 1966 } 1967 for (unsigned i = 0; i != NumElts; ++i) { 1968 Constant *C = Mask->getAggregateElement(i); 1969 Result.push_back(isa<UndefValue>(C) ? -1 : 1970 cast<ConstantInt>(C)->getZExtValue()); 1971 } 1972 } 1973 1974 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1975 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1976 bool UsesLHS = false; 1977 bool UsesRHS = false; 1978 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1979 if (Mask[i] == -1) 1980 continue; 1981 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1982 "Out-of-bounds shuffle mask element"); 1983 UsesLHS |= (Mask[i] < NumOpElts); 1984 UsesRHS |= (Mask[i] >= NumOpElts); 1985 if (UsesLHS && UsesRHS) 1986 return false; 1987 } 1988 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1989 return true; 1990 } 1991 1992 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1993 // We don't have vector operand size information, so assume operands are the 1994 // same size as the mask. 
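// Illustrative examples (hypothetical masks): <0, 1, 2, 3> and <4, 5, 6, 7>
// are single-source for 4-element operands (all LHS or all RHS, respectively),
// while <0, 5, 2, 7> draws from both sources and is not.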
1995 return isSingleSourceMaskImpl(Mask, Mask.size()); 1996 } 1997 1998 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1999 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 2000 return false; 2001 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 2002 if (Mask[i] == -1) 2003 continue; 2004 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 2005 return false; 2006 } 2007 return true; 2008 } 2009 2010 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 2011 // We don't have vector operand size information, so assume operands are the 2012 // same size as the mask. 2013 return isIdentityMaskImpl(Mask, Mask.size()); 2014 } 2015 2016 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 2017 if (!isSingleSourceMask(Mask)) 2018 return false; 2019 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2020 if (Mask[i] == -1) 2021 continue; 2022 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 2023 return false; 2024 } 2025 return true; 2026 } 2027 2028 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 2029 if (!isSingleSourceMask(Mask)) 2030 return false; 2031 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2032 if (Mask[i] == -1) 2033 continue; 2034 if (Mask[i] != 0 && Mask[i] != NumElts) 2035 return false; 2036 } 2037 return true; 2038 } 2039 2040 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 2041 // Select is differentiated from identity. It requires using both sources. 2042 if (isSingleSourceMask(Mask)) 2043 return false; 2044 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 2045 if (Mask[i] == -1) 2046 continue; 2047 if (Mask[i] != i && Mask[i] != (NumElts + i)) 2048 return false; 2049 } 2050 return true; 2051 } 2052 2053 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 2054 // Example masks that will return true: 2055 // v1 = <a, b, c, d> 2056 // v2 = <e, f, g, h> 2057 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 2058 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 2059 2060 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 2061 int NumElts = Mask.size(); 2062 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 2063 return false; 2064 2065 // 2. The first element of the mask must be either a 0 or a 1. 2066 if (Mask[0] != 0 && Mask[0] != 1) 2067 return false; 2068 2069 // 3. The difference between the first 2 elements must be equal to the 2070 // number of elements in the mask. 2071 if ((Mask[1] - Mask[0]) != NumElts) 2072 return false; 2073 2074 // 4. The difference between consecutive even-numbered and odd-numbered 2075 // elements must be equal to 2. 2076 for (int i = 2; i < NumElts; ++i) { 2077 int MaskEltVal = Mask[i]; 2078 if (MaskEltVal == -1) 2079 return false; 2080 int MaskEltPrevVal = Mask[i - 2]; 2081 if (MaskEltVal - MaskEltPrevVal != 2) 2082 return false; 2083 } 2084 return true; 2085 } 2086 2087 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 2088 int NumSrcElts, int &Index) { 2089 // Must extract from a single source. 2090 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 2091 return false; 2092 2093 // Must be smaller (else this is an Identity shuffle). 2094 if (NumSrcElts <= (int)Mask.size()) 2095 return false; 2096 2097 // Find start of extraction, accounting that we may start with an UNDEF. 
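// Illustrative example (hypothetical mask): with NumSrcElts == 8, the mask
// <2, 3, 4, 5> extracts a 4-element subvector starting at Index 2; a leading
// undef, as in <undef, 3, 4, 5>, recovers the same Index.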
2098 int SubIndex = -1; 2099 for (int i = 0, e = Mask.size(); i != e; ++i) { 2100 int M = Mask[i]; 2101 if (M < 0) 2102 continue; 2103 int Offset = (M % NumSrcElts) - i; 2104 if (0 <= SubIndex && SubIndex != Offset) 2105 return false; 2106 SubIndex = Offset; 2107 } 2108 2109 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) { 2110 Index = SubIndex; 2111 return true; 2112 } 2113 return false; 2114 } 2115 2116 bool ShuffleVectorInst::isIdentityWithPadding() const { 2117 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2118 int NumMaskElts = getType()->getVectorNumElements(); 2119 if (NumMaskElts <= NumOpElts) 2120 return false; 2121 2122 // The first part of the mask must choose elements from exactly 1 source op. 2123 SmallVector<int, 16> Mask = getShuffleMask(); 2124 if (!isIdentityMaskImpl(Mask, NumOpElts)) 2125 return false; 2126 2127 // All extending must be with undef elements. 2128 for (int i = NumOpElts; i < NumMaskElts; ++i) 2129 if (Mask[i] != -1) 2130 return false; 2131 2132 return true; 2133 } 2134 2135 bool ShuffleVectorInst::isIdentityWithExtract() const { 2136 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2137 int NumMaskElts = getType()->getVectorNumElements(); 2138 if (NumMaskElts >= NumOpElts) 2139 return false; 2140 2141 return isIdentityMaskImpl(getShuffleMask(), NumOpElts); 2142 } 2143 2144 bool ShuffleVectorInst::isConcat() const { 2145 // Vector concatenation is differentiated from identity with padding. 2146 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>())) 2147 return false; 2148 2149 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2150 int NumMaskElts = getType()->getVectorNumElements(); 2151 if (NumMaskElts != NumOpElts * 2) 2152 return false; 2153 2154 // Use the mask length rather than the operands' vector lengths here. We 2155 // already know that the shuffle returns a vector twice as long as the inputs, 2156 // and neither of the inputs are undef vectors. If the mask picks consecutive 2157 // elements from both inputs, then this is a concatenation of the inputs. 2158 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); 2159 } 2160 2161 //===----------------------------------------------------------------------===// 2162 // InsertValueInst Class 2163 //===----------------------------------------------------------------------===// 2164 2165 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2166 const Twine &Name) { 2167 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 2168 2169 // There's no fundamental reason why we require at least one index 2170 // (other than weirdness with &*IdxBegin being invalid; see 2171 // getelementptr's init routine for example). But there's no 2172 // present need to support it. 
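// Illustrative example (hypothetical types): for an aggregate of type
// { i32, [2 x float] }, the index list (1, 0) addresses element 0 of the
// nested array, so the inserted value must have type float.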
2173 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 2174 2175 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 2176 Val->getType() && "Inserted value must match indexed type!"); 2177 Op<0>() = Agg; 2178 Op<1>() = Val; 2179 2180 Indices.append(Idxs.begin(), Idxs.end()); 2181 setName(Name); 2182 } 2183 2184 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 2185 : Instruction(IVI.getType(), InsertValue, 2186 OperandTraits<InsertValueInst>::op_begin(this), 2), 2187 Indices(IVI.Indices) { 2188 Op<0>() = IVI.getOperand(0); 2189 Op<1>() = IVI.getOperand(1); 2190 SubclassOptionalData = IVI.SubclassOptionalData; 2191 } 2192 2193 //===----------------------------------------------------------------------===// 2194 // ExtractValueInst Class 2195 //===----------------------------------------------------------------------===// 2196 2197 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 2198 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 2199 2200 // There's no fundamental reason why we require at least one index. 2201 // But there's no present need to support it. 2202 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 2203 2204 Indices.append(Idxs.begin(), Idxs.end()); 2205 setName(Name); 2206 } 2207 2208 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 2209 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 2210 Indices(EVI.Indices) { 2211 SubclassOptionalData = EVI.SubclassOptionalData; 2212 } 2213 2214 // getIndexedType - Returns the type of the element that would be extracted 2215 // with an extractvalue instruction with the specified parameters. 2216 // 2217 // A null type is returned if the indices are invalid for the specified 2218 // pointer type. 2219 // 2220 Type *ExtractValueInst::getIndexedType(Type *Agg, 2221 ArrayRef<unsigned> Idxs) { 2222 for (unsigned Index : Idxs) { 2223 // We can't use CompositeType::indexValid(Index) here. 2224 // indexValid() always returns true for arrays because getelementptr allows 2225 // out-of-bounds indices. Since we don't allow those for extractvalue and 2226 // insertvalue we need to check array indexing manually. 2227 // Since the only other types we can index into are struct types it's just 2228 // as easy to check those manually as well. 2229 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 2230 if (Index >= AT->getNumElements()) 2231 return nullptr; 2232 Agg = AT->getElementType(); 2233 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 2234 if (Index >= ST->getNumElements()) 2235 return nullptr; 2236 Agg = ST->getElementType(Index); 2237 } else { 2238 // Not a valid type to index into. 
2239 return nullptr; 2240 } 2241 } 2242 return const_cast<Type*>(Agg); 2243 } 2244 2245 //===----------------------------------------------------------------------===// 2246 // UnaryOperator Class 2247 //===----------------------------------------------------------------------===// 2248 2249 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2250 Type *Ty, const Twine &Name, 2251 Instruction *InsertBefore) 2252 : UnaryInstruction(Ty, iType, S, InsertBefore) { 2253 Op<0>() = S; 2254 setName(Name); 2255 AssertOK(); 2256 } 2257 2258 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2259 Type *Ty, const Twine &Name, 2260 BasicBlock *InsertAtEnd) 2261 : UnaryInstruction(Ty, iType, S, InsertAtEnd) { 2262 Op<0>() = S; 2263 setName(Name); 2264 AssertOK(); 2265 } 2266 2267 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2268 const Twine &Name, 2269 Instruction *InsertBefore) { 2270 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore); 2271 } 2272 2273 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2274 const Twine &Name, 2275 BasicBlock *InsertAtEnd) { 2276 UnaryOperator *Res = Create(Op, S, Name); 2277 InsertAtEnd->getInstList().push_back(Res); 2278 return Res; 2279 } 2280 2281 void UnaryOperator::AssertOK() { 2282 Value *LHS = getOperand(0); 2283 (void)LHS; // Silence warnings. 2284 #ifndef NDEBUG 2285 switch (getOpcode()) { 2286 case FNeg: 2287 assert(getType() == LHS->getType() && 2288 "Unary operation should return same type as operand!"); 2289 assert(getType()->isFPOrFPVectorTy() && 2290 "Tried to create a floating-point operation on a " 2291 "non-floating-point type!"); 2292 break; 2293 default: llvm_unreachable("Invalid opcode provided"); 2294 } 2295 #endif 2296 } 2297 2298 //===----------------------------------------------------------------------===// 2299 // BinaryOperator Class 2300 //===----------------------------------------------------------------------===// 2301 2302 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2303 Type *Ty, const Twine &Name, 2304 Instruction *InsertBefore) 2305 : Instruction(Ty, iType, 2306 OperandTraits<BinaryOperator>::op_begin(this), 2307 OperandTraits<BinaryOperator>::operands(this), 2308 InsertBefore) { 2309 Op<0>() = S1; 2310 Op<1>() = S2; 2311 setName(Name); 2312 AssertOK(); 2313 } 2314 2315 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2316 Type *Ty, const Twine &Name, 2317 BasicBlock *InsertAtEnd) 2318 : Instruction(Ty, iType, 2319 OperandTraits<BinaryOperator>::op_begin(this), 2320 OperandTraits<BinaryOperator>::operands(this), 2321 InsertAtEnd) { 2322 Op<0>() = S1; 2323 Op<1>() = S2; 2324 setName(Name); 2325 AssertOK(); 2326 } 2327 2328 void BinaryOperator::AssertOK() { 2329 Value *LHS = getOperand(0), *RHS = getOperand(1); 2330 (void)LHS; (void)RHS; // Silence warnings. 
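// Illustrative failures (hypothetical operands): an Add whose operands are
// i32 and float, or whose result type differs from its operand type, trips
// the assertions below.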
2331 assert(LHS->getType() == RHS->getType() && 2332 "Binary operator operand types must match!"); 2333 #ifndef NDEBUG 2334 switch (getOpcode()) { 2335 case Add: case Sub: 2336 case Mul: 2337 assert(getType() == LHS->getType() && 2338 "Arithmetic operation should return same type as operands!"); 2339 assert(getType()->isIntOrIntVectorTy() && 2340 "Tried to create an integer operation on a non-integer type!"); 2341 break; 2342 case FAdd: case FSub: 2343 case FMul: 2344 assert(getType() == LHS->getType() && 2345 "Arithmetic operation should return same type as operands!"); 2346 assert(getType()->isFPOrFPVectorTy() && 2347 "Tried to create a floating-point operation on a " 2348 "non-floating-point type!"); 2349 break; 2350 case UDiv: 2351 case SDiv: 2352 assert(getType() == LHS->getType() && 2353 "Arithmetic operation should return same type as operands!"); 2354 assert(getType()->isIntOrIntVectorTy() && 2355 "Incorrect operand type (not integer) for S/UDIV"); 2356 break; 2357 case FDiv: 2358 assert(getType() == LHS->getType() && 2359 "Arithmetic operation should return same type as operands!"); 2360 assert(getType()->isFPOrFPVectorTy() && 2361 "Incorrect operand type (not floating point) for FDIV"); 2362 break; 2363 case URem: 2364 case SRem: 2365 assert(getType() == LHS->getType() && 2366 "Arithmetic operation should return same type as operands!"); 2367 assert(getType()->isIntOrIntVectorTy() && 2368 "Incorrect operand type (not integer) for S/UREM"); 2369 break; 2370 case FRem: 2371 assert(getType() == LHS->getType() && 2372 "Arithmetic operation should return same type as operands!"); 2373 assert(getType()->isFPOrFPVectorTy() && 2374 "Incorrect operand type (not floating point) for FREM"); 2375 break; 2376 case Shl: 2377 case LShr: 2378 case AShr: 2379 assert(getType() == LHS->getType() && 2380 "Shift operation should return same type as operands!"); 2381 assert(getType()->isIntOrIntVectorTy() && 2382 "Tried to create a shift operation on a non-integral type!"); 2383 break; 2384 case And: case Or: 2385 case Xor: 2386 assert(getType() == LHS->getType() && 2387 "Logical operation should return same type as operands!"); 2388 assert(getType()->isIntOrIntVectorTy() && 2389 "Tried to create a logical operation on a non-integral type!"); 2390 break; 2391 default: llvm_unreachable("Invalid opcode provided"); 2392 } 2393 #endif 2394 } 2395 2396 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2397 const Twine &Name, 2398 Instruction *InsertBefore) { 2399 assert(S1->getType() == S2->getType() && 2400 "Cannot create binary operator with two operands of differing type!"); 2401 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 2402 } 2403 2404 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2405 const Twine &Name, 2406 BasicBlock *InsertAtEnd) { 2407 BinaryOperator *Res = Create(Op, S1, S2, Name); 2408 InsertAtEnd->getInstList().push_back(Res); 2409 return Res; 2410 } 2411 2412 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2413 Instruction *InsertBefore) { 2414 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2415 return new BinaryOperator(Instruction::Sub, 2416 zero, Op, 2417 Op->getType(), Name, InsertBefore); 2418 } 2419 2420 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2421 BasicBlock *InsertAtEnd) { 2422 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2423 return new BinaryOperator(Instruction::Sub, 2424 zero, Op, 2425 Op->getType(), 
Name, InsertAtEnd); 2426 } 2427 2428 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2429 Instruction *InsertBefore) { 2430 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2431 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 2432 } 2433 2434 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2435 BasicBlock *InsertAtEnd) { 2436 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2437 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2438 } 2439 2440 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2441 Instruction *InsertBefore) { 2442 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2443 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2444 } 2445 2446 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2447 BasicBlock *InsertAtEnd) { 2448 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2449 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2450 } 2451 2452 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2453 Instruction *InsertBefore) { 2454 Constant *C = Constant::getAllOnesValue(Op->getType()); 2455 return new BinaryOperator(Instruction::Xor, Op, C, 2456 Op->getType(), Name, InsertBefore); 2457 } 2458 2459 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2460 BasicBlock *InsertAtEnd) { 2461 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2462 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2463 Op->getType(), Name, InsertAtEnd); 2464 } 2465 2466 // Exchange the two operands of this instruction. This is safe to use on any 2467 // binary instruction and does not modify the semantics of the instruction. If 2468 // the instruction is not commutative (the operation is order-dependent), the 2469 // operands are left untouched and failure is reported by returning true. 2470 bool BinaryOperator::swapOperands() { 2471 if (!isCommutative()) 2472 return true; // Can't commute operands 2473 Op<0>().swap(Op<1>()); 2474 return false; 2475 } 2476 2477 //===----------------------------------------------------------------------===// 2478 // FPMathOperator Class 2479 //===----------------------------------------------------------------------===// 2480 2481 float FPMathOperator::getFPAccuracy() const { 2482 const MDNode *MD = 2483 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2484 if (!MD) 2485 return 0.0; 2486 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2487 return Accuracy->getValueAPF().convertToFloat(); 2488 } 2489 2490 //===----------------------------------------------------------------------===// 2491 // CastInst Class 2492 //===----------------------------------------------------------------------===// 2493 2494 // Just determine if this cast only deals with integral->integral conversion.
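// Illustrative results (hypothetical casts): "zext i8 %x to i32" and
// "trunc i64 %x to i16" qualify, while "bitcast float %x to i32" and
// "bitcast <2 x i16> %x to i32" do not, because one side is not a scalar
// integer type.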
2495 bool CastInst::isIntegerCast() const { 2496 switch (getOpcode()) { 2497 default: return false; 2498 case Instruction::ZExt: 2499 case Instruction::SExt: 2500 case Instruction::Trunc: 2501 return true; 2502 case Instruction::BitCast: 2503 return getOperand(0)->getType()->isIntegerTy() && 2504 getType()->isIntegerTy(); 2505 } 2506 } 2507 2508 bool CastInst::isLosslessCast() const { 2509 // Only BitCast can be lossless, exit fast if we're not BitCast 2510 if (getOpcode() != Instruction::BitCast) 2511 return false; 2512 2513 // Identity cast is always lossless 2514 Type *SrcTy = getOperand(0)->getType(); 2515 Type *DstTy = getType(); 2516 if (SrcTy == DstTy) 2517 return true; 2518 2519 // Pointer to pointer is always lossless. 2520 if (SrcTy->isPointerTy()) 2521 return DstTy->isPointerTy(); 2522 return false; // Other types have no identity values 2523 } 2524 2525 /// This function determines if the CastInst does not require any bits to be 2526 /// changed in order to effect the cast. Essentially, it identifies cases where 2527 /// no code gen is necessary for the cast, hence the name no-op cast. For 2528 /// example, the following are all no-op casts: 2529 /// # bitcast i32* %x to i8* 2530 /// # bitcast <2 x i32> %x to <4 x i16> 2531 /// # ptrtoint i32* %x to i32 ; on 32-bit platforms only 2532 /// Determine if the described cast is a no-op. 2533 bool CastInst::isNoopCast(Instruction::CastOps Opcode, 2534 Type *SrcTy, 2535 Type *DestTy, 2536 const DataLayout &DL) { 2537 switch (Opcode) { 2538 default: llvm_unreachable("Invalid CastOp"); 2539 case Instruction::Trunc: 2540 case Instruction::ZExt: 2541 case Instruction::SExt: 2542 case Instruction::FPTrunc: 2543 case Instruction::FPExt: 2544 case Instruction::UIToFP: 2545 case Instruction::SIToFP: 2546 case Instruction::FPToUI: 2547 case Instruction::FPToSI: 2548 case Instruction::AddrSpaceCast: 2549 // TODO: Target information may give a more accurate answer here. 2550 return false; 2551 case Instruction::BitCast: 2552 return true; // BitCast never modifies bits. 2553 case Instruction::PtrToInt: 2554 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() == 2555 DestTy->getScalarSizeInBits(); 2556 case Instruction::IntToPtr: 2557 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() == 2558 SrcTy->getScalarSizeInBits(); 2559 } 2560 } 2561 2562 bool CastInst::isNoopCast(const DataLayout &DL) const { 2563 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL); 2564 } 2565 2566 /// This function determines if a pair of casts can be eliminated and what 2567 /// opcode should be used in the elimination. This assumes that there are two 2568 /// instructions like this: 2569 /// * %F = firstOpcode SrcTy %x to MidTy 2570 /// * %S = secondOpcode MidTy %F to DstTy 2571 /// The function returns a resultOpcode so these two casts can be replaced with: 2572 /// * %Replacement = resultOpcode SrcTy %x to DstTy 2573 /// If no such cast is permitted, the function returns 0. 2574 unsigned CastInst::isEliminableCastPair( 2575 Instruction::CastOps firstOp, Instruction::CastOps secondOp, 2576 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, 2577 Type *DstIntPtrTy) { 2578 // Define the 169 possibilities for these two cast instructions. The values 2579 // in this matrix determine what to do in a given situation and select the 2580 // case in the switch below. The rows correspond to firstOp, the columns 2581 // correspond to secondOp.
In looking at the table below, keep in mind 2582 // the following cast properties: 2583 // 2584 // Size Compare Source Destination 2585 // Operator Src ? Size Type Sign Type Sign 2586 // -------- ------------ ------------------- --------------------- 2587 // TRUNC > Integer Any Integral Any 2588 // ZEXT < Integral Unsigned Integer Any 2589 // SEXT < Integral Signed Integer Any 2590 // FPTOUI n/a FloatPt n/a Integral Unsigned 2591 // FPTOSI n/a FloatPt n/a Integral Signed 2592 // UITOFP n/a Integral Unsigned FloatPt n/a 2593 // SITOFP n/a Integral Signed FloatPt n/a 2594 // FPTRUNC > FloatPt n/a FloatPt n/a 2595 // FPEXT < FloatPt n/a FloatPt n/a 2596 // PTRTOINT n/a Pointer n/a Integral Unsigned 2597 // INTTOPTR n/a Integral Unsigned Pointer n/a 2598 // BITCAST = FirstClass n/a FirstClass n/a 2599 // ADDRSPCST n/a Pointer n/a Pointer n/a 2600 // 2601 // NOTE: some transforms are safe, but we consider them to be non-profitable. 2602 // For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2603 // into "fptoui double to i64", but this loses information about the range 2604 // of the produced value (we no longer know the top-part is all zeros). 2605 // Further this conversion is often much more expensive for typical hardware, 2606 // and causes issues when building libgcc. We disallow fptosi+sext for the 2607 // same reason. 2608 const unsigned numCastOps = 2609 Instruction::CastOpsEnd - Instruction::CastOpsBegin; 2610 static const uint8_t CastResults[numCastOps][numCastOps] = { 2611 // T F F U S F F P I B A -+ 2612 // R Z S P P I I T P 2 N T S | 2613 // U E E 2 2 2 2 R E I T C C +- secondOp 2614 // N X X U S F F N X N 2 V V | 2615 // C T T I I P P C T T P T T -+ 2616 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+ 2617 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt | 2618 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt | 2619 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI | 2620 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI | 2621 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp 2622 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP | 2623 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc | 2624 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt | 2625 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt | 2626 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr | 2627 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast | 2628 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ 2629 }; 2630 2631 // TODO: This logic could be encoded into the table above and handled in the 2632 // switch below. 2633 // If either of the casts are a bitcast from scalar to vector, disallow the 2634 // merging. However, any pair of bitcasts are allowed. 2635 bool IsFirstBitcast = (firstOp == Instruction::BitCast); 2636 bool IsSecondBitcast = (secondOp == Instruction::BitCast); 2637 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast; 2638 2639 // Check if any of the casts convert scalars <-> vectors. 2640 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || 2641 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) 2642 if (!AreBothBitcasts) 2643 return 0; 2644 2645 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] 2646 [secondOp-Instruction::CastOpsBegin]; 2647 switch (ElimCase) { 2648 case 0: 2649 // Categorically disallowed. 2650 return 0; 2651 case 1: 2652 // Allowed, use first cast's opcode. 2653 return firstOp; 2654 case 2: 2655 // Allowed, use second cast's opcode. 
2656 return secondOp; 2657 case 3: 2658 // No-op cast in second op implies firstOp as long as the DestTy 2659 // is integer and we are not converting between a vector and a 2660 // non-vector type. 2661 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy()) 2662 return firstOp; 2663 return 0; 2664 case 4: 2665 // No-op cast in second op implies firstOp as long as the DestTy 2666 // is floating point. 2667 if (DstTy->isFloatingPointTy()) 2668 return firstOp; 2669 return 0; 2670 case 5: 2671 // No-op cast in first op implies secondOp as long as the SrcTy 2672 // is an integer. 2673 if (SrcTy->isIntegerTy()) 2674 return secondOp; 2675 return 0; 2676 case 6: 2677 // No-op cast in first op implies secondOp as long as the SrcTy 2678 // is a floating point. 2679 if (SrcTy->isFloatingPointTy()) 2680 return secondOp; 2681 return 0; 2682 case 7: { 2683 // Cannot simplify if address spaces are different! 2684 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) 2685 return 0; 2686 2687 unsigned MidSize = MidTy->getScalarSizeInBits(); 2688 // We can still fold this without knowing the actual sizes as long we 2689 // know that the intermediate pointer is the largest possible 2690 // pointer size. 2691 // FIXME: Is this always true? 2692 if (MidSize == 64) 2693 return Instruction::BitCast; 2694 2695 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size. 2696 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) 2697 return 0; 2698 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); 2699 if (MidSize >= PtrSize) 2700 return Instruction::BitCast; 2701 return 0; 2702 } 2703 case 8: { 2704 // ext, trunc -> bitcast, if the SrcTy and DstTy are same size 2705 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy) 2706 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy) 2707 unsigned SrcSize = SrcTy->getScalarSizeInBits(); 2708 unsigned DstSize = DstTy->getScalarSizeInBits(); 2709 if (SrcSize == DstSize) 2710 return Instruction::BitCast; 2711 else if (SrcSize < DstSize) 2712 return firstOp; 2713 return secondOp; 2714 } 2715 case 9: 2716 // zext, sext -> zext, because sext can't sign extend after zext 2717 return Instruction::ZExt; 2718 case 11: { 2719 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize 2720 if (!MidIntPtrTy) 2721 return 0; 2722 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); 2723 unsigned SrcSize = SrcTy->getScalarSizeInBits(); 2724 unsigned DstSize = DstTy->getScalarSizeInBits(); 2725 if (SrcSize <= PtrSize && SrcSize == DstSize) 2726 return Instruction::BitCast; 2727 return 0; 2728 } 2729 case 12: 2730 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS 2731 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS 2732 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) 2733 return Instruction::AddrSpaceCast; 2734 return Instruction::BitCast; 2735 case 13: 2736 // FIXME: this state can be merged with (1), but the following assert 2737 // is useful to check the correcteness of the sequence due to semantic 2738 // change of bitcast. 
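// Illustrative sequence for this case (hypothetical values):
//   %a = addrspacecast i8* %p to i8 addrspace(1)*
//   %b = bitcast i8 addrspace(1)* %a to i32 addrspace(1)*
// folds to a single addrspacecast, which is what returning the first opcode
// expresses; the assert below re-checks the address-space pattern.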
assert( 2740 SrcTy->isPtrOrPtrVectorTy() && 2741 MidTy->isPtrOrPtrVectorTy() && 2742 DstTy->isPtrOrPtrVectorTy() && 2743 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() && 2744 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() && 2745 "Illegal addrspacecast, bitcast sequence!"); 2746 // Allowed, use first cast's opcode 2747 return firstOp; 2748 case 14: 2749 // bitcast, addrspacecast -> addrspacecast if the element type of 2750 // bitcast's source is the same as that of addrspacecast's destination. 2751 if (SrcTy->getScalarType()->getPointerElementType() == 2752 DstTy->getScalarType()->getPointerElementType()) 2753 return Instruction::AddrSpaceCast; 2754 return 0; 2755 case 15: 2756 // FIXME: this state can be merged with (1), but the following assert 2757 // is useful to check the correctness of the sequence due to semantic 2758 // change of bitcast. 2759 assert( 2760 SrcTy->isIntOrIntVectorTy() && 2761 MidTy->isPtrOrPtrVectorTy() && 2762 DstTy->isPtrOrPtrVectorTy() && 2763 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() && 2764 "Illegal inttoptr, bitcast sequence!"); 2765 // Allowed, use first cast's opcode 2766 return firstOp; 2767 case 16: 2768 // FIXME: this state can be merged with (2), but the following assert 2769 // is useful to check the correctness of the sequence due to semantic 2770 // change of bitcast. 2771 assert( 2772 SrcTy->isPtrOrPtrVectorTy() && 2773 MidTy->isPtrOrPtrVectorTy() && 2774 DstTy->isIntOrIntVectorTy() && 2775 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() && 2776 "Illegal bitcast, ptrtoint sequence!"); 2777 // Allowed, use second cast's opcode 2778 return secondOp; 2779 case 17: 2780 // (sitofp (zext x)) -> (uitofp x) 2781 return Instruction::UIToFP; 2782 case 99: 2783 // Cast combination can't happen (error in input). This is for all cases 2784 // where the MidTy is not the same for the two cast instructions.
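// Illustrative impossibility (hypothetical): a trunc directly followed by an
// fpext cannot occur, because trunc produces an integer while fpext consumes
// a floating-point value, so no MidTy could satisfy both casts.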
2785 llvm_unreachable("Invalid Cast Combination"); 2786 default: 2787 llvm_unreachable("Error in CastResults table!!!"); 2788 } 2789 } 2790 2791 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2792 const Twine &Name, Instruction *InsertBefore) { 2793 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2794 // Construct and return the appropriate CastInst subclass 2795 switch (op) { 2796 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); 2797 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); 2798 case SExt: return new SExtInst (S, Ty, Name, InsertBefore); 2799 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); 2800 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); 2801 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); 2802 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); 2803 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); 2804 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); 2805 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); 2806 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); 2807 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); 2808 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore); 2809 default: llvm_unreachable("Invalid opcode provided"); 2810 } 2811 } 2812 2813 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2814 const Twine &Name, BasicBlock *InsertAtEnd) { 2815 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2816 // Construct and return the appropriate CastInst subclass 2817 switch (op) { 2818 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2819 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2820 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2821 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2822 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2823 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2824 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2825 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2826 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2827 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2828 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2829 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2830 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2831 default: llvm_unreachable("Invalid opcode provided"); 2832 } 2833 } 2834 2835 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2836 const Twine &Name, 2837 Instruction *InsertBefore) { 2838 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2839 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2840 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2841 } 2842 2843 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2844 const Twine &Name, 2845 BasicBlock *InsertAtEnd) { 2846 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2847 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2848 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2849 } 2850 2851 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2852 const Twine &Name, 2853 Instruction *InsertBefore) { 2854 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2855 return 
Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2856 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2857 } 2858 2859 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2860 const Twine &Name, 2861 BasicBlock *InsertAtEnd) { 2862 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2863 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2864 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2865 } 2866 2867 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2868 const Twine &Name, 2869 Instruction *InsertBefore) { 2870 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2871 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2872 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2873 } 2874 2875 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2876 const Twine &Name, 2877 BasicBlock *InsertAtEnd) { 2878 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2879 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2880 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2881 } 2882 2883 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2884 const Twine &Name, 2885 BasicBlock *InsertAtEnd) { 2886 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2887 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2888 "Invalid cast"); 2889 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2890 assert((!Ty->isVectorTy() || 2891 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2892 "Invalid cast"); 2893 2894 if (Ty->isIntOrIntVectorTy()) 2895 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2896 2897 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2898 } 2899 2900 /// Create a BitCast or a PtrToInt cast instruction 2901 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2902 const Twine &Name, 2903 Instruction *InsertBefore) { 2904 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2905 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2906 "Invalid cast"); 2907 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2908 assert((!Ty->isVectorTy() || 2909 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2910 "Invalid cast"); 2911 2912 if (Ty->isIntOrIntVectorTy()) 2913 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2914 2915 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2916 } 2917 2918 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2919 Value *S, Type *Ty, 2920 const Twine &Name, 2921 BasicBlock *InsertAtEnd) { 2922 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2923 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2924 2925 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2926 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2927 2928 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2929 } 2930 2931 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2932 Value *S, Type *Ty, 2933 const Twine &Name, 2934 Instruction *InsertBefore) { 2935 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2936 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2937 2938 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2939 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2940 
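// Illustrative choice (hypothetical types): i8* -> i8 addrspace(1)* takes the
// addrspacecast path above, while i8* -> i32* within the same address space
// falls through to the plain bitcast below.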
2941 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2942 } 2943 2944 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2945 const Twine &Name, 2946 Instruction *InsertBefore) { 2947 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2948 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2949 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2950 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2951 2952 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2953 } 2954 2955 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2956 bool isSigned, const Twine &Name, 2957 Instruction *InsertBefore) { 2958 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2959 "Invalid integer cast"); 2960 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2961 unsigned DstBits = Ty->getScalarSizeInBits(); 2962 Instruction::CastOps opcode = 2963 (SrcBits == DstBits ? Instruction::BitCast : 2964 (SrcBits > DstBits ? Instruction::Trunc : 2965 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2966 return Create(opcode, C, Ty, Name, InsertBefore); 2967 } 2968 2969 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2970 bool isSigned, const Twine &Name, 2971 BasicBlock *InsertAtEnd) { 2972 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2973 "Invalid cast"); 2974 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2975 unsigned DstBits = Ty->getScalarSizeInBits(); 2976 Instruction::CastOps opcode = 2977 (SrcBits == DstBits ? Instruction::BitCast : 2978 (SrcBits > DstBits ? Instruction::Trunc : 2979 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2980 return Create(opcode, C, Ty, Name, InsertAtEnd); 2981 } 2982 2983 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2984 const Twine &Name, 2985 Instruction *InsertBefore) { 2986 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2987 "Invalid cast"); 2988 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2989 unsigned DstBits = Ty->getScalarSizeInBits(); 2990 Instruction::CastOps opcode = 2991 (SrcBits == DstBits ? Instruction::BitCast : 2992 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2993 return Create(opcode, C, Ty, Name, InsertBefore); 2994 } 2995 2996 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2997 const Twine &Name, 2998 BasicBlock *InsertAtEnd) { 2999 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 3000 "Invalid cast"); 3001 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 3002 unsigned DstBits = Ty->getScalarSizeInBits(); 3003 Instruction::CastOps opcode = 3004 (SrcBits == DstBits ? Instruction::BitCast : 3005 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 3006 return Create(opcode, C, Ty, Name, InsertAtEnd); 3007 } 3008 3009 // Check whether it is valid to call getCastOpcode for these types. 3010 // This routine must be kept in sync with getCastOpcode. 3011 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 3012 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 3013 return false; 3014 3015 if (SrcTy == DestTy) 3016 return true; 3017 3018 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 3019 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 3020 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 3021 // An element by element cast. Valid if casting the elements is valid. 
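// Illustrative example (hypothetical types): <4 x i32> -> <4 x float> reduces
// to the scalar question of whether i32 -> float is castable, which it is.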
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {               // Casting to integral
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
    return SrcTy->isPointerTy();
  }
  if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
    return false;
  }
  if (DestTy->isVectorTy())                  // Casting to vector
    return DestBits == SrcBits;
  if (DestTy->isPointerTy()) {               // Casting to pointer
    if (SrcTy->isPointerTy())                // Casting from pointer
      return true;
    return SrcTy->isIntegerTy();             // Casting from integral
  }
  if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy())
      return DestBits == SrcBits;            // 64-bit vector to MMX
    return false;
  }                                          // Casting to something else
  return false;
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below. This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
// This routine must be kept in sync with isCastable.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
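  // Illustrative examples of the mapping below (types hypothetical):
  //   i32    -> i64 with SrcIsSigned=true  yields SExt
  //   i32    -> i64 with SrcIsSigned=false yields ZExt
  //   double -> float                      yields FPTrunc
  //   i8*    -> i64                        yields PtrToInt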
  if (DestTy->isIntegerTy()) {                 // Casting to integral
    if (SrcTy->isIntegerTy()) {                // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                          // int -> smaller int
      else if (DestBits > SrcBits) {           // it's an extension
        if (SrcIsSigned)
          return SExt;                         // signed -> SEXT
        else
          return ZExt;                         // unsigned -> ZEXT
      } else {
        return BitCast;                        // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {   // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                         // FP -> sint
      else
        return FPToUI;                         // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                          // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                         // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {    // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                         // sint -> FP
      else
        return UIToFP;                         // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {   // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                        // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                          // FP -> larger FP
      } else {
        return BitCast;                        // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                          // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                          // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                         // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                          // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
//                      CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
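/// For example (illustrative), castIsValid(Instruction::Trunc, V, DstTy) only
/// returns true when both types are integers (or integer vectors of matching
/// length) and the destination is strictly narrower than the source.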
3231 bool 3232 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 3233 // Check for type sanity on the arguments 3234 Type *SrcTy = S->getType(); 3235 3236 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 3237 SrcTy->isAggregateType() || DstTy->isAggregateType()) 3238 return false; 3239 3240 // Get the size of the types in bits, we'll need this later 3241 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 3242 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 3243 3244 // If these are vector types, get the lengths of the vectors (using zero for 3245 // scalar types means that checking that vector lengths match also checks that 3246 // scalars are not being converted to vectors or vectors to scalars). 3247 unsigned SrcLength = SrcTy->isVectorTy() ? 3248 cast<VectorType>(SrcTy)->getNumElements() : 0; 3249 unsigned DstLength = DstTy->isVectorTy() ? 3250 cast<VectorType>(DstTy)->getNumElements() : 0; 3251 3252 // Switch on the opcode provided 3253 switch (op) { 3254 default: return false; // This is an input error 3255 case Instruction::Trunc: 3256 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3257 SrcLength == DstLength && SrcBitSize > DstBitSize; 3258 case Instruction::ZExt: 3259 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3260 SrcLength == DstLength && SrcBitSize < DstBitSize; 3261 case Instruction::SExt: 3262 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3263 SrcLength == DstLength && SrcBitSize < DstBitSize; 3264 case Instruction::FPTrunc: 3265 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3266 SrcLength == DstLength && SrcBitSize > DstBitSize; 3267 case Instruction::FPExt: 3268 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3269 SrcLength == DstLength && SrcBitSize < DstBitSize; 3270 case Instruction::UIToFP: 3271 case Instruction::SIToFP: 3272 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3273 SrcLength == DstLength; 3274 case Instruction::FPToUI: 3275 case Instruction::FPToSI: 3276 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3277 SrcLength == DstLength; 3278 case Instruction::PtrToInt: 3279 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3280 return false; 3281 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3282 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3283 return false; 3284 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 3285 case Instruction::IntToPtr: 3286 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3287 return false; 3288 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3289 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3290 return false; 3291 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 3292 case Instruction::BitCast: { 3293 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3294 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3295 3296 // BitCast implies a no-op cast of type only. No bits change. 3297 // However, you can't cast pointers to anything but pointers. 3298 if (!SrcPtrTy != !DstPtrTy) 3299 return false; 3300 3301 // For non-pointer cases, the cast is okay if the source and destination bit 3302 // widths are identical. 3303 if (!SrcPtrTy) 3304 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3305 3306 // If both are pointers then the address spaces must match. 
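    // (A pointer bitcast that changes the address space is invalid; such a
    // conversion must be expressed with AddrSpaceCast instead.)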
3307 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3308 return false; 3309 3310 // A vector of pointers must have the same number of elements. 3311 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 3312 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 3313 if (SrcVecTy && DstVecTy) 3314 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3315 if (SrcVecTy) 3316 return SrcVecTy->getNumElements() == 1; 3317 if (DstVecTy) 3318 return DstVecTy->getNumElements() == 1; 3319 3320 return true; 3321 } 3322 case Instruction::AddrSpaceCast: { 3323 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3324 if (!SrcPtrTy) 3325 return false; 3326 3327 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3328 if (!DstPtrTy) 3329 return false; 3330 3331 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3332 return false; 3333 3334 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3335 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3336 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3337 3338 return false; 3339 } 3340 3341 return true; 3342 } 3343 } 3344 } 3345 3346 TruncInst::TruncInst( 3347 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3348 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3349 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3350 } 3351 3352 TruncInst::TruncInst( 3353 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3354 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3355 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3356 } 3357 3358 ZExtInst::ZExtInst( 3359 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3360 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3361 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3362 } 3363 3364 ZExtInst::ZExtInst( 3365 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3366 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3367 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3368 } 3369 SExtInst::SExtInst( 3370 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3371 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3372 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3373 } 3374 3375 SExtInst::SExtInst( 3376 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3377 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3378 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3379 } 3380 3381 FPTruncInst::FPTruncInst( 3382 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3383 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3384 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3385 } 3386 3387 FPTruncInst::FPTruncInst( 3388 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3389 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3390 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3391 } 3392 3393 FPExtInst::FPExtInst( 3394 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3395 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3396 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3397 } 3398 3399 FPExtInst::FPExtInst( 3400 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3401 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3402 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3403 } 3404 3405 UIToFPInst::UIToFPInst( 3406 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3407 ) : 
CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3408 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3409 } 3410 3411 UIToFPInst::UIToFPInst( 3412 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3413 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3414 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3415 } 3416 3417 SIToFPInst::SIToFPInst( 3418 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3419 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3420 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3421 } 3422 3423 SIToFPInst::SIToFPInst( 3424 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3425 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3426 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3427 } 3428 3429 FPToUIInst::FPToUIInst( 3430 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3431 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3432 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3433 } 3434 3435 FPToUIInst::FPToUIInst( 3436 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3437 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3438 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3439 } 3440 3441 FPToSIInst::FPToSIInst( 3442 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3443 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3444 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3445 } 3446 3447 FPToSIInst::FPToSIInst( 3448 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3449 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3450 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3451 } 3452 3453 PtrToIntInst::PtrToIntInst( 3454 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3455 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3456 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3457 } 3458 3459 PtrToIntInst::PtrToIntInst( 3460 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3461 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3462 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3463 } 3464 3465 IntToPtrInst::IntToPtrInst( 3466 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3467 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3468 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3469 } 3470 3471 IntToPtrInst::IntToPtrInst( 3472 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3473 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3474 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3475 } 3476 3477 BitCastInst::BitCastInst( 3478 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3479 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3480 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3481 } 3482 3483 BitCastInst::BitCastInst( 3484 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3485 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3486 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3487 } 3488 3489 AddrSpaceCastInst::AddrSpaceCastInst( 3490 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3491 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3492 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3493 } 3494 3495 AddrSpaceCastInst::AddrSpaceCastInst( 3496 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3497 ) : CastInst(Ty, 
AddrSpaceCast, S, Name, InsertAtEnd) { 3498 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3499 } 3500 3501 //===----------------------------------------------------------------------===// 3502 // CmpInst Classes 3503 //===----------------------------------------------------------------------===// 3504 3505 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3506 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3507 Instruction *FlagsSource) 3508 : Instruction(ty, op, 3509 OperandTraits<CmpInst>::op_begin(this), 3510 OperandTraits<CmpInst>::operands(this), 3511 InsertBefore) { 3512 Op<0>() = LHS; 3513 Op<1>() = RHS; 3514 setPredicate((Predicate)predicate); 3515 setName(Name); 3516 if (FlagsSource) 3517 copyIRFlags(FlagsSource); 3518 } 3519 3520 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3521 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3522 : Instruction(ty, op, 3523 OperandTraits<CmpInst>::op_begin(this), 3524 OperandTraits<CmpInst>::operands(this), 3525 InsertAtEnd) { 3526 Op<0>() = LHS; 3527 Op<1>() = RHS; 3528 setPredicate((Predicate)predicate); 3529 setName(Name); 3530 } 3531 3532 CmpInst * 3533 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3534 const Twine &Name, Instruction *InsertBefore) { 3535 if (Op == Instruction::ICmp) { 3536 if (InsertBefore) 3537 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3538 S1, S2, Name); 3539 else 3540 return new ICmpInst(CmpInst::Predicate(predicate), 3541 S1, S2, Name); 3542 } 3543 3544 if (InsertBefore) 3545 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3546 S1, S2, Name); 3547 else 3548 return new FCmpInst(CmpInst::Predicate(predicate), 3549 S1, S2, Name); 3550 } 3551 3552 CmpInst * 3553 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3554 const Twine &Name, BasicBlock *InsertAtEnd) { 3555 if (Op == Instruction::ICmp) { 3556 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3557 S1, S2, Name); 3558 } 3559 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3560 S1, S2, Name); 3561 } 3562 3563 void CmpInst::swapOperands() { 3564 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3565 IC->swapOperands(); 3566 else 3567 cast<FCmpInst>(this)->swapOperands(); 3568 } 3569 3570 bool CmpInst::isCommutative() const { 3571 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3572 return IC->isCommutative(); 3573 return cast<FCmpInst>(this)->isCommutative(); 3574 } 3575 3576 bool CmpInst::isEquality() const { 3577 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3578 return IC->isEquality(); 3579 return cast<FCmpInst>(this)->isEquality(); 3580 } 3581 3582 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3583 switch (pred) { 3584 default: llvm_unreachable("Unknown cmp predicate!"); 3585 case ICMP_EQ: return ICMP_NE; 3586 case ICMP_NE: return ICMP_EQ; 3587 case ICMP_UGT: return ICMP_ULE; 3588 case ICMP_ULT: return ICMP_UGE; 3589 case ICMP_UGE: return ICMP_ULT; 3590 case ICMP_ULE: return ICMP_UGT; 3591 case ICMP_SGT: return ICMP_SLE; 3592 case ICMP_SLT: return ICMP_SGE; 3593 case ICMP_SGE: return ICMP_SLT; 3594 case ICMP_SLE: return ICMP_SGT; 3595 3596 case FCMP_OEQ: return FCMP_UNE; 3597 case FCMP_ONE: return FCMP_UEQ; 3598 case FCMP_OGT: return FCMP_ULE; 3599 case FCMP_OLT: return FCMP_UGE; 3600 case FCMP_OGE: return FCMP_ULT; 3601 case FCMP_OLE: return FCMP_UGT; 3602 case FCMP_UEQ: return FCMP_ONE; 3603 case FCMP_UNE: return FCMP_OEQ; 3604 case 
FCMP_UGT: return FCMP_OLE; 3605 case FCMP_ULT: return FCMP_OGE; 3606 case FCMP_UGE: return FCMP_OLT; 3607 case FCMP_ULE: return FCMP_OGT; 3608 case FCMP_ORD: return FCMP_UNO; 3609 case FCMP_UNO: return FCMP_ORD; 3610 case FCMP_TRUE: return FCMP_FALSE; 3611 case FCMP_FALSE: return FCMP_TRUE; 3612 } 3613 } 3614 3615 StringRef CmpInst::getPredicateName(Predicate Pred) { 3616 switch (Pred) { 3617 default: return "unknown"; 3618 case FCmpInst::FCMP_FALSE: return "false"; 3619 case FCmpInst::FCMP_OEQ: return "oeq"; 3620 case FCmpInst::FCMP_OGT: return "ogt"; 3621 case FCmpInst::FCMP_OGE: return "oge"; 3622 case FCmpInst::FCMP_OLT: return "olt"; 3623 case FCmpInst::FCMP_OLE: return "ole"; 3624 case FCmpInst::FCMP_ONE: return "one"; 3625 case FCmpInst::FCMP_ORD: return "ord"; 3626 case FCmpInst::FCMP_UNO: return "uno"; 3627 case FCmpInst::FCMP_UEQ: return "ueq"; 3628 case FCmpInst::FCMP_UGT: return "ugt"; 3629 case FCmpInst::FCMP_UGE: return "uge"; 3630 case FCmpInst::FCMP_ULT: return "ult"; 3631 case FCmpInst::FCMP_ULE: return "ule"; 3632 case FCmpInst::FCMP_UNE: return "une"; 3633 case FCmpInst::FCMP_TRUE: return "true"; 3634 case ICmpInst::ICMP_EQ: return "eq"; 3635 case ICmpInst::ICMP_NE: return "ne"; 3636 case ICmpInst::ICMP_SGT: return "sgt"; 3637 case ICmpInst::ICMP_SGE: return "sge"; 3638 case ICmpInst::ICMP_SLT: return "slt"; 3639 case ICmpInst::ICMP_SLE: return "sle"; 3640 case ICmpInst::ICMP_UGT: return "ugt"; 3641 case ICmpInst::ICMP_UGE: return "uge"; 3642 case ICmpInst::ICMP_ULT: return "ult"; 3643 case ICmpInst::ICMP_ULE: return "ule"; 3644 } 3645 } 3646 3647 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3648 switch (pred) { 3649 default: llvm_unreachable("Unknown icmp predicate!"); 3650 case ICMP_EQ: case ICMP_NE: 3651 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3652 return pred; 3653 case ICMP_UGT: return ICMP_SGT; 3654 case ICMP_ULT: return ICMP_SLT; 3655 case ICMP_UGE: return ICMP_SGE; 3656 case ICMP_ULE: return ICMP_SLE; 3657 } 3658 } 3659 3660 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3661 switch (pred) { 3662 default: llvm_unreachable("Unknown icmp predicate!"); 3663 case ICMP_EQ: case ICMP_NE: 3664 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3665 return pred; 3666 case ICMP_SGT: return ICMP_UGT; 3667 case ICMP_SLT: return ICMP_ULT; 3668 case ICMP_SGE: return ICMP_UGE; 3669 case ICMP_SLE: return ICMP_ULE; 3670 } 3671 } 3672 3673 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3674 switch (pred) { 3675 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3676 case ICMP_SGT: return ICMP_SGE; 3677 case ICMP_SLT: return ICMP_SLE; 3678 case ICMP_SGE: return ICMP_SGT; 3679 case ICMP_SLE: return ICMP_SLT; 3680 case ICMP_UGT: return ICMP_UGE; 3681 case ICMP_ULT: return ICMP_ULE; 3682 case ICMP_UGE: return ICMP_UGT; 3683 case ICMP_ULE: return ICMP_ULT; 3684 3685 case FCMP_OGT: return FCMP_OGE; 3686 case FCMP_OLT: return FCMP_OLE; 3687 case FCMP_OGE: return FCMP_OGT; 3688 case FCMP_OLE: return FCMP_OLT; 3689 case FCMP_UGT: return FCMP_UGE; 3690 case FCMP_ULT: return FCMP_ULE; 3691 case FCMP_UGE: return FCMP_UGT; 3692 case FCMP_ULE: return FCMP_ULT; 3693 } 3694 } 3695 3696 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3697 switch (pred) { 3698 default: llvm_unreachable("Unknown cmp predicate!"); 3699 case ICMP_EQ: case ICMP_NE: 3700 return pred; 3701 case ICMP_SGT: return ICMP_SLT; 3702 case ICMP_SLT: return ICMP_SGT; 3703 case ICMP_SGE: return 
ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE: return true;
  }
}

bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch(predicate) {
  default: return false;
  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch(predicate) {
  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
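  // Illustrative example: isImpliedTrueByMatchingCmp(ICMP_UGT, ICMP_UGE)
  // returns true because A >u B guarantees A >=u B for the same operands.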
3810 if (Pred1 == Pred2) 3811 return true; 3812 3813 switch (Pred1) { 3814 default: 3815 break; 3816 case ICMP_EQ: 3817 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3818 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3819 Pred2 == ICMP_SLE; 3820 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3821 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3822 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3823 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3824 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3825 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3826 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3827 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3828 } 3829 return false; 3830 } 3831 3832 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3833 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3834 } 3835 3836 //===----------------------------------------------------------------------===// 3837 // SwitchInst Implementation 3838 //===----------------------------------------------------------------------===// 3839 3840 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3841 assert(Value && Default && NumReserved); 3842 ReservedSpace = NumReserved; 3843 setNumHungOffUseOperands(2); 3844 allocHungoffUses(ReservedSpace); 3845 3846 Op<0>() = Value; 3847 Op<1>() = Default; 3848 } 3849 3850 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3851 /// switch on and a default destination. The number of additional cases can 3852 /// be specified here to make memory allocation more efficient. This 3853 /// constructor can also autoinsert before another instruction. 3854 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3855 Instruction *InsertBefore) 3856 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3857 nullptr, 0, InsertBefore) { 3858 init(Value, Default, 2+NumCases*2); 3859 } 3860 3861 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3862 /// switch on and a default destination. The number of additional cases can 3863 /// be specified here to make memory allocation more efficient. This 3864 /// constructor also autoinserts at the end of the specified BasicBlock. 3865 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3866 BasicBlock *InsertAtEnd) 3867 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3868 nullptr, 0, InsertAtEnd) { 3869 init(Value, Default, 2+NumCases*2); 3870 } 3871 3872 SwitchInst::SwitchInst(const SwitchInst &SI) 3873 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3874 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3875 setNumHungOffUseOperands(SI.getNumOperands()); 3876 Use *OL = getOperandList(); 3877 const Use *InOL = SI.getOperandList(); 3878 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3879 OL[i] = InOL[i]; 3880 OL[i+1] = InOL[i+1]; 3881 } 3882 SubclassOptionalData = SI.SubclassOptionalData; 3883 } 3884 3885 /// addCase - Add an entry to the switch instruction... 3886 /// 3887 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3888 unsigned NewCaseIdx = getNumCases(); 3889 unsigned OpNo = getNumOperands(); 3890 if (OpNo+2 > ReservedSpace) 3891 growOperands(); // Get more space! 3892 // Initialize some new operands. 
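  // Operand layout reminder: operand 0 is the condition, operand 1 the default
  // destination, and each case occupies two trailing operands (value, dest).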
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *
SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
    if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
      if (MDName->getString() == "branch_weights")
        return ProfileData;
  return nullptr;
}

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes =
      all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights.getValue().size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getProfBranchWeightsMD(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
    ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
    uint32_t CW = C->getValue().getZExtValue();
    Weights.push_back(CW);
  }
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with how SwitchInst::removeCase(CaseIt) removes
    // the case operands.
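    // Note the +1 below: Weights[0] is the weight of the default destination,
    // so case N's weight lives at index N + 1.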
3990 Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back(); 3991 Weights.getValue().pop_back(); 3992 } 3993 return SI.removeCase(I); 3994 } 3995 3996 void SwitchInstProfUpdateWrapper::addCase( 3997 ConstantInt *OnVal, BasicBlock *Dest, 3998 SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 3999 SI.addCase(OnVal, Dest); 4000 4001 if (!Weights && W && *W) { 4002 Changed = true; 4003 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 4004 Weights.getValue()[SI.getNumSuccessors() - 1] = *W; 4005 } else if (Weights) { 4006 Changed = true; 4007 Weights.getValue().push_back(W ? *W : 0); 4008 } 4009 if (Weights) 4010 assert(SI.getNumSuccessors() == Weights->size() && 4011 "num of prof branch_weights must accord with num of successors"); 4012 } 4013 4014 SymbolTableList<Instruction>::iterator 4015 SwitchInstProfUpdateWrapper::eraseFromParent() { 4016 // Instruction is erased. Mark as unchanged to not touch it in the destructor. 4017 Changed = false; 4018 if (Weights) 4019 Weights->resize(0); 4020 return SI.eraseFromParent(); 4021 } 4022 4023 SwitchInstProfUpdateWrapper::CaseWeightOpt 4024 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) { 4025 if (!Weights) 4026 return None; 4027 return Weights.getValue()[idx]; 4028 } 4029 4030 void SwitchInstProfUpdateWrapper::setSuccessorWeight( 4031 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 4032 if (!W) 4033 return; 4034 4035 if (!Weights && *W) 4036 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 4037 4038 if (Weights) { 4039 auto &OldW = Weights.getValue()[idx]; 4040 if (*W != OldW) { 4041 Changed = true; 4042 OldW = *W; 4043 } 4044 } 4045 } 4046 4047 SwitchInstProfUpdateWrapper::CaseWeightOpt 4048 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI, 4049 unsigned idx) { 4050 if (MDNode *ProfileData = getProfBranchWeightsMD(SI)) 4051 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1) 4052 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1)) 4053 ->getValue() 4054 .getZExtValue(); 4055 4056 return None; 4057 } 4058 4059 //===----------------------------------------------------------------------===// 4060 // IndirectBrInst Implementation 4061 //===----------------------------------------------------------------------===// 4062 4063 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 4064 assert(Address && Address->getType()->isPointerTy() && 4065 "Address of indirectbr must be a pointer"); 4066 ReservedSpace = 1+NumDests; 4067 setNumHungOffUseOperands(1); 4068 allocHungoffUses(ReservedSpace); 4069 4070 Op<0>() = Address; 4071 } 4072 4073 4074 /// growOperands - grow operands - This grows the operand list in response 4075 /// to a push_back style of operation. This grows the number of ops by 2 times. 
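/// For example (illustrative), an indirectbr whose reserved space is full at
/// 4 operands grows to room for 8 before the next destination is appended.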
4076 /// 4077 void IndirectBrInst::growOperands() { 4078 unsigned e = getNumOperands(); 4079 unsigned NumOps = e*2; 4080 4081 ReservedSpace = NumOps; 4082 growHungoffUses(ReservedSpace); 4083 } 4084 4085 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4086 Instruction *InsertBefore) 4087 : Instruction(Type::getVoidTy(Address->getContext()), 4088 Instruction::IndirectBr, nullptr, 0, InsertBefore) { 4089 init(Address, NumCases); 4090 } 4091 4092 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4093 BasicBlock *InsertAtEnd) 4094 : Instruction(Type::getVoidTy(Address->getContext()), 4095 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) { 4096 init(Address, NumCases); 4097 } 4098 4099 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 4100 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 4101 nullptr, IBI.getNumOperands()) { 4102 allocHungoffUses(IBI.getNumOperands()); 4103 Use *OL = getOperandList(); 4104 const Use *InOL = IBI.getOperandList(); 4105 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 4106 OL[i] = InOL[i]; 4107 SubclassOptionalData = IBI.SubclassOptionalData; 4108 } 4109 4110 /// addDestination - Add a destination. 4111 /// 4112 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 4113 unsigned OpNo = getNumOperands(); 4114 if (OpNo+1 > ReservedSpace) 4115 growOperands(); // Get more space! 4116 // Initialize some new operands. 4117 assert(OpNo < ReservedSpace && "Growing didn't work!"); 4118 setNumHungOffUseOperands(OpNo+1); 4119 getOperandList()[OpNo] = DestBB; 4120 } 4121 4122 /// removeDestination - This method removes the specified successor from the 4123 /// indirectbr instruction. 4124 void IndirectBrInst::removeDestination(unsigned idx) { 4125 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 4126 4127 unsigned NumOps = getNumOperands(); 4128 Use *OL = getOperandList(); 4129 4130 // Replace this value with the last one. 4131 OL[idx+1] = OL[NumOps-1]; 4132 4133 // Nuke the last value. 4134 OL[NumOps-1].set(nullptr); 4135 setNumHungOffUseOperands(NumOps-1); 4136 } 4137 4138 //===----------------------------------------------------------------------===// 4139 // FreezeInst Implementation 4140 //===----------------------------------------------------------------------===// 4141 4142 FreezeInst::FreezeInst(Value *S, 4143 const Twine &Name, Instruction *InsertBefore) 4144 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) { 4145 setName(Name); 4146 } 4147 4148 FreezeInst::FreezeInst(Value *S, 4149 const Twine &Name, BasicBlock *InsertAtEnd) 4150 : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) { 4151 setName(Name); 4152 } 4153 4154 //===----------------------------------------------------------------------===// 4155 // cloneImpl() implementations 4156 //===----------------------------------------------------------------------===// 4157 4158 // Define these methods here so vtables don't get emitted into every translation 4159 // unit that uses these classes. 
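// Illustrative usage (not part of this file): callers normally go through
// Instruction::clone(), which dispatches to the appropriate cloneImpl()
// override below and then copies metadata and other common state, e.g.
//   Instruction *Copy = I->clone();
//   Copy->insertBefore(I);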
4160 4161 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 4162 return new (getNumOperands()) GetElementPtrInst(*this); 4163 } 4164 4165 UnaryOperator *UnaryOperator::cloneImpl() const { 4166 return Create(getOpcode(), Op<0>()); 4167 } 4168 4169 BinaryOperator *BinaryOperator::cloneImpl() const { 4170 return Create(getOpcode(), Op<0>(), Op<1>()); 4171 } 4172 4173 FCmpInst *FCmpInst::cloneImpl() const { 4174 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 4175 } 4176 4177 ICmpInst *ICmpInst::cloneImpl() const { 4178 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 4179 } 4180 4181 ExtractValueInst *ExtractValueInst::cloneImpl() const { 4182 return new ExtractValueInst(*this); 4183 } 4184 4185 InsertValueInst *InsertValueInst::cloneImpl() const { 4186 return new InsertValueInst(*this); 4187 } 4188 4189 AllocaInst *AllocaInst::cloneImpl() const { 4190 AllocaInst *Result = 4191 new AllocaInst(getAllocatedType(), getType()->getAddressSpace(), 4192 (Value *)getOperand(0), MaybeAlign(getAlignment())); 4193 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 4194 Result->setSwiftError(isSwiftError()); 4195 return Result; 4196 } 4197 4198 LoadInst *LoadInst::cloneImpl() const { 4199 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(), 4200 MaybeAlign(getAlignment()), getOrdering(), 4201 getSyncScopeID()); 4202 } 4203 4204 StoreInst *StoreInst::cloneImpl() const { 4205 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), 4206 MaybeAlign(getAlignment()), getOrdering(), 4207 getSyncScopeID()); 4208 } 4209 4210 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 4211 AtomicCmpXchgInst *Result = 4212 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 4213 getSuccessOrdering(), getFailureOrdering(), 4214 getSyncScopeID()); 4215 Result->setVolatile(isVolatile()); 4216 Result->setWeak(isWeak()); 4217 return Result; 4218 } 4219 4220 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 4221 AtomicRMWInst *Result = 4222 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1), 4223 getOrdering(), getSyncScopeID()); 4224 Result->setVolatile(isVolatile()); 4225 return Result; 4226 } 4227 4228 FenceInst *FenceInst::cloneImpl() const { 4229 return new FenceInst(getContext(), getOrdering(), getSyncScopeID()); 4230 } 4231 4232 TruncInst *TruncInst::cloneImpl() const { 4233 return new TruncInst(getOperand(0), getType()); 4234 } 4235 4236 ZExtInst *ZExtInst::cloneImpl() const { 4237 return new ZExtInst(getOperand(0), getType()); 4238 } 4239 4240 SExtInst *SExtInst::cloneImpl() const { 4241 return new SExtInst(getOperand(0), getType()); 4242 } 4243 4244 FPTruncInst *FPTruncInst::cloneImpl() const { 4245 return new FPTruncInst(getOperand(0), getType()); 4246 } 4247 4248 FPExtInst *FPExtInst::cloneImpl() const { 4249 return new FPExtInst(getOperand(0), getType()); 4250 } 4251 4252 UIToFPInst *UIToFPInst::cloneImpl() const { 4253 return new UIToFPInst(getOperand(0), getType()); 4254 } 4255 4256 SIToFPInst *SIToFPInst::cloneImpl() const { 4257 return new SIToFPInst(getOperand(0), getType()); 4258 } 4259 4260 FPToUIInst *FPToUIInst::cloneImpl() const { 4261 return new FPToUIInst(getOperand(0), getType()); 4262 } 4263 4264 FPToSIInst *FPToSIInst::cloneImpl() const { 4265 return new FPToSIInst(getOperand(0), getType()); 4266 } 4267 4268 PtrToIntInst *PtrToIntInst::cloneImpl() const { 4269 return new PtrToIntInst(getOperand(0), getType()); 4270 } 4271 4272 IntToPtrInst *IntToPtrInst::cloneImpl() const { 4273 return new IntToPtrInst(getOperand(0), 
getType()); 4274 } 4275 4276 BitCastInst *BitCastInst::cloneImpl() const { 4277 return new BitCastInst(getOperand(0), getType()); 4278 } 4279 4280 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 4281 return new AddrSpaceCastInst(getOperand(0), getType()); 4282 } 4283 4284 CallInst *CallInst::cloneImpl() const { 4285 if (hasOperandBundles()) { 4286 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4287 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 4288 } 4289 return new(getNumOperands()) CallInst(*this); 4290 } 4291 4292 SelectInst *SelectInst::cloneImpl() const { 4293 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4294 } 4295 4296 VAArgInst *VAArgInst::cloneImpl() const { 4297 return new VAArgInst(getOperand(0), getType()); 4298 } 4299 4300 ExtractElementInst *ExtractElementInst::cloneImpl() const { 4301 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 4302 } 4303 4304 InsertElementInst *InsertElementInst::cloneImpl() const { 4305 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4306 } 4307 4308 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 4309 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 4310 } 4311 4312 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 4313 4314 LandingPadInst *LandingPadInst::cloneImpl() const { 4315 return new LandingPadInst(*this); 4316 } 4317 4318 ReturnInst *ReturnInst::cloneImpl() const { 4319 return new(getNumOperands()) ReturnInst(*this); 4320 } 4321 4322 BranchInst *BranchInst::cloneImpl() const { 4323 return new(getNumOperands()) BranchInst(*this); 4324 } 4325 4326 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 4327 4328 IndirectBrInst *IndirectBrInst::cloneImpl() const { 4329 return new IndirectBrInst(*this); 4330 } 4331 4332 InvokeInst *InvokeInst::cloneImpl() const { 4333 if (hasOperandBundles()) { 4334 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4335 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 4336 } 4337 return new(getNumOperands()) InvokeInst(*this); 4338 } 4339 4340 CallBrInst *CallBrInst::cloneImpl() const { 4341 if (hasOperandBundles()) { 4342 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4343 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this); 4344 } 4345 return new (getNumOperands()) CallBrInst(*this); 4346 } 4347 4348 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 4349 4350 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 4351 return new (getNumOperands()) CleanupReturnInst(*this); 4352 } 4353 4354 CatchReturnInst *CatchReturnInst::cloneImpl() const { 4355 return new (getNumOperands()) CatchReturnInst(*this); 4356 } 4357 4358 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 4359 return new CatchSwitchInst(*this); 4360 } 4361 4362 FuncletPadInst *FuncletPadInst::cloneImpl() const { 4363 return new (getNumOperands()) FuncletPadInst(*this); 4364 } 4365 4366 UnreachableInst *UnreachableInst::cloneImpl() const { 4367 LLVMContext &Context = getContext(); 4368 return new UnreachableInst(Context); 4369 } 4370 4371 FreezeInst *FreezeInst::cloneImpl() const { 4372 return new FreezeInst(getOperand(0)); 4373 } 4374