//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation; otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
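// Illustrative sketch (not part of the original file): how a caller might use
// areInvalidOperands before building a select. `Cond`, `TVal`, `FVal` and
// `InsertPt` are hypothetical values assumed to be in scope.
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Err);   // e.g. a <4 x i1> condition with <4 x i32>
//   else                         // operands is fine; mismatched vector
//     SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);  // widths are not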
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 1.5 times.
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the
/// same value, return the value; otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
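// Illustrative sketch (not part of the original file; %x is assumed to be some
// dominating value): for a PHI such as
//   %p = phi i32 [ %x, %bb1 ], [ %x, %bb2 ], [ %p, %loop ]
// hasConstantValue() returns %x, because every incoming value is either %x or
// the PHI itself; if *all* incoming values were the PHI, it would return undef.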
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
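// Illustrative sketch (not part of the original file): building a landingpad
// and appending a catch clause. `LPadTy`, `TyInfo` and `InsertPt` are
// hypothetical values assumed to exist in the caller.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1, "lpad",
//                              InsertPt);
//   LP->addClause(TyInfo);   // catch clause; growOperands() reserves space
//   LP->setCleanup(true);    // may additionally be marked as a cleanup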
//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledValue();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    if (CI->isInlineAsm())
      return false;
  return true;
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
      !NullPointerIsDefined(getCaller(),
                            getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;

  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}
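// Illustrative note (not part of the original file): the return and parameter
// attribute queries above consult the call-site attribute list first and then
// fall back to the callee's own attributes. So for a hypothetical call `CB`
// whose callee is `declare nonnull i8* @f()`:
//
//   CB->hasRetAttr(Attribute::NonNull);   // true, via the callee fallback
//   CB->isReturnNonNull();                // also true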
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
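// Illustrative sketch of the operand layout populateBundleOperandInfos fills
// in (not part of the original file). For a hypothetical call
//   call void @g(i32 %a) [ "deopt"(i32 %x, i32 %y), "funclet"(token %t) ]
// the operand list is laid out as
//   [ %a | %x %y | %t | @g ]
// and two BundleOpInfo records are written: {"deopt", Begin=1, End=3} and
// {"funclet", Begin=3, End=4}, i.e. half-open index ranges into the operand
// list, starting right after the last fixed argument.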
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight of a call instruction by scaling it using the
// ratio of S/T. The meaning of "branch_weights" metadata for a call
// instruction is transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
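// Illustrative arithmetic (not part of the original file): with existing
// !prof metadata of !{!"branch_weights", i64 1000} and a scale of S=3, T=4,
// updateProfWeight computes 1000 * 3 / 4 = 750 and rewrites the metadata to
// !{!"branch_weights", i64 750}. The multiply is done in 128-bit APInt so the
// intermediate product cannot overflow 64 bits.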
/// IsConstantOne - Return true only if val is constant int 1.
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block; that is
/// the responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
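// Illustrative sketch (not part of the original file): emitting
// `malloc(sizeof(T) * N)` plus a bitcast in front of an existing instruction.
// `InsertPt`, `T`, `N`, `Ctx` and `DL` are hypothetical values in the caller.
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Value *ElemSz  = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(T));
//   Instruction *P = CallInst::CreateMalloc(InsertPt, IntPtrTy, T, ElemSz,
//                                           /*ArraySize=*/N,
//                                           /*MallocF=*/nullptr, "obj");
//   // P is either the malloc call itself (if T is i8) or the trailing bitcast.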
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block; that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
                                   II->getNormalDest(), II->getUnwindDest(),
                                   Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}
//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
                                    CBI->getCalledValue(),
                                    CBI->getDefaultDest(),
                                    CBI->getIndirectDests(),
                                    Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}
//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
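// Illustrative note (not part of the original file): a conditional branch
// stores its operands in reverse order -- Op<-3> is the condition, Op<-2> the
// false destination and Op<-1> the true destination -- so swapSuccessors()
// only needs to swap the last two operands and any "branch_weights" metadata.
// `TrueBB`, `FalseBB`, `Cond` and `InsertPt` below are hypothetical:
//
//   BranchInst *BI = BranchInst::Create(TrueBB, FalseBB, Cond, InsertPt);
//   BI->swapSuccessors();   // now BI->getSuccessor(0) == FalseBB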
//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertBefore) {
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(MaybeAlign(Align));
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
void AllocaInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             encode(Align));
  if (Align)
    assert(getAlignment() == Align->value() &&
           "Alignment representation error!");
  else
    assert(getAlignment() == 0 && "Alignment representation error!");
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
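// Illustrative IR (not part of the original file) for the distinction above:
//
//   entry:
//     %a = alloca i32, align 4          ; static: entry block, constant size
//   loop:
//     %b = alloca i32, i32 %n, align 4  ; not static: runtime count, and not
//                                       ; in the entry block
//
// Only %a is folded into the function's fixed stack frame by the code
// generator.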
//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(MaybeAlign(Align));
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

void LoadInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             (encode(Align) << 1));
  if (Align)
    assert(getAlignment() == Align->value() &&
           "Alignment representation error!");
  else
    assert(getAlignment() == 0 && "Alignment representation error!");
}
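// Illustrative note (not part of the original file): the alignment setters
// above store log2(alignment) + 1 in a 5-bit field of the instruction's
// subclass data. For loads and stores, bit 0 holds the volatile flag, hence
// the << 1 shift and the ~(31 << 1) mask. For example, Align(16) is encoded
// as log2(16) + 1 = 5, and a missing alignment (None) is encoded as 0, which
// decodes back to getAlignment() == 0.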
//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
             cast<PointerType>(getOperand(1)->getType())->getElementType() &&
         "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

void StoreInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             (encode(Align) << 1));
  if (Align)
    assert(getAlignment() == Align->value() &&
           "Alignment representation error!");
  else
    assert(getAlignment() == 0 && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}
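// Illustrative sketch (not part of the original file) of an ordering pair that
// satisfies the assertions above: success = AcquireRelease with failure =
// Acquire is valid (the failure ordering is no stronger and carries no release
// semantics), whereas failure = Release, or a failure ordering stronger than
// the success ordering, would trip them. `Ptr`, `Expected`, `Desired` and
// `InsertPt` are hypothetical:
//
//   new AtomicCmpXchgInst(Ptr, Expected, Desired,
//                         AtomicOrdering::AcquireRelease,
//                         AtomicOrdering::Acquire,
//                         SyncScope::System, InsertPt);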
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType() &&
         "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}
OperandTraits<AtomicRMWInst>::op_begin(this), 1533 OperandTraits<AtomicRMWInst>::operands(this), 1534 InsertAtEnd) { 1535 Init(Operation, Ptr, Val, Ordering, SSID); 1536 } 1537 1538 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1539 switch (Op) { 1540 case AtomicRMWInst::Xchg: 1541 return "xchg"; 1542 case AtomicRMWInst::Add: 1543 return "add"; 1544 case AtomicRMWInst::Sub: 1545 return "sub"; 1546 case AtomicRMWInst::And: 1547 return "and"; 1548 case AtomicRMWInst::Nand: 1549 return "nand"; 1550 case AtomicRMWInst::Or: 1551 return "or"; 1552 case AtomicRMWInst::Xor: 1553 return "xor"; 1554 case AtomicRMWInst::Max: 1555 return "max"; 1556 case AtomicRMWInst::Min: 1557 return "min"; 1558 case AtomicRMWInst::UMax: 1559 return "umax"; 1560 case AtomicRMWInst::UMin: 1561 return "umin"; 1562 case AtomicRMWInst::FAdd: 1563 return "fadd"; 1564 case AtomicRMWInst::FSub: 1565 return "fsub"; 1566 case AtomicRMWInst::BAD_BINOP: 1567 return "<invalid operation>"; 1568 } 1569 1570 llvm_unreachable("invalid atomicrmw operation"); 1571 } 1572 1573 //===----------------------------------------------------------------------===// 1574 // FenceInst Implementation 1575 //===----------------------------------------------------------------------===// 1576 1577 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1578 SyncScope::ID SSID, 1579 Instruction *InsertBefore) 1580 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { 1581 setOrdering(Ordering); 1582 setSyncScopeID(SSID); 1583 } 1584 1585 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1586 SyncScope::ID SSID, 1587 BasicBlock *InsertAtEnd) 1588 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { 1589 setOrdering(Ordering); 1590 setSyncScopeID(SSID); 1591 } 1592 1593 //===----------------------------------------------------------------------===// 1594 // GetElementPtrInst Implementation 1595 //===----------------------------------------------------------------------===// 1596 1597 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, 1598 const Twine &Name) { 1599 assert(getNumOperands() == 1 + IdxList.size() && 1600 "NumOperands not initialized?"); 1601 Op<0>() = Ptr; 1602 llvm::copy(IdxList, op_begin() + 1); 1603 setName(Name); 1604 } 1605 1606 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) 1607 : Instruction(GEPI.getType(), GetElementPtr, 1608 OperandTraits<GetElementPtrInst>::op_end(this) - 1609 GEPI.getNumOperands(), 1610 GEPI.getNumOperands()), 1611 SourceElementType(GEPI.SourceElementType), 1612 ResultElementType(GEPI.ResultElementType) { 1613 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); 1614 SubclassOptionalData = GEPI.SubclassOptionalData; 1615 } 1616 1617 /// getIndexedType - Returns the type of the element that would be accessed with 1618 /// a gep instruction with the specified parameters. 1619 /// 1620 /// The Idxs pointer should point to a continuous piece of memory containing the 1621 /// indices, either as Value* or uint64_t. 1622 /// 1623 /// A null type is returned if the indices are invalid for the specified 1624 /// pointer type. 1625 /// 1626 template <typename IndexTy> 1627 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) { 1628 // Handle the special case of the empty set index set, which is always valid. 1629 if (IdxList.empty()) 1630 return Agg; 1631 1632 // If there is at least one index, the top level type must be sized, otherwise 1633 // it cannot be 'stepped over'. 
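// For example, for a source element type of {i32, [4 x float]}, the index
// list {0, 1, 2} first steps over the whole (sized) aggregate, then selects
// the [4 x float] field, then a float element, so the indexed type is float.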
1634 if (!Agg->isSized()) 1635 return nullptr; 1636 1637 unsigned CurIdx = 1; 1638 for (; CurIdx != IdxList.size(); ++CurIdx) { 1639 CompositeType *CT = dyn_cast<CompositeType>(Agg); 1640 if (!CT || CT->isPointerTy()) return nullptr; 1641 IndexTy Index = IdxList[CurIdx]; 1642 if (!CT->indexValid(Index)) return nullptr; 1643 Agg = CT->getTypeAtIndex(Index); 1644 } 1645 return CurIdx == IdxList.size() ? Agg : nullptr; 1646 } 1647 1648 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1649 return getIndexedTypeInternal(Ty, IdxList); 1650 } 1651 1652 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1653 ArrayRef<Constant *> IdxList) { 1654 return getIndexedTypeInternal(Ty, IdxList); 1655 } 1656 1657 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1658 return getIndexedTypeInternal(Ty, IdxList); 1659 } 1660 1661 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1662 /// zeros. If so, the result pointer and the first operand have the same 1663 /// value, just potentially different types. 1664 bool GetElementPtrInst::hasAllZeroIndices() const { 1665 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1666 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1667 if (!CI->isZero()) return false; 1668 } else { 1669 return false; 1670 } 1671 } 1672 return true; 1673 } 1674 1675 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1676 /// constant integers. If so, the result pointer and the first operand have 1677 /// a constant offset between them. 1678 bool GetElementPtrInst::hasAllConstantIndices() const { 1679 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1680 if (!isa<ConstantInt>(getOperand(i))) 1681 return false; 1682 } 1683 return true; 1684 } 1685 1686 void GetElementPtrInst::setIsInBounds(bool B) { 1687 cast<GEPOperator>(this)->setIsInBounds(B); 1688 } 1689 1690 bool GetElementPtrInst::isInBounds() const { 1691 return cast<GEPOperator>(this)->isInBounds(); 1692 } 1693 1694 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1695 APInt &Offset) const { 1696 // Delegate to the generic GEPOperator implementation. 
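// For example, assuming a data layout where i32 occupies 4 bytes, a GEP such
// as getelementptr i32, i32* %p, i64 3 accumulates a constant offset of 12
// bytes, while a GEP with any non-constant index makes the call return false.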
1697 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1698 } 1699 1700 //===----------------------------------------------------------------------===// 1701 // ExtractElementInst Implementation 1702 //===----------------------------------------------------------------------===// 1703 1704 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1705 const Twine &Name, 1706 Instruction *InsertBef) 1707 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1708 ExtractElement, 1709 OperandTraits<ExtractElementInst>::op_begin(this), 1710 2, InsertBef) { 1711 assert(isValidOperands(Val, Index) && 1712 "Invalid extractelement instruction operands!"); 1713 Op<0>() = Val; 1714 Op<1>() = Index; 1715 setName(Name); 1716 } 1717 1718 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1719 const Twine &Name, 1720 BasicBlock *InsertAE) 1721 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1722 ExtractElement, 1723 OperandTraits<ExtractElementInst>::op_begin(this), 1724 2, InsertAE) { 1725 assert(isValidOperands(Val, Index) && 1726 "Invalid extractelement instruction operands!"); 1727 1728 Op<0>() = Val; 1729 Op<1>() = Index; 1730 setName(Name); 1731 } 1732 1733 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1734 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1735 return false; 1736 return true; 1737 } 1738 1739 //===----------------------------------------------------------------------===// 1740 // InsertElementInst Implementation 1741 //===----------------------------------------------------------------------===// 1742 1743 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1744 const Twine &Name, 1745 Instruction *InsertBef) 1746 : Instruction(Vec->getType(), InsertElement, 1747 OperandTraits<InsertElementInst>::op_begin(this), 1748 3, InsertBef) { 1749 assert(isValidOperands(Vec, Elt, Index) && 1750 "Invalid insertelement instruction operands!"); 1751 Op<0>() = Vec; 1752 Op<1>() = Elt; 1753 Op<2>() = Index; 1754 setName(Name); 1755 } 1756 1757 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1758 const Twine &Name, 1759 BasicBlock *InsertAE) 1760 : Instruction(Vec->getType(), InsertElement, 1761 OperandTraits<InsertElementInst>::op_begin(this), 1762 3, InsertAE) { 1763 assert(isValidOperands(Vec, Elt, Index) && 1764 "Invalid insertelement instruction operands!"); 1765 1766 Op<0>() = Vec; 1767 Op<1>() = Elt; 1768 Op<2>() = Index; 1769 setName(Name); 1770 } 1771 1772 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1773 const Value *Index) { 1774 if (!Vec->getType()->isVectorTy()) 1775 return false; // First operand of insertelement must be vector type. 1776 1777 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1778 return false;// Second operand of insertelement must be vector element type. 1779 1780 if (!Index->getType()->isIntegerTy()) 1781 return false; // Third operand of insertelement must be i32. 
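// All three checks passed. For example, insertelement <4 x float> %v, float
// %x, i64 0 is accepted; the index only needs to be of some integer type.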
1782 return true; 1783 } 1784 1785 //===----------------------------------------------------------------------===// 1786 // ShuffleVectorInst Implementation 1787 //===----------------------------------------------------------------------===// 1788 1789 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1790 const Twine &Name, 1791 Instruction *InsertBefore) 1792 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1793 cast<VectorType>(Mask->getType())->getElementCount()), 1794 ShuffleVector, 1795 OperandTraits<ShuffleVectorInst>::op_begin(this), 1796 OperandTraits<ShuffleVectorInst>::operands(this), 1797 InsertBefore) { 1798 assert(isValidOperands(V1, V2, Mask) && 1799 "Invalid shuffle vector instruction operands!"); 1800 Op<0>() = V1; 1801 Op<1>() = V2; 1802 Op<2>() = Mask; 1803 setName(Name); 1804 } 1805 1806 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1807 const Twine &Name, 1808 BasicBlock *InsertAtEnd) 1809 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1810 cast<VectorType>(Mask->getType())->getElementCount()), 1811 ShuffleVector, 1812 OperandTraits<ShuffleVectorInst>::op_begin(this), 1813 OperandTraits<ShuffleVectorInst>::operands(this), 1814 InsertAtEnd) { 1815 assert(isValidOperands(V1, V2, Mask) && 1816 "Invalid shuffle vector instruction operands!"); 1817 1818 Op<0>() = V1; 1819 Op<1>() = V2; 1820 Op<2>() = Mask; 1821 setName(Name); 1822 } 1823 1824 void ShuffleVectorInst::commute() { 1825 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1826 int NumMaskElts = getMask()->getType()->getVectorNumElements(); 1827 SmallVector<Constant*, 16> NewMask(NumMaskElts); 1828 Type *Int32Ty = Type::getInt32Ty(getContext()); 1829 for (int i = 0; i != NumMaskElts; ++i) { 1830 int MaskElt = getMaskValue(i); 1831 if (MaskElt == -1) { 1832 NewMask[i] = UndefValue::get(Int32Ty); 1833 continue; 1834 } 1835 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask"); 1836 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts; 1837 NewMask[i] = ConstantInt::get(Int32Ty, MaskElt); 1838 } 1839 Op<2>() = ConstantVector::get(NewMask); 1840 Op<0>().swap(Op<1>()); 1841 } 1842 1843 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1844 const Value *Mask) { 1845 // V1 and V2 must be vectors of the same type. 1846 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1847 return false; 1848 1849 // Mask must be vector of i32. 1850 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1851 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1852 return false; 1853 1854 // Check to see if Mask is valid. 
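// For example, with two <4 x i32> inputs, a mask of undef, zeroinitializer,
// or <i32 0, i32 5, i32 2, i32 7> is accepted; any constant element that is
// >= 8 (twice the source vector length) is rejected below.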
1855 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1856 return true; 1857 1858 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1859 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1860 for (Value *Op : MV->operands()) { 1861 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1862 if (CI->uge(V1Size*2)) 1863 return false; 1864 } else if (!isa<UndefValue>(Op)) { 1865 return false; 1866 } 1867 } 1868 return true; 1869 } 1870 1871 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1872 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1873 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1874 if (CDS->getElementAsInteger(i) >= V1Size*2) 1875 return false; 1876 return true; 1877 } 1878 1879 // The bitcode reader can create a place holder for a forward reference 1880 // used as the shuffle mask. When this occurs, the shuffle mask will 1881 // fall into this case and fail. To avoid this error, do this bit of 1882 // ugliness to allow such a mask pass. 1883 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1884 if (CE->getOpcode() == Instruction::UserOp1) 1885 return true; 1886 1887 return false; 1888 } 1889 1890 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) { 1891 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1892 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1893 return CDS->getElementAsInteger(i); 1894 Constant *C = Mask->getAggregateElement(i); 1895 if (isa<UndefValue>(C)) 1896 return -1; 1897 return cast<ConstantInt>(C)->getZExtValue(); 1898 } 1899 1900 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 1901 SmallVectorImpl<int> &Result) { 1902 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1903 1904 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1905 for (unsigned i = 0; i != NumElts; ++i) 1906 Result.push_back(CDS->getElementAsInteger(i)); 1907 return; 1908 } 1909 for (unsigned i = 0; i != NumElts; ++i) { 1910 Constant *C = Mask->getAggregateElement(i); 1911 Result.push_back(isa<UndefValue>(C) ? -1 : 1912 cast<ConstantInt>(C)->getZExtValue()); 1913 } 1914 } 1915 1916 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1917 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1918 bool UsesLHS = false; 1919 bool UsesRHS = false; 1920 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1921 if (Mask[i] == -1) 1922 continue; 1923 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1924 "Out-of-bounds shuffle mask element"); 1925 UsesLHS |= (Mask[i] < NumOpElts); 1926 UsesRHS |= (Mask[i] >= NumOpElts); 1927 if (UsesLHS && UsesRHS) 1928 return false; 1929 } 1930 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1931 return true; 1932 } 1933 1934 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1935 // We don't have vector operand size information, so assume operands are the 1936 // same size as the mask. 
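// For example, for 4-element operands, <0, 1, 2, 3> and <7, 6, 5, 4> are
// single-source masks (undef elements are ignored), while <0, 4, 1, 5>
// chooses elements from both sources and is not.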
1937 return isSingleSourceMaskImpl(Mask, Mask.size()); 1938 } 1939 1940 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1941 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1942 return false; 1943 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1944 if (Mask[i] == -1) 1945 continue; 1946 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1947 return false; 1948 } 1949 return true; 1950 } 1951 1952 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1953 // We don't have vector operand size information, so assume operands are the 1954 // same size as the mask. 1955 return isIdentityMaskImpl(Mask, Mask.size()); 1956 } 1957 1958 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1959 if (!isSingleSourceMask(Mask)) 1960 return false; 1961 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1962 if (Mask[i] == -1) 1963 continue; 1964 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1965 return false; 1966 } 1967 return true; 1968 } 1969 1970 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1971 if (!isSingleSourceMask(Mask)) 1972 return false; 1973 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1974 if (Mask[i] == -1) 1975 continue; 1976 if (Mask[i] != 0 && Mask[i] != NumElts) 1977 return false; 1978 } 1979 return true; 1980 } 1981 1982 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1983 // Select is differentiated from identity. It requires using both sources. 1984 if (isSingleSourceMask(Mask)) 1985 return false; 1986 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1987 if (Mask[i] == -1) 1988 continue; 1989 if (Mask[i] != i && Mask[i] != (NumElts + i)) 1990 return false; 1991 } 1992 return true; 1993 } 1994 1995 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 1996 // Example masks that will return true: 1997 // v1 = <a, b, c, d> 1998 // v2 = <e, f, g, h> 1999 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 2000 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 2001 2002 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 2003 int NumElts = Mask.size(); 2004 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 2005 return false; 2006 2007 // 2. The first element of the mask must be either a 0 or a 1. 2008 if (Mask[0] != 0 && Mask[0] != 1) 2009 return false; 2010 2011 // 3. The difference between the first 2 elements must be equal to the 2012 // number of elements in the mask. 2013 if ((Mask[1] - Mask[0]) != NumElts) 2014 return false; 2015 2016 // 4. The difference between consecutive even-numbered and odd-numbered 2017 // elements must be equal to 2. 2018 for (int i = 2; i < NumElts; ++i) { 2019 int MaskEltVal = Mask[i]; 2020 if (MaskEltVal == -1) 2021 return false; 2022 int MaskEltPrevVal = Mask[i - 2]; 2023 if (MaskEltVal - MaskEltPrevVal != 2) 2024 return false; 2025 } 2026 return true; 2027 } 2028 2029 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 2030 int NumSrcElts, int &Index) { 2031 // Must extract from a single source. 2032 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 2033 return false; 2034 2035 // Must be smaller (else this is an Identity shuffle). 2036 if (NumSrcElts <= (int)Mask.size()) 2037 return false; 2038 2039 // Find start of extraction, accounting that we may start with an UNDEF. 
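// For example, with NumSrcElts == 8, the mask <2, 3, 4, 5> (or
// <undef, 3, 4, 5>) extracts a 4-element subvector starting at Index 2.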
2040 int SubIndex = -1; 2041 for (int i = 0, e = Mask.size(); i != e; ++i) { 2042 int M = Mask[i]; 2043 if (M < 0) 2044 continue; 2045 int Offset = (M % NumSrcElts) - i; 2046 if (0 <= SubIndex && SubIndex != Offset) 2047 return false; 2048 SubIndex = Offset; 2049 } 2050 2051 if (0 <= SubIndex) { 2052 Index = SubIndex; 2053 return true; 2054 } 2055 return false; 2056 } 2057 2058 bool ShuffleVectorInst::isIdentityWithPadding() const { 2059 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2060 int NumMaskElts = getType()->getVectorNumElements(); 2061 if (NumMaskElts <= NumOpElts) 2062 return false; 2063 2064 // The first part of the mask must choose elements from exactly 1 source op. 2065 SmallVector<int, 16> Mask = getShuffleMask(); 2066 if (!isIdentityMaskImpl(Mask, NumOpElts)) 2067 return false; 2068 2069 // All extending must be with undef elements. 2070 for (int i = NumOpElts; i < NumMaskElts; ++i) 2071 if (Mask[i] != -1) 2072 return false; 2073 2074 return true; 2075 } 2076 2077 bool ShuffleVectorInst::isIdentityWithExtract() const { 2078 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2079 int NumMaskElts = getType()->getVectorNumElements(); 2080 if (NumMaskElts >= NumOpElts) 2081 return false; 2082 2083 return isIdentityMaskImpl(getShuffleMask(), NumOpElts); 2084 } 2085 2086 bool ShuffleVectorInst::isConcat() const { 2087 // Vector concatenation is differentiated from identity with padding. 2088 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>())) 2089 return false; 2090 2091 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 2092 int NumMaskElts = getType()->getVectorNumElements(); 2093 if (NumMaskElts != NumOpElts * 2) 2094 return false; 2095 2096 // Use the mask length rather than the operands' vector lengths here. We 2097 // already know that the shuffle returns a vector twice as long as the inputs, 2098 // and neither of the inputs are undef vectors. If the mask picks consecutive 2099 // elements from both inputs, then this is a concatenation of the inputs. 2100 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); 2101 } 2102 2103 //===----------------------------------------------------------------------===// 2104 // InsertValueInst Class 2105 //===----------------------------------------------------------------------===// 2106 2107 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 2108 const Twine &Name) { 2109 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 2110 2111 // There's no fundamental reason why we require at least one index 2112 // (other than weirdness with &*IdxBegin being invalid; see 2113 // getelementptr's init routine for example). But there's no 2114 // present need to support it. 
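// For example, insertvalue {i32, [2 x float]} %agg, float %val, 1, 0 writes
// %val into element 0 of the array field; the assertion below uses
// ExtractValueInst::getIndexedType to check that the indexed type is float.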
2115 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 2116 2117 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 2118 Val->getType() && "Inserted value must match indexed type!"); 2119 Op<0>() = Agg; 2120 Op<1>() = Val; 2121 2122 Indices.append(Idxs.begin(), Idxs.end()); 2123 setName(Name); 2124 } 2125 2126 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 2127 : Instruction(IVI.getType(), InsertValue, 2128 OperandTraits<InsertValueInst>::op_begin(this), 2), 2129 Indices(IVI.Indices) { 2130 Op<0>() = IVI.getOperand(0); 2131 Op<1>() = IVI.getOperand(1); 2132 SubclassOptionalData = IVI.SubclassOptionalData; 2133 } 2134 2135 //===----------------------------------------------------------------------===// 2136 // ExtractValueInst Class 2137 //===----------------------------------------------------------------------===// 2138 2139 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 2140 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 2141 2142 // There's no fundamental reason why we require at least one index. 2143 // But there's no present need to support it. 2144 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 2145 2146 Indices.append(Idxs.begin(), Idxs.end()); 2147 setName(Name); 2148 } 2149 2150 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 2151 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 2152 Indices(EVI.Indices) { 2153 SubclassOptionalData = EVI.SubclassOptionalData; 2154 } 2155 2156 // getIndexedType - Returns the type of the element that would be extracted 2157 // with an extractvalue instruction with the specified parameters. 2158 // 2159 // A null type is returned if the indices are invalid for the specified 2160 // pointer type. 2161 // 2162 Type *ExtractValueInst::getIndexedType(Type *Agg, 2163 ArrayRef<unsigned> Idxs) { 2164 for (unsigned Index : Idxs) { 2165 // We can't use CompositeType::indexValid(Index) here. 2166 // indexValid() always returns true for arrays because getelementptr allows 2167 // out-of-bounds indices. Since we don't allow those for extractvalue and 2168 // insertvalue we need to check array indexing manually. 2169 // Since the only other types we can index into are struct types it's just 2170 // as easy to check those manually as well. 2171 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 2172 if (Index >= AT->getNumElements()) 2173 return nullptr; 2174 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 2175 if (Index >= ST->getNumElements()) 2176 return nullptr; 2177 } else { 2178 // Not a valid type to index into. 
2179 return nullptr; 2180 } 2181 2182 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 2183 } 2184 return const_cast<Type*>(Agg); 2185 } 2186 2187 //===----------------------------------------------------------------------===// 2188 // UnaryOperator Class 2189 //===----------------------------------------------------------------------===// 2190 2191 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2192 Type *Ty, const Twine &Name, 2193 Instruction *InsertBefore) 2194 : UnaryInstruction(Ty, iType, S, InsertBefore) { 2195 Op<0>() = S; 2196 setName(Name); 2197 AssertOK(); 2198 } 2199 2200 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2201 Type *Ty, const Twine &Name, 2202 BasicBlock *InsertAtEnd) 2203 : UnaryInstruction(Ty, iType, S, InsertAtEnd) { 2204 Op<0>() = S; 2205 setName(Name); 2206 AssertOK(); 2207 } 2208 2209 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2210 const Twine &Name, 2211 Instruction *InsertBefore) { 2212 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore); 2213 } 2214 2215 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2216 const Twine &Name, 2217 BasicBlock *InsertAtEnd) { 2218 UnaryOperator *Res = Create(Op, S, Name); 2219 InsertAtEnd->getInstList().push_back(Res); 2220 return Res; 2221 } 2222 2223 void UnaryOperator::AssertOK() { 2224 Value *LHS = getOperand(0); 2225 (void)LHS; // Silence warnings. 2226 #ifndef NDEBUG 2227 switch (getOpcode()) { 2228 case FNeg: 2229 assert(getType() == LHS->getType() && 2230 "Unary operation should return same type as operand!"); 2231 assert(getType()->isFPOrFPVectorTy() && 2232 "Tried to create a floating-point operation on a " 2233 "non-floating-point type!"); 2234 break; 2235 default: llvm_unreachable("Invalid opcode provided"); 2236 } 2237 #endif 2238 } 2239 2240 //===----------------------------------------------------------------------===// 2241 // BinaryOperator Class 2242 //===----------------------------------------------------------------------===// 2243 2244 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2245 Type *Ty, const Twine &Name, 2246 Instruction *InsertBefore) 2247 : Instruction(Ty, iType, 2248 OperandTraits<BinaryOperator>::op_begin(this), 2249 OperandTraits<BinaryOperator>::operands(this), 2250 InsertBefore) { 2251 Op<0>() = S1; 2252 Op<1>() = S2; 2253 setName(Name); 2254 AssertOK(); 2255 } 2256 2257 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2258 Type *Ty, const Twine &Name, 2259 BasicBlock *InsertAtEnd) 2260 : Instruction(Ty, iType, 2261 OperandTraits<BinaryOperator>::op_begin(this), 2262 OperandTraits<BinaryOperator>::operands(this), 2263 InsertAtEnd) { 2264 Op<0>() = S1; 2265 Op<1>() = S2; 2266 setName(Name); 2267 AssertOK(); 2268 } 2269 2270 void BinaryOperator::AssertOK() { 2271 Value *LHS = getOperand(0), *RHS = getOperand(1); 2272 (void)LHS; (void)RHS; // Silence warnings. 
2273 assert(LHS->getType() == RHS->getType() && 2274 "Binary operator operand types must match!"); 2275 #ifndef NDEBUG 2276 switch (getOpcode()) { 2277 case Add: case Sub: 2278 case Mul: 2279 assert(getType() == LHS->getType() && 2280 "Arithmetic operation should return same type as operands!"); 2281 assert(getType()->isIntOrIntVectorTy() && 2282 "Tried to create an integer operation on a non-integer type!"); 2283 break; 2284 case FAdd: case FSub: 2285 case FMul: 2286 assert(getType() == LHS->getType() && 2287 "Arithmetic operation should return same type as operands!"); 2288 assert(getType()->isFPOrFPVectorTy() && 2289 "Tried to create a floating-point operation on a " 2290 "non-floating-point type!"); 2291 break; 2292 case UDiv: 2293 case SDiv: 2294 assert(getType() == LHS->getType() && 2295 "Arithmetic operation should return same type as operands!"); 2296 assert(getType()->isIntOrIntVectorTy() && 2297 "Incorrect operand type (not integer) for S/UDIV"); 2298 break; 2299 case FDiv: 2300 assert(getType() == LHS->getType() && 2301 "Arithmetic operation should return same type as operands!"); 2302 assert(getType()->isFPOrFPVectorTy() && 2303 "Incorrect operand type (not floating point) for FDIV"); 2304 break; 2305 case URem: 2306 case SRem: 2307 assert(getType() == LHS->getType() && 2308 "Arithmetic operation should return same type as operands!"); 2309 assert(getType()->isIntOrIntVectorTy() && 2310 "Incorrect operand type (not integer) for S/UREM"); 2311 break; 2312 case FRem: 2313 assert(getType() == LHS->getType() && 2314 "Arithmetic operation should return same type as operands!"); 2315 assert(getType()->isFPOrFPVectorTy() && 2316 "Incorrect operand type (not floating point) for FREM"); 2317 break; 2318 case Shl: 2319 case LShr: 2320 case AShr: 2321 assert(getType() == LHS->getType() && 2322 "Shift operation should return same type as operands!"); 2323 assert(getType()->isIntOrIntVectorTy() && 2324 "Tried to create a shift operation on a non-integral type!"); 2325 break; 2326 case And: case Or: 2327 case Xor: 2328 assert(getType() == LHS->getType() && 2329 "Logical operation should return same type as operands!"); 2330 assert(getType()->isIntOrIntVectorTy() && 2331 "Tried to create a logical operation on a non-integral type!"); 2332 break; 2333 default: llvm_unreachable("Invalid opcode provided"); 2334 } 2335 #endif 2336 } 2337 2338 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2339 const Twine &Name, 2340 Instruction *InsertBefore) { 2341 assert(S1->getType() == S2->getType() && 2342 "Cannot create binary operator with two operands of differing type!"); 2343 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 2344 } 2345 2346 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2347 const Twine &Name, 2348 BasicBlock *InsertAtEnd) { 2349 BinaryOperator *Res = Create(Op, S1, S2, Name); 2350 InsertAtEnd->getInstList().push_back(Res); 2351 return Res; 2352 } 2353 2354 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2355 Instruction *InsertBefore) { 2356 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2357 return new BinaryOperator(Instruction::Sub, 2358 zero, Op, 2359 Op->getType(), Name, InsertBefore); 2360 } 2361 2362 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2363 BasicBlock *InsertAtEnd) { 2364 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2365 return new BinaryOperator(Instruction::Sub, 2366 zero, Op, 2367 Op->getType(), 
Name, InsertAtEnd); 2368 } 2369 2370 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2371 Instruction *InsertBefore) { 2372 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2373 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 2374 } 2375 2376 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2377 BasicBlock *InsertAtEnd) { 2378 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2379 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2380 } 2381 2382 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2383 Instruction *InsertBefore) { 2384 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2385 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2386 } 2387 2388 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2389 BasicBlock *InsertAtEnd) { 2390 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2391 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2392 } 2393 2394 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2395 Instruction *InsertBefore) { 2396 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2397 return new BinaryOperator(Instruction::FSub, zero, Op, 2398 Op->getType(), Name, InsertBefore); 2399 } 2400 2401 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2402 BasicBlock *InsertAtEnd) { 2403 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2404 return new BinaryOperator(Instruction::FSub, zero, Op, 2405 Op->getType(), Name, InsertAtEnd); 2406 } 2407 2408 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2409 Instruction *InsertBefore) { 2410 Constant *C = Constant::getAllOnesValue(Op->getType()); 2411 return new BinaryOperator(Instruction::Xor, Op, C, 2412 Op->getType(), Name, InsertBefore); 2413 } 2414 2415 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2416 BasicBlock *InsertAtEnd) { 2417 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2418 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2419 Op->getType(), Name, InsertAtEnd); 2420 } 2421 2422 // Exchange the two operands to this instruction. This instruction is safe to 2423 // use on any binary instruction and does not modify the semantics of the 2424 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2425 // is changed. 2426 bool BinaryOperator::swapOperands() { 2427 if (!isCommutative()) 2428 return true; // Can't commute operands 2429 Op<0>().swap(Op<1>()); 2430 return false; 2431 } 2432 2433 //===----------------------------------------------------------------------===// 2434 // FPMathOperator Class 2435 //===----------------------------------------------------------------------===// 2436 2437 float FPMathOperator::getFPAccuracy() const { 2438 const MDNode *MD = 2439 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2440 if (!MD) 2441 return 0.0; 2442 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2443 return Accuracy->getValueAPF().convertToFloat(); 2444 } 2445 2446 //===----------------------------------------------------------------------===// 2447 // CastInst Class 2448 //===----------------------------------------------------------------------===// 2449 2450 // Just determine if this cast only deals with integral->integral conversion. 
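// For example, trunc, zext and sext always qualify, while a bitcast such as
// bitcast <2 x i32> %v to i64 does not, because a bitcast only counts when
// both the source and the destination are scalar integer types.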
2451 bool CastInst::isIntegerCast() const { 2452 switch (getOpcode()) { 2453 default: return false; 2454 case Instruction::ZExt: 2455 case Instruction::SExt: 2456 case Instruction::Trunc: 2457 return true; 2458 case Instruction::BitCast: 2459 return getOperand(0)->getType()->isIntegerTy() && 2460 getType()->isIntegerTy(); 2461 } 2462 } 2463 2464 bool CastInst::isLosslessCast() const { 2465 // Only BitCast can be lossless, exit fast if we're not BitCast 2466 if (getOpcode() != Instruction::BitCast) 2467 return false; 2468 2469 // Identity cast is always lossless 2470 Type *SrcTy = getOperand(0)->getType(); 2471 Type *DstTy = getType(); 2472 if (SrcTy == DstTy) 2473 return true; 2474 2475 // Pointer to pointer is always lossless. 2476 if (SrcTy->isPointerTy()) 2477 return DstTy->isPointerTy(); 2478 return false; // Other types have no identity values 2479 } 2480 2481 /// This function determines if the CastInst does not require any bits to be 2482 /// changed in order to effect the cast. Essentially, it identifies cases where 2483 /// no code gen is necessary for the cast, hence the name no-op cast. For 2484 /// example, the following are all no-op casts: 2485 /// # bitcast i32* %x to i8* 2486 /// # bitcast <2 x i32> %x to <4 x i16> 2487 /// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only 2488 /// Determine if the described cast is a no-op. 2489 bool CastInst::isNoopCast(Instruction::CastOps Opcode, 2490 Type *SrcTy, 2491 Type *DestTy, 2492 const DataLayout &DL) { 2493 switch (Opcode) { 2494 default: llvm_unreachable("Invalid CastOp"); 2495 case Instruction::Trunc: 2496 case Instruction::ZExt: 2497 case Instruction::SExt: 2498 case Instruction::FPTrunc: 2499 case Instruction::FPExt: 2500 case Instruction::UIToFP: 2501 case Instruction::SIToFP: 2502 case Instruction::FPToUI: 2503 case Instruction::FPToSI: 2504 case Instruction::AddrSpaceCast: 2505 // TODO: Target informations may give a more accurate answer here. 2506 return false; 2507 case Instruction::BitCast: 2508 return true; // BitCast never modifies bits. 2509 case Instruction::PtrToInt: 2510 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() == 2511 DestTy->getScalarSizeInBits(); 2512 case Instruction::IntToPtr: 2513 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() == 2514 SrcTy->getScalarSizeInBits(); 2515 } 2516 } 2517 2518 bool CastInst::isNoopCast(const DataLayout &DL) const { 2519 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL); 2520 } 2521 2522 /// This function determines if a pair of casts can be eliminated and what 2523 /// opcode should be used in the elimination. This assumes that there are two 2524 /// instructions like this: 2525 /// * %F = firstOpcode SrcTy %x to MidTy 2526 /// * %S = secondOpcode MidTy %F to DstTy 2527 /// The function returns a resultOpcode so these two casts can be replaced with: 2528 /// * %Replacement = resultOpcode %SrcTy %x to DstTy 2529 /// If no such cast is permitted, the function returns 0. 2530 unsigned CastInst::isEliminableCastPair( 2531 Instruction::CastOps firstOp, Instruction::CastOps secondOp, 2532 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, 2533 Type *DstIntPtrTy) { 2534 // Define the 144 possibilities for these two cast instructions. The values 2535 // in this matrix determine what to do in a given situation and select the 2536 // case in the switch below. The rows correspond to firstOp, the columns 2537 // correspond to secondOp. 
In looking at the table below, keep in mind 2538 // the following cast properties: 2539 // 2540 // Size Compare Source Destination 2541 // Operator Src ? Size Type Sign Type Sign 2542 // -------- ------------ ------------------- --------------------- 2543 // TRUNC > Integer Any Integral Any 2544 // ZEXT < Integral Unsigned Integer Any 2545 // SEXT < Integral Signed Integer Any 2546 // FPTOUI n/a FloatPt n/a Integral Unsigned 2547 // FPTOSI n/a FloatPt n/a Integral Signed 2548 // UITOFP n/a Integral Unsigned FloatPt n/a 2549 // SITOFP n/a Integral Signed FloatPt n/a 2550 // FPTRUNC > FloatPt n/a FloatPt n/a 2551 // FPEXT < FloatPt n/a FloatPt n/a 2552 // PTRTOINT n/a Pointer n/a Integral Unsigned 2553 // INTTOPTR n/a Integral Unsigned Pointer n/a 2554 // BITCAST = FirstClass n/a FirstClass n/a 2555 // ADDRSPCST n/a Pointer n/a Pointer n/a 2556 // 2557 // NOTE: some transforms are safe, but we consider them to be non-profitable. 2558 // For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2559 // into "fptoui double to i64", but this loses information about the range 2560 // of the produced value (we no longer know the top-part is all zeros). 2561 // Further this conversion is often much more expensive for typical hardware, 2562 // and causes issues when building libgcc. We disallow fptosi+sext for the 2563 // same reason. 2564 const unsigned numCastOps = 2565 Instruction::CastOpsEnd - Instruction::CastOpsBegin; 2566 static const uint8_t CastResults[numCastOps][numCastOps] = { 2567 // T F F U S F F P I B A -+ 2568 // R Z S P P I I T P 2 N T S | 2569 // U E E 2 2 2 2 R E I T C C +- secondOp 2570 // N X X U S F F N X N 2 V V | 2571 // C T T I I P P C T T P T T -+ 2572 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+ 2573 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt | 2574 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt | 2575 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI | 2576 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI | 2577 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp 2578 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP | 2579 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc | 2580 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt | 2581 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt | 2582 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr | 2583 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast | 2584 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ 2585 }; 2586 2587 // TODO: This logic could be encoded into the table above and handled in the 2588 // switch below. 2589 // If either of the casts are a bitcast from scalar to vector, disallow the 2590 // merging. However, any pair of bitcasts are allowed. 2591 bool IsFirstBitcast = (firstOp == Instruction::BitCast); 2592 bool IsSecondBitcast = (secondOp == Instruction::BitCast); 2593 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast; 2594 2595 // Check if any of the casts convert scalars <-> vectors. 2596 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || 2597 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) 2598 if (!AreBothBitcasts) 2599 return 0; 2600 2601 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] 2602 [secondOp-Instruction::CastOpsBegin]; 2603 switch (ElimCase) { 2604 case 0: 2605 // Categorically disallowed. 2606 return 0; 2607 case 1: 2608 // Allowed, use first cast's opcode. 2609 return firstOp; 2610 case 2: 2611 // Allowed, use second cast's opcode. 
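// For example, fpext float %x to double followed by fptosi double to i32
// folds to a single fptosi float %x to i32.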
2612 return secondOp; 2613 case 3: 2614 // No-op cast in second op implies firstOp as long as the DestTy 2615 // is integer and we are not converting between a vector and a 2616 // non-vector type. 2617 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy()) 2618 return firstOp; 2619 return 0; 2620 case 4: 2621 // No-op cast in second op implies firstOp as long as the DestTy 2622 // is floating point. 2623 if (DstTy->isFloatingPointTy()) 2624 return firstOp; 2625 return 0; 2626 case 5: 2627 // No-op cast in first op implies secondOp as long as the SrcTy 2628 // is an integer. 2629 if (SrcTy->isIntegerTy()) 2630 return secondOp; 2631 return 0; 2632 case 6: 2633 // No-op cast in first op implies secondOp as long as the SrcTy 2634 // is a floating point. 2635 if (SrcTy->isFloatingPointTy()) 2636 return secondOp; 2637 return 0; 2638 case 7: { 2639 // Cannot simplify if address spaces are different! 2640 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) 2641 return 0; 2642 2643 unsigned MidSize = MidTy->getScalarSizeInBits(); 2644 // We can still fold this without knowing the actual sizes as long we 2645 // know that the intermediate pointer is the largest possible 2646 // pointer size. 2647 // FIXME: Is this always true? 2648 if (MidSize == 64) 2649 return Instruction::BitCast; 2650 2651 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size. 2652 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) 2653 return 0; 2654 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); 2655 if (MidSize >= PtrSize) 2656 return Instruction::BitCast; 2657 return 0; 2658 } 2659 case 8: { 2660 // ext, trunc -> bitcast, if the SrcTy and DstTy are same size 2661 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy) 2662 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy) 2663 unsigned SrcSize = SrcTy->getScalarSizeInBits(); 2664 unsigned DstSize = DstTy->getScalarSizeInBits(); 2665 if (SrcSize == DstSize) 2666 return Instruction::BitCast; 2667 else if (SrcSize < DstSize) 2668 return firstOp; 2669 return secondOp; 2670 } 2671 case 9: 2672 // zext, sext -> zext, because sext can't sign extend after zext 2673 return Instruction::ZExt; 2674 case 11: { 2675 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize 2676 if (!MidIntPtrTy) 2677 return 0; 2678 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); 2679 unsigned SrcSize = SrcTy->getScalarSizeInBits(); 2680 unsigned DstSize = DstTy->getScalarSizeInBits(); 2681 if (SrcSize <= PtrSize && SrcSize == DstSize) 2682 return Instruction::BitCast; 2683 return 0; 2684 } 2685 case 12: 2686 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS 2687 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS 2688 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) 2689 return Instruction::AddrSpaceCast; 2690 return Instruction::BitCast; 2691 case 13: 2692 // FIXME: this state can be merged with (1), but the following assert 2693 // is useful to check the correcteness of the sequence due to semantic 2694 // change of bitcast. 
2695 assert( 2696 SrcTy->isPtrOrPtrVectorTy() && 2697 MidTy->isPtrOrPtrVectorTy() && 2698 DstTy->isPtrOrPtrVectorTy() && 2699 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() && 2700 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() && 2701 "Illegal addrspacecast, bitcast sequence!"); 2702 // Allowed, use first cast's opcode 2703 return firstOp; 2704 case 14: 2705 // bitcast, addrspacecast -> addrspacecast if the element type of 2706 // bitcast's source is the same as that of addrspacecast's destination. 2707 if (SrcTy->getScalarType()->getPointerElementType() == 2708 DstTy->getScalarType()->getPointerElementType()) 2709 return Instruction::AddrSpaceCast; 2710 return 0; 2711 case 15: 2712 // FIXME: this state can be merged with (1), but the following assert 2713 // is useful to check the correctness of the sequence due to semantic 2714 // change of bitcast. 2715 assert( 2716 SrcTy->isIntOrIntVectorTy() && 2717 MidTy->isPtrOrPtrVectorTy() && 2718 DstTy->isPtrOrPtrVectorTy() && 2719 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() && 2720 "Illegal inttoptr, bitcast sequence!"); 2721 // Allowed, use first cast's opcode 2722 return firstOp; 2723 case 16: 2724 // FIXME: this state can be merged with (2), but the following assert 2725 // is useful to check the correctness of the sequence due to semantic 2726 // change of bitcast. 2727 assert( 2728 SrcTy->isPtrOrPtrVectorTy() && 2729 MidTy->isPtrOrPtrVectorTy() && 2730 DstTy->isIntOrIntVectorTy() && 2731 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() && 2732 "Illegal bitcast, ptrtoint sequence!"); 2733 // Allowed, use second cast's opcode 2734 return secondOp; 2735 case 17: 2736 // (sitofp (zext x)) -> (uitofp x) 2737 return Instruction::UIToFP; 2738 case 99: 2739 // Cast combination can't happen (error in input). This is for all cases 2740 // where the MidTy is not the same for the two cast instructions.
2741 llvm_unreachable("Invalid Cast Combination"); 2742 default: 2743 llvm_unreachable("Error in CastResults table!!!"); 2744 } 2745 } 2746 2747 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2748 const Twine &Name, Instruction *InsertBefore) { 2749 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2750 // Construct and return the appropriate CastInst subclass 2751 switch (op) { 2752 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); 2753 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); 2754 case SExt: return new SExtInst (S, Ty, Name, InsertBefore); 2755 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); 2756 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); 2757 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); 2758 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); 2759 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); 2760 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); 2761 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); 2762 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); 2763 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); 2764 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore); 2765 default: llvm_unreachable("Invalid opcode provided"); 2766 } 2767 } 2768 2769 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2770 const Twine &Name, BasicBlock *InsertAtEnd) { 2771 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2772 // Construct and return the appropriate CastInst subclass 2773 switch (op) { 2774 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2775 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2776 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2777 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2778 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2779 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2780 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2781 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2782 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2783 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2784 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2785 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2786 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2787 default: llvm_unreachable("Invalid opcode provided"); 2788 } 2789 } 2790 2791 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2792 const Twine &Name, 2793 Instruction *InsertBefore) { 2794 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2795 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2796 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2797 } 2798 2799 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2800 const Twine &Name, 2801 BasicBlock *InsertAtEnd) { 2802 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2803 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2804 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2805 } 2806 2807 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2808 const Twine &Name, 2809 Instruction *InsertBefore) { 2810 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2811 return 
Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2812 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2813 } 2814 2815 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2816 const Twine &Name, 2817 BasicBlock *InsertAtEnd) { 2818 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2819 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2820 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2821 } 2822 2823 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2824 const Twine &Name, 2825 Instruction *InsertBefore) { 2826 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2827 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2828 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2829 } 2830 2831 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2832 const Twine &Name, 2833 BasicBlock *InsertAtEnd) { 2834 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2835 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2836 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2837 } 2838 2839 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2840 const Twine &Name, 2841 BasicBlock *InsertAtEnd) { 2842 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2843 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2844 "Invalid cast"); 2845 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2846 assert((!Ty->isVectorTy() || 2847 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2848 "Invalid cast"); 2849 2850 if (Ty->isIntOrIntVectorTy()) 2851 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2852 2853 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2854 } 2855 2856 /// Create a BitCast or a PtrToInt cast instruction 2857 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2858 const Twine &Name, 2859 Instruction *InsertBefore) { 2860 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2861 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2862 "Invalid cast"); 2863 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2864 assert((!Ty->isVectorTy() || 2865 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2866 "Invalid cast"); 2867 2868 if (Ty->isIntOrIntVectorTy()) 2869 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2870 2871 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2872 } 2873 2874 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2875 Value *S, Type *Ty, 2876 const Twine &Name, 2877 BasicBlock *InsertAtEnd) { 2878 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2879 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2880 2881 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2882 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2883 2884 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2885 } 2886 2887 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2888 Value *S, Type *Ty, 2889 const Twine &Name, 2890 Instruction *InsertBefore) { 2891 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2892 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2893 2894 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2895 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2896 
2897 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2898 } 2899 2900 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2901 const Twine &Name, 2902 Instruction *InsertBefore) { 2903 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2904 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2905 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2906 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2907 2908 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2909 } 2910 2911 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2912 bool isSigned, const Twine &Name, 2913 Instruction *InsertBefore) { 2914 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2915 "Invalid integer cast"); 2916 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2917 unsigned DstBits = Ty->getScalarSizeInBits(); 2918 Instruction::CastOps opcode = 2919 (SrcBits == DstBits ? Instruction::BitCast : 2920 (SrcBits > DstBits ? Instruction::Trunc : 2921 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2922 return Create(opcode, C, Ty, Name, InsertBefore); 2923 } 2924 2925 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2926 bool isSigned, const Twine &Name, 2927 BasicBlock *InsertAtEnd) { 2928 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2929 "Invalid cast"); 2930 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2931 unsigned DstBits = Ty->getScalarSizeInBits(); 2932 Instruction::CastOps opcode = 2933 (SrcBits == DstBits ? Instruction::BitCast : 2934 (SrcBits > DstBits ? Instruction::Trunc : 2935 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2936 return Create(opcode, C, Ty, Name, InsertAtEnd); 2937 } 2938 2939 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2940 const Twine &Name, 2941 Instruction *InsertBefore) { 2942 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2943 "Invalid cast"); 2944 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2945 unsigned DstBits = Ty->getScalarSizeInBits(); 2946 Instruction::CastOps opcode = 2947 (SrcBits == DstBits ? Instruction::BitCast : 2948 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2949 return Create(opcode, C, Ty, Name, InsertBefore); 2950 } 2951 2952 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2953 const Twine &Name, 2954 BasicBlock *InsertAtEnd) { 2955 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2956 "Invalid cast"); 2957 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2958 unsigned DstBits = Ty->getScalarSizeInBits(); 2959 Instruction::CastOps opcode = 2960 (SrcBits == DstBits ? Instruction::BitCast : 2961 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2962 return Create(opcode, C, Ty, Name, InsertAtEnd); 2963 } 2964 2965 // Check whether it is valid to call getCastOpcode for these types. 2966 // This routine must be kept in sync with getCastOpcode. 2967 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2968 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2969 return false; 2970 2971 if (SrcTy == DestTy) 2972 return true; 2973 2974 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2975 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2976 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2977 // An element by element cast. Valid if casting the elements is valid. 
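// For example, <4 x i32> to <4 x float> reduces to asking whether an i32 can
// be cast to a float, which the integer-to-floating-point case below accepts.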
2978 SrcTy = SrcVecTy->getElementType(); 2979 DestTy = DestVecTy->getElementType(); 2980 } 2981 2982 // Get the bit sizes, we'll need these 2983 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2984 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2985 2986 // Run through the possibilities ... 2987 if (DestTy->isIntegerTy()) { // Casting to integral 2988 if (SrcTy->isIntegerTy()) // Casting from integral 2989 return true; 2990 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2991 return true; 2992 if (SrcTy->isVectorTy()) // Casting from vector 2993 return DestBits == SrcBits; 2994 // Casting from something else 2995 return SrcTy->isPointerTy(); 2996 } 2997 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2998 if (SrcTy->isIntegerTy()) // Casting from integral 2999 return true; 3000 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 3001 return true; 3002 if (SrcTy->isVectorTy()) // Casting from vector 3003 return DestBits == SrcBits; 3004 // Casting from something else 3005 return false; 3006 } 3007 if (DestTy->isVectorTy()) // Casting to vector 3008 return DestBits == SrcBits; 3009 if (DestTy->isPointerTy()) { // Casting to pointer 3010 if (SrcTy->isPointerTy()) // Casting from pointer 3011 return true; 3012 return SrcTy->isIntegerTy(); // Casting from integral 3013 } 3014 if (DestTy->isX86_MMXTy()) { 3015 if (SrcTy->isVectorTy()) 3016 return DestBits == SrcBits; // 64-bit vector to MMX 3017 return false; 3018 } // Casting to something else 3019 return false; 3020 } 3021 3022 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 3023 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 3024 return false; 3025 3026 if (SrcTy == DestTy) 3027 return true; 3028 3029 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3030 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 3031 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) { 3032 // An element by element cast. Valid if casting the elements is valid. 
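// For example, <4 x i32> and <4 x float> reduce to comparing i32 and float,
// which have the same 32-bit size; vectors with different element counts,
// such as <2 x i64> and <4 x i32>, fall through to the total-size check below.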
3033 SrcTy = SrcVecTy->getElementType(); 3034 DestTy = DestVecTy->getElementType(); 3035 } 3036 } 3037 } 3038 3039 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 3040 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 3041 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 3042 } 3043 } 3044 3045 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 3046 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 3047 3048 // Could still have vectors of pointers if the number of elements doesn't 3049 // match 3050 if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0) 3051 return false; 3052 3053 if (SrcBits != DestBits) 3054 return false; 3055 3056 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 3057 return false; 3058 3059 return true; 3060 } 3061 3062 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 3063 const DataLayout &DL) { 3064 // ptrtoint and inttoptr are not allowed on non-integral pointers 3065 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 3066 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 3067 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3068 !DL.isNonIntegralPointerType(PtrTy)); 3069 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 3070 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 3071 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3072 !DL.isNonIntegralPointerType(PtrTy)); 3073 3074 return isBitCastable(SrcTy, DestTy); 3075 } 3076 3077 // Provide a way to get a "cast" where the cast opcode is inferred from the 3078 // types and size of the operand. This, basically, is a parallel of the 3079 // logic in the castIsValid function below. This axiom should hold: 3080 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 3081 // should not assert in castIsValid. In other words, this produces a "correct" 3082 // casting opcode for the arguments passed to it. 3083 // This routine must be kept in sync with isCastable. 3084 Instruction::CastOps 3085 CastInst::getCastOpcode( 3086 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 3087 Type *SrcTy = Src->getType(); 3088 3089 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 3090 "Only first class types are castable!"); 3091 3092 if (SrcTy == DestTy) 3093 return BitCast; 3094 3095 // FIXME: Check address space sizes here 3096 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 3097 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 3098 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 3099 // An element by element cast. Find the appropriate opcode based on the 3100 // element types. 3101 SrcTy = SrcVecTy->getElementType(); 3102 DestTy = DestVecTy->getElementType(); 3103 } 3104 3105 // Get the bit sizes, we'll need these 3106 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 3107 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 3108 3109 // Run through the possibilities ... 
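  // A few concrete outcomes of the checks below, assuming ordinary scalar
  // types:
  //   i16 -> i32 with SrcIsSigned == true  yields SExt
  //   i64 -> i32                           yields Trunc
  //   double -> float                      yields FPTrunc
  //   i8*  -> i64                          yields PtrToInt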
3110 if (DestTy->isIntegerTy()) { // Casting to integral 3111 if (SrcTy->isIntegerTy()) { // Casting from integral 3112 if (DestBits < SrcBits) 3113 return Trunc; // int -> smaller int 3114 else if (DestBits > SrcBits) { // its an extension 3115 if (SrcIsSigned) 3116 return SExt; // signed -> SEXT 3117 else 3118 return ZExt; // unsigned -> ZEXT 3119 } else { 3120 return BitCast; // Same size, No-op cast 3121 } 3122 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 3123 if (DestIsSigned) 3124 return FPToSI; // FP -> sint 3125 else 3126 return FPToUI; // FP -> uint 3127 } else if (SrcTy->isVectorTy()) { 3128 assert(DestBits == SrcBits && 3129 "Casting vector to integer of different width"); 3130 return BitCast; // Same size, no-op cast 3131 } else { 3132 assert(SrcTy->isPointerTy() && 3133 "Casting from a value that is not first-class type"); 3134 return PtrToInt; // ptr -> int 3135 } 3136 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 3137 if (SrcTy->isIntegerTy()) { // Casting from integral 3138 if (SrcIsSigned) 3139 return SIToFP; // sint -> FP 3140 else 3141 return UIToFP; // uint -> FP 3142 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 3143 if (DestBits < SrcBits) { 3144 return FPTrunc; // FP -> smaller FP 3145 } else if (DestBits > SrcBits) { 3146 return FPExt; // FP -> larger FP 3147 } else { 3148 return BitCast; // same size, no-op cast 3149 } 3150 } else if (SrcTy->isVectorTy()) { 3151 assert(DestBits == SrcBits && 3152 "Casting vector to floating point of different width"); 3153 return BitCast; // same size, no-op cast 3154 } 3155 llvm_unreachable("Casting pointer or non-first class to float"); 3156 } else if (DestTy->isVectorTy()) { 3157 assert(DestBits == SrcBits && 3158 "Illegal cast to vector (wrong type or size)"); 3159 return BitCast; 3160 } else if (DestTy->isPointerTy()) { 3161 if (SrcTy->isPointerTy()) { 3162 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 3163 return AddrSpaceCast; 3164 return BitCast; // ptr -> ptr 3165 } else if (SrcTy->isIntegerTy()) { 3166 return IntToPtr; // int -> ptr 3167 } 3168 llvm_unreachable("Casting pointer to other than pointer or int"); 3169 } else if (DestTy->isX86_MMXTy()) { 3170 if (SrcTy->isVectorTy()) { 3171 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 3172 return BitCast; // 64-bit vector to MMX 3173 } 3174 llvm_unreachable("Illegal cast to X86_MMX"); 3175 } 3176 llvm_unreachable("Casting to type that is not first-class"); 3177 } 3178 3179 //===----------------------------------------------------------------------===// 3180 // CastInst SubClass Constructors 3181 //===----------------------------------------------------------------------===// 3182 3183 /// Check that the construction parameters for a CastInst are correct. This 3184 /// could be broken out into the separate constructors but it is useful to have 3185 /// it in one place and to eliminate the redundant code for getting the sizes 3186 /// of the types involved. 
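/// For example, castIsValid(Instruction::Trunc, S, Ty) requires S and Ty to be
/// integers (or integer vectors of the same length) with the source strictly
/// wider than the destination; any other combination simply returns false
/// rather than asserting.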
3187 bool 3188 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 3189 // Check for type sanity on the arguments 3190 Type *SrcTy = S->getType(); 3191 3192 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 3193 SrcTy->isAggregateType() || DstTy->isAggregateType()) 3194 return false; 3195 3196 // Get the size of the types in bits, we'll need this later 3197 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 3198 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 3199 3200 // If these are vector types, get the lengths of the vectors (using zero for 3201 // scalar types means that checking that vector lengths match also checks that 3202 // scalars are not being converted to vectors or vectors to scalars). 3203 unsigned SrcLength = SrcTy->isVectorTy() ? 3204 cast<VectorType>(SrcTy)->getNumElements() : 0; 3205 unsigned DstLength = DstTy->isVectorTy() ? 3206 cast<VectorType>(DstTy)->getNumElements() : 0; 3207 3208 // Switch on the opcode provided 3209 switch (op) { 3210 default: return false; // This is an input error 3211 case Instruction::Trunc: 3212 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3213 SrcLength == DstLength && SrcBitSize > DstBitSize; 3214 case Instruction::ZExt: 3215 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3216 SrcLength == DstLength && SrcBitSize < DstBitSize; 3217 case Instruction::SExt: 3218 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3219 SrcLength == DstLength && SrcBitSize < DstBitSize; 3220 case Instruction::FPTrunc: 3221 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3222 SrcLength == DstLength && SrcBitSize > DstBitSize; 3223 case Instruction::FPExt: 3224 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3225 SrcLength == DstLength && SrcBitSize < DstBitSize; 3226 case Instruction::UIToFP: 3227 case Instruction::SIToFP: 3228 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3229 SrcLength == DstLength; 3230 case Instruction::FPToUI: 3231 case Instruction::FPToSI: 3232 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3233 SrcLength == DstLength; 3234 case Instruction::PtrToInt: 3235 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3236 return false; 3237 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3238 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3239 return false; 3240 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 3241 case Instruction::IntToPtr: 3242 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3243 return false; 3244 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3245 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3246 return false; 3247 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 3248 case Instruction::BitCast: { 3249 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3250 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3251 3252 // BitCast implies a no-op cast of type only. No bits change. 3253 // However, you can't cast pointers to anything but pointers. 3254 if (!SrcPtrTy != !DstPtrTy) 3255 return false; 3256 3257 // For non-pointer cases, the cast is okay if the source and destination bit 3258 // widths are identical. 3259 if (!SrcPtrTy) 3260 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3261 3262 // If both are pointers then the address spaces must match. 
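    // (A pointer cast that changes the address space must be expressed as an
    // addrspacecast instead; that opcode is validated separately below.)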
3263 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3264 return false; 3265 3266 // A vector of pointers must have the same number of elements. 3267 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 3268 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 3269 if (SrcVecTy && DstVecTy) 3270 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3271 if (SrcVecTy) 3272 return SrcVecTy->getNumElements() == 1; 3273 if (DstVecTy) 3274 return DstVecTy->getNumElements() == 1; 3275 3276 return true; 3277 } 3278 case Instruction::AddrSpaceCast: { 3279 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3280 if (!SrcPtrTy) 3281 return false; 3282 3283 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3284 if (!DstPtrTy) 3285 return false; 3286 3287 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3288 return false; 3289 3290 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3291 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3292 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3293 3294 return false; 3295 } 3296 3297 return true; 3298 } 3299 } 3300 } 3301 3302 TruncInst::TruncInst( 3303 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3304 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3305 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3306 } 3307 3308 TruncInst::TruncInst( 3309 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3310 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3311 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3312 } 3313 3314 ZExtInst::ZExtInst( 3315 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3316 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3317 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3318 } 3319 3320 ZExtInst::ZExtInst( 3321 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3322 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3323 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3324 } 3325 SExtInst::SExtInst( 3326 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3327 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3328 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3329 } 3330 3331 SExtInst::SExtInst( 3332 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3333 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3334 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3335 } 3336 3337 FPTruncInst::FPTruncInst( 3338 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3339 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3340 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3341 } 3342 3343 FPTruncInst::FPTruncInst( 3344 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3345 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3346 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3347 } 3348 3349 FPExtInst::FPExtInst( 3350 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3351 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3352 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3353 } 3354 3355 FPExtInst::FPExtInst( 3356 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3357 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3358 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3359 } 3360 3361 UIToFPInst::UIToFPInst( 3362 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3363 ) : 
CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3364 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3365 } 3366 3367 UIToFPInst::UIToFPInst( 3368 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3369 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3370 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3371 } 3372 3373 SIToFPInst::SIToFPInst( 3374 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3375 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3376 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3377 } 3378 3379 SIToFPInst::SIToFPInst( 3380 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3381 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3382 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3383 } 3384 3385 FPToUIInst::FPToUIInst( 3386 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3387 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3388 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3389 } 3390 3391 FPToUIInst::FPToUIInst( 3392 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3393 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3394 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3395 } 3396 3397 FPToSIInst::FPToSIInst( 3398 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3399 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3400 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3401 } 3402 3403 FPToSIInst::FPToSIInst( 3404 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3405 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3406 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3407 } 3408 3409 PtrToIntInst::PtrToIntInst( 3410 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3411 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3412 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3413 } 3414 3415 PtrToIntInst::PtrToIntInst( 3416 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3417 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3418 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3419 } 3420 3421 IntToPtrInst::IntToPtrInst( 3422 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3423 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3424 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3425 } 3426 3427 IntToPtrInst::IntToPtrInst( 3428 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3429 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3430 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3431 } 3432 3433 BitCastInst::BitCastInst( 3434 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3435 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3436 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3437 } 3438 3439 BitCastInst::BitCastInst( 3440 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3441 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3443 } 3444 3445 AddrSpaceCastInst::AddrSpaceCastInst( 3446 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3447 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3449 } 3450 3451 AddrSpaceCastInst::AddrSpaceCastInst( 3452 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3453 ) : CastInst(Ty, 
AddrSpaceCast, S, Name, InsertAtEnd) { 3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3455 } 3456 3457 //===----------------------------------------------------------------------===// 3458 // CmpInst Classes 3459 //===----------------------------------------------------------------------===// 3460 3461 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3462 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3463 Instruction *FlagsSource) 3464 : Instruction(ty, op, 3465 OperandTraits<CmpInst>::op_begin(this), 3466 OperandTraits<CmpInst>::operands(this), 3467 InsertBefore) { 3468 Op<0>() = LHS; 3469 Op<1>() = RHS; 3470 setPredicate((Predicate)predicate); 3471 setName(Name); 3472 if (FlagsSource) 3473 copyIRFlags(FlagsSource); 3474 } 3475 3476 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3477 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3478 : Instruction(ty, op, 3479 OperandTraits<CmpInst>::op_begin(this), 3480 OperandTraits<CmpInst>::operands(this), 3481 InsertAtEnd) { 3482 Op<0>() = LHS; 3483 Op<1>() = RHS; 3484 setPredicate((Predicate)predicate); 3485 setName(Name); 3486 } 3487 3488 CmpInst * 3489 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3490 const Twine &Name, Instruction *InsertBefore) { 3491 if (Op == Instruction::ICmp) { 3492 if (InsertBefore) 3493 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3494 S1, S2, Name); 3495 else 3496 return new ICmpInst(CmpInst::Predicate(predicate), 3497 S1, S2, Name); 3498 } 3499 3500 if (InsertBefore) 3501 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3502 S1, S2, Name); 3503 else 3504 return new FCmpInst(CmpInst::Predicate(predicate), 3505 S1, S2, Name); 3506 } 3507 3508 CmpInst * 3509 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3510 const Twine &Name, BasicBlock *InsertAtEnd) { 3511 if (Op == Instruction::ICmp) { 3512 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3513 S1, S2, Name); 3514 } 3515 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3516 S1, S2, Name); 3517 } 3518 3519 void CmpInst::swapOperands() { 3520 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3521 IC->swapOperands(); 3522 else 3523 cast<FCmpInst>(this)->swapOperands(); 3524 } 3525 3526 bool CmpInst::isCommutative() const { 3527 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3528 return IC->isCommutative(); 3529 return cast<FCmpInst>(this)->isCommutative(); 3530 } 3531 3532 bool CmpInst::isEquality() const { 3533 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3534 return IC->isEquality(); 3535 return cast<FCmpInst>(this)->isEquality(); 3536 } 3537 3538 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3539 switch (pred) { 3540 default: llvm_unreachable("Unknown cmp predicate!"); 3541 case ICMP_EQ: return ICMP_NE; 3542 case ICMP_NE: return ICMP_EQ; 3543 case ICMP_UGT: return ICMP_ULE; 3544 case ICMP_ULT: return ICMP_UGE; 3545 case ICMP_UGE: return ICMP_ULT; 3546 case ICMP_ULE: return ICMP_UGT; 3547 case ICMP_SGT: return ICMP_SLE; 3548 case ICMP_SLT: return ICMP_SGE; 3549 case ICMP_SGE: return ICMP_SLT; 3550 case ICMP_SLE: return ICMP_SGT; 3551 3552 case FCMP_OEQ: return FCMP_UNE; 3553 case FCMP_ONE: return FCMP_UEQ; 3554 case FCMP_OGT: return FCMP_ULE; 3555 case FCMP_OLT: return FCMP_UGE; 3556 case FCMP_OGE: return FCMP_ULT; 3557 case FCMP_OLE: return FCMP_UGT; 3558 case FCMP_UEQ: return FCMP_ONE; 3559 case FCMP_UNE: return FCMP_OEQ; 3560 case 
FCMP_UGT: return FCMP_OLE; 3561 case FCMP_ULT: return FCMP_OGE; 3562 case FCMP_UGE: return FCMP_OLT; 3563 case FCMP_ULE: return FCMP_OGT; 3564 case FCMP_ORD: return FCMP_UNO; 3565 case FCMP_UNO: return FCMP_ORD; 3566 case FCMP_TRUE: return FCMP_FALSE; 3567 case FCMP_FALSE: return FCMP_TRUE; 3568 } 3569 } 3570 3571 StringRef CmpInst::getPredicateName(Predicate Pred) { 3572 switch (Pred) { 3573 default: return "unknown"; 3574 case FCmpInst::FCMP_FALSE: return "false"; 3575 case FCmpInst::FCMP_OEQ: return "oeq"; 3576 case FCmpInst::FCMP_OGT: return "ogt"; 3577 case FCmpInst::FCMP_OGE: return "oge"; 3578 case FCmpInst::FCMP_OLT: return "olt"; 3579 case FCmpInst::FCMP_OLE: return "ole"; 3580 case FCmpInst::FCMP_ONE: return "one"; 3581 case FCmpInst::FCMP_ORD: return "ord"; 3582 case FCmpInst::FCMP_UNO: return "uno"; 3583 case FCmpInst::FCMP_UEQ: return "ueq"; 3584 case FCmpInst::FCMP_UGT: return "ugt"; 3585 case FCmpInst::FCMP_UGE: return "uge"; 3586 case FCmpInst::FCMP_ULT: return "ult"; 3587 case FCmpInst::FCMP_ULE: return "ule"; 3588 case FCmpInst::FCMP_UNE: return "une"; 3589 case FCmpInst::FCMP_TRUE: return "true"; 3590 case ICmpInst::ICMP_EQ: return "eq"; 3591 case ICmpInst::ICMP_NE: return "ne"; 3592 case ICmpInst::ICMP_SGT: return "sgt"; 3593 case ICmpInst::ICMP_SGE: return "sge"; 3594 case ICmpInst::ICMP_SLT: return "slt"; 3595 case ICmpInst::ICMP_SLE: return "sle"; 3596 case ICmpInst::ICMP_UGT: return "ugt"; 3597 case ICmpInst::ICMP_UGE: return "uge"; 3598 case ICmpInst::ICMP_ULT: return "ult"; 3599 case ICmpInst::ICMP_ULE: return "ule"; 3600 } 3601 } 3602 3603 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3604 switch (pred) { 3605 default: llvm_unreachable("Unknown icmp predicate!"); 3606 case ICMP_EQ: case ICMP_NE: 3607 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3608 return pred; 3609 case ICMP_UGT: return ICMP_SGT; 3610 case ICMP_ULT: return ICMP_SLT; 3611 case ICMP_UGE: return ICMP_SGE; 3612 case ICMP_ULE: return ICMP_SLE; 3613 } 3614 } 3615 3616 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3617 switch (pred) { 3618 default: llvm_unreachable("Unknown icmp predicate!"); 3619 case ICMP_EQ: case ICMP_NE: 3620 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3621 return pred; 3622 case ICMP_SGT: return ICMP_UGT; 3623 case ICMP_SLT: return ICMP_ULT; 3624 case ICMP_SGE: return ICMP_UGE; 3625 case ICMP_SLE: return ICMP_ULE; 3626 } 3627 } 3628 3629 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3630 switch (pred) { 3631 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3632 case ICMP_SGT: return ICMP_SGE; 3633 case ICMP_SLT: return ICMP_SLE; 3634 case ICMP_SGE: return ICMP_SGT; 3635 case ICMP_SLE: return ICMP_SLT; 3636 case ICMP_UGT: return ICMP_UGE; 3637 case ICMP_ULT: return ICMP_ULE; 3638 case ICMP_UGE: return ICMP_UGT; 3639 case ICMP_ULE: return ICMP_ULT; 3640 3641 case FCMP_OGT: return FCMP_OGE; 3642 case FCMP_OLT: return FCMP_OLE; 3643 case FCMP_OGE: return FCMP_OGT; 3644 case FCMP_OLE: return FCMP_OLT; 3645 case FCMP_UGT: return FCMP_UGE; 3646 case FCMP_ULT: return FCMP_ULE; 3647 case FCMP_UGE: return FCMP_UGT; 3648 case FCMP_ULE: return FCMP_ULT; 3649 } 3650 } 3651 3652 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3653 switch (pred) { 3654 default: llvm_unreachable("Unknown cmp predicate!"); 3655 case ICMP_EQ: case ICMP_NE: 3656 return pred; 3657 case ICMP_SGT: return ICMP_SLT; 3658 case ICMP_SLT: return ICMP_SGT; 3659 case ICMP_SGE: return 
    ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;

  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE: return true;
  }
}

bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
  case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
  case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch (predicate) {
  default: return false;
  case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
  case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch (predicate) {
  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
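  // For example, (A >u B) implies both (A != B) and (A >=u B), which is what
  // the ICMP_UGT case below encodes.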
3766 if (Pred1 == Pred2) 3767 return true; 3768 3769 switch (Pred1) { 3770 default: 3771 break; 3772 case ICMP_EQ: 3773 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3774 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3775 Pred2 == ICMP_SLE; 3776 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3777 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3778 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3779 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3780 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3781 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3782 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3783 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3784 } 3785 return false; 3786 } 3787 3788 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3789 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3790 } 3791 3792 //===----------------------------------------------------------------------===// 3793 // SwitchInst Implementation 3794 //===----------------------------------------------------------------------===// 3795 3796 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3797 assert(Value && Default && NumReserved); 3798 ReservedSpace = NumReserved; 3799 setNumHungOffUseOperands(2); 3800 allocHungoffUses(ReservedSpace); 3801 3802 Op<0>() = Value; 3803 Op<1>() = Default; 3804 } 3805 3806 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3807 /// switch on and a default destination. The number of additional cases can 3808 /// be specified here to make memory allocation more efficient. This 3809 /// constructor can also autoinsert before another instruction. 3810 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3811 Instruction *InsertBefore) 3812 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3813 nullptr, 0, InsertBefore) { 3814 init(Value, Default, 2+NumCases*2); 3815 } 3816 3817 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3818 /// switch on and a default destination. The number of additional cases can 3819 /// be specified here to make memory allocation more efficient. This 3820 /// constructor also autoinserts at the end of the specified BasicBlock. 3821 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3822 BasicBlock *InsertAtEnd) 3823 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3824 nullptr, 0, InsertAtEnd) { 3825 init(Value, Default, 2+NumCases*2); 3826 } 3827 3828 SwitchInst::SwitchInst(const SwitchInst &SI) 3829 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3830 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3831 setNumHungOffUseOperands(SI.getNumOperands()); 3832 Use *OL = getOperandList(); 3833 const Use *InOL = SI.getOperandList(); 3834 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3835 OL[i] = InOL[i]; 3836 OL[i+1] = InOL[i+1]; 3837 } 3838 SubclassOptionalData = SI.SubclassOptionalData; 3839 } 3840 3841 /// addCase - Add an entry to the switch instruction... 3842 /// 3843 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3844 unsigned NewCaseIdx = getNumCases(); 3845 unsigned OpNo = getNumOperands(); 3846 if (OpNo+2 > ReservedSpace) 3847 growOperands(); // Get more space! 3848 // Initialize some new operands. 
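  // The operand list is laid out as [Condition, DefaultDest, Val0, Dest0,
  // Val1, Dest1, ...], so each case occupies two slots after the first two
  // operands.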
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

MDNode *
SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
    if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
      if (MDName->getString() == "branch_weights")
        return ProfileData;
  return nullptr;
}

MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes =
      all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights.getValue().size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getProfBranchWeightsMD(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
    ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
    uint32_t CW = C->getValue().getZExtValue();
    Weights.push_back(CW);
  }
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase(CaseIt)
    // removes cases.
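    // Weight index 0 corresponds to the default destination, so case weights
    // are offset by one below.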
3946 Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back(); 3947 Weights.getValue().pop_back(); 3948 } 3949 return SI.removeCase(I); 3950 } 3951 3952 void SwitchInstProfUpdateWrapper::addCase( 3953 ConstantInt *OnVal, BasicBlock *Dest, 3954 SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 3955 SI.addCase(OnVal, Dest); 3956 3957 if (!Weights && W && *W) { 3958 Changed = true; 3959 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 3960 Weights.getValue()[SI.getNumSuccessors() - 1] = *W; 3961 } else if (Weights) { 3962 Changed = true; 3963 Weights.getValue().push_back(W ? *W : 0); 3964 } 3965 if (Weights) 3966 assert(SI.getNumSuccessors() == Weights->size() && 3967 "num of prof branch_weights must accord with num of successors"); 3968 } 3969 3970 SymbolTableList<Instruction>::iterator 3971 SwitchInstProfUpdateWrapper::eraseFromParent() { 3972 // Instruction is erased. Mark as unchanged to not touch it in the destructor. 3973 Changed = false; 3974 if (Weights) 3975 Weights->resize(0); 3976 return SI.eraseFromParent(); 3977 } 3978 3979 SwitchInstProfUpdateWrapper::CaseWeightOpt 3980 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) { 3981 if (!Weights) 3982 return None; 3983 return Weights.getValue()[idx]; 3984 } 3985 3986 void SwitchInstProfUpdateWrapper::setSuccessorWeight( 3987 unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) { 3988 if (!W) 3989 return; 3990 3991 if (!Weights && *W) 3992 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0); 3993 3994 if (Weights) { 3995 auto &OldW = Weights.getValue()[idx]; 3996 if (*W != OldW) { 3997 Changed = true; 3998 OldW = *W; 3999 } 4000 } 4001 } 4002 4003 SwitchInstProfUpdateWrapper::CaseWeightOpt 4004 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI, 4005 unsigned idx) { 4006 if (MDNode *ProfileData = getProfBranchWeightsMD(SI)) 4007 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1) 4008 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1)) 4009 ->getValue() 4010 .getZExtValue(); 4011 4012 return None; 4013 } 4014 4015 //===----------------------------------------------------------------------===// 4016 // IndirectBrInst Implementation 4017 //===----------------------------------------------------------------------===// 4018 4019 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 4020 assert(Address && Address->getType()->isPointerTy() && 4021 "Address of indirectbr must be a pointer"); 4022 ReservedSpace = 1+NumDests; 4023 setNumHungOffUseOperands(1); 4024 allocHungoffUses(ReservedSpace); 4025 4026 Op<0>() = Address; 4027 } 4028 4029 4030 /// growOperands - grow operands - This grows the operand list in response 4031 /// to a push_back style of operation. This grows the number of ops by 2 times. 
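/// Doubling keeps repeated addDestination() calls amortized constant time,
/// analogous to the growth strategies of PHINode and SwitchInst above.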
4032 /// 4033 void IndirectBrInst::growOperands() { 4034 unsigned e = getNumOperands(); 4035 unsigned NumOps = e*2; 4036 4037 ReservedSpace = NumOps; 4038 growHungoffUses(ReservedSpace); 4039 } 4040 4041 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4042 Instruction *InsertBefore) 4043 : Instruction(Type::getVoidTy(Address->getContext()), 4044 Instruction::IndirectBr, nullptr, 0, InsertBefore) { 4045 init(Address, NumCases); 4046 } 4047 4048 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 4049 BasicBlock *InsertAtEnd) 4050 : Instruction(Type::getVoidTy(Address->getContext()), 4051 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) { 4052 init(Address, NumCases); 4053 } 4054 4055 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 4056 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 4057 nullptr, IBI.getNumOperands()) { 4058 allocHungoffUses(IBI.getNumOperands()); 4059 Use *OL = getOperandList(); 4060 const Use *InOL = IBI.getOperandList(); 4061 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 4062 OL[i] = InOL[i]; 4063 SubclassOptionalData = IBI.SubclassOptionalData; 4064 } 4065 4066 /// addDestination - Add a destination. 4067 /// 4068 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 4069 unsigned OpNo = getNumOperands(); 4070 if (OpNo+1 > ReservedSpace) 4071 growOperands(); // Get more space! 4072 // Initialize some new operands. 4073 assert(OpNo < ReservedSpace && "Growing didn't work!"); 4074 setNumHungOffUseOperands(OpNo+1); 4075 getOperandList()[OpNo] = DestBB; 4076 } 4077 4078 /// removeDestination - This method removes the specified successor from the 4079 /// indirectbr instruction. 4080 void IndirectBrInst::removeDestination(unsigned idx) { 4081 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 4082 4083 unsigned NumOps = getNumOperands(); 4084 Use *OL = getOperandList(); 4085 4086 // Replace this value with the last one. 4087 OL[idx+1] = OL[NumOps-1]; 4088 4089 // Nuke the last value. 4090 OL[NumOps-1].set(nullptr); 4091 setNumHungOffUseOperands(NumOps-1); 4092 } 4093 4094 //===----------------------------------------------------------------------===// 4095 // cloneImpl() implementations 4096 //===----------------------------------------------------------------------===// 4097 4098 // Define these methods here so vtables don't get emitted into every translation 4099 // unit that uses these classes. 
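// The clones produced below are created detached (no parent block) and
// unnamed; callers such as Instruction::clone() are responsible for inserting
// them and copying over any additional state they need.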
4100 4101 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 4102 return new (getNumOperands()) GetElementPtrInst(*this); 4103 } 4104 4105 UnaryOperator *UnaryOperator::cloneImpl() const { 4106 return Create(getOpcode(), Op<0>()); 4107 } 4108 4109 BinaryOperator *BinaryOperator::cloneImpl() const { 4110 return Create(getOpcode(), Op<0>(), Op<1>()); 4111 } 4112 4113 FCmpInst *FCmpInst::cloneImpl() const { 4114 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 4115 } 4116 4117 ICmpInst *ICmpInst::cloneImpl() const { 4118 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 4119 } 4120 4121 ExtractValueInst *ExtractValueInst::cloneImpl() const { 4122 return new ExtractValueInst(*this); 4123 } 4124 4125 InsertValueInst *InsertValueInst::cloneImpl() const { 4126 return new InsertValueInst(*this); 4127 } 4128 4129 AllocaInst *AllocaInst::cloneImpl() const { 4130 AllocaInst *Result = 4131 new AllocaInst(getAllocatedType(), getType()->getAddressSpace(), 4132 (Value *)getOperand(0), MaybeAlign(getAlignment())); 4133 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 4134 Result->setSwiftError(isSwiftError()); 4135 return Result; 4136 } 4137 4138 LoadInst *LoadInst::cloneImpl() const { 4139 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(), 4140 MaybeAlign(getAlignment()), getOrdering(), 4141 getSyncScopeID()); 4142 } 4143 4144 StoreInst *StoreInst::cloneImpl() const { 4145 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), 4146 MaybeAlign(getAlignment()), getOrdering(), 4147 getSyncScopeID()); 4148 } 4149 4150 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 4151 AtomicCmpXchgInst *Result = 4152 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 4153 getSuccessOrdering(), getFailureOrdering(), 4154 getSyncScopeID()); 4155 Result->setVolatile(isVolatile()); 4156 Result->setWeak(isWeak()); 4157 return Result; 4158 } 4159 4160 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 4161 AtomicRMWInst *Result = 4162 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1), 4163 getOrdering(), getSyncScopeID()); 4164 Result->setVolatile(isVolatile()); 4165 return Result; 4166 } 4167 4168 FenceInst *FenceInst::cloneImpl() const { 4169 return new FenceInst(getContext(), getOrdering(), getSyncScopeID()); 4170 } 4171 4172 TruncInst *TruncInst::cloneImpl() const { 4173 return new TruncInst(getOperand(0), getType()); 4174 } 4175 4176 ZExtInst *ZExtInst::cloneImpl() const { 4177 return new ZExtInst(getOperand(0), getType()); 4178 } 4179 4180 SExtInst *SExtInst::cloneImpl() const { 4181 return new SExtInst(getOperand(0), getType()); 4182 } 4183 4184 FPTruncInst *FPTruncInst::cloneImpl() const { 4185 return new FPTruncInst(getOperand(0), getType()); 4186 } 4187 4188 FPExtInst *FPExtInst::cloneImpl() const { 4189 return new FPExtInst(getOperand(0), getType()); 4190 } 4191 4192 UIToFPInst *UIToFPInst::cloneImpl() const { 4193 return new UIToFPInst(getOperand(0), getType()); 4194 } 4195 4196 SIToFPInst *SIToFPInst::cloneImpl() const { 4197 return new SIToFPInst(getOperand(0), getType()); 4198 } 4199 4200 FPToUIInst *FPToUIInst::cloneImpl() const { 4201 return new FPToUIInst(getOperand(0), getType()); 4202 } 4203 4204 FPToSIInst *FPToSIInst::cloneImpl() const { 4205 return new FPToSIInst(getOperand(0), getType()); 4206 } 4207 4208 PtrToIntInst *PtrToIntInst::cloneImpl() const { 4209 return new PtrToIntInst(getOperand(0), getType()); 4210 } 4211 4212 IntToPtrInst *IntToPtrInst::cloneImpl() const { 4213 return new IntToPtrInst(getOperand(0), 
getType()); 4214 } 4215 4216 BitCastInst *BitCastInst::cloneImpl() const { 4217 return new BitCastInst(getOperand(0), getType()); 4218 } 4219 4220 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 4221 return new AddrSpaceCastInst(getOperand(0), getType()); 4222 } 4223 4224 CallInst *CallInst::cloneImpl() const { 4225 if (hasOperandBundles()) { 4226 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4227 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 4228 } 4229 return new(getNumOperands()) CallInst(*this); 4230 } 4231 4232 SelectInst *SelectInst::cloneImpl() const { 4233 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4234 } 4235 4236 VAArgInst *VAArgInst::cloneImpl() const { 4237 return new VAArgInst(getOperand(0), getType()); 4238 } 4239 4240 ExtractElementInst *ExtractElementInst::cloneImpl() const { 4241 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 4242 } 4243 4244 InsertElementInst *InsertElementInst::cloneImpl() const { 4245 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 4246 } 4247 4248 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 4249 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 4250 } 4251 4252 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 4253 4254 LandingPadInst *LandingPadInst::cloneImpl() const { 4255 return new LandingPadInst(*this); 4256 } 4257 4258 ReturnInst *ReturnInst::cloneImpl() const { 4259 return new(getNumOperands()) ReturnInst(*this); 4260 } 4261 4262 BranchInst *BranchInst::cloneImpl() const { 4263 return new(getNumOperands()) BranchInst(*this); 4264 } 4265 4266 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 4267 4268 IndirectBrInst *IndirectBrInst::cloneImpl() const { 4269 return new IndirectBrInst(*this); 4270 } 4271 4272 InvokeInst *InvokeInst::cloneImpl() const { 4273 if (hasOperandBundles()) { 4274 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4275 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 4276 } 4277 return new(getNumOperands()) InvokeInst(*this); 4278 } 4279 4280 CallBrInst *CallBrInst::cloneImpl() const { 4281 if (hasOperandBundles()) { 4282 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 4283 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this); 4284 } 4285 return new (getNumOperands()) CallBrInst(*this); 4286 } 4287 4288 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 4289 4290 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 4291 return new (getNumOperands()) CleanupReturnInst(*this); 4292 } 4293 4294 CatchReturnInst *CatchReturnInst::cloneImpl() const { 4295 return new (getNumOperands()) CatchReturnInst(*this); 4296 } 4297 4298 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 4299 return new CatchSwitchInst(*this); 4300 } 4301 4302 FuncletPadInst *FuncletPadInst::cloneImpl() const { 4303 return new (getNumOperands()) FuncletPadInst(*this); 4304 } 4305 4306 UnreachableInst *UnreachableInst::cloneImpl() const { 4307 LLVMContext &Context = getContext(); 4308 return new UnreachableInst(Context); 4309 } 4310