//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

/// Return the total number of bits reserved by this alloca, if it is a
/// compile-time constant: the allocated type's alloc size, scaled by the
/// array count for array allocas. Returns None when the array size is not
/// a ConstantInt (i.e. a dynamic alloca).
Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  // Bits occupied by a single element of the allocated type.
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None; // Dynamic array size: total size unknown statically.
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

// The callee is always stored as the last operand of the underlying
// CallBase instruction.
User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select: condition must be <n x i1>, and both selected values
    // must be vectors with the same element count as the condition.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    // Scalar select: condition must be plain i1.
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

// Copy constructor: duplicate both the incoming values (operands) and the
// incoming blocks, which PHIs keep in co-allocated hung-off use storage.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value (drop its use of the operand before shrinking).
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType()); // Every incoming value is this PHI.
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    // Self-references and undefs are ignored; every remaining incoming
    // value must be identical.
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false; // Two distinct non-undef incoming values.
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

// Copy constructor: clauses live in hung-off use storage, so copy them
// element-wise and preserve the cleanup flag.
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

// Shared constructor body: reserve clause slots (none in use yet), name the
// instruction, and default to a non-cleanup landingpad.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return; // Still room; nothing to do.
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// Append a catch/filter clause, growing the hung-off operand list if needed.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  // Only CallBr has a dynamic operand count: its indirect destinations
  // plus the default destination.
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledValue();
  // Direct calls (functions, or constants such as bitcast aliases) are not
  // indirect; neither is inline asm on a CallInst.
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    if (CI->isInlineAsm())
      return false;
  return true;
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
278 bool CallBase::isMustTailCall() const { 279 if (auto *CI = dyn_cast<CallInst>(this)) 280 return CI->isMustTailCall(); 281 return false; 282 } 283 284 /// Tests if this call site is marked as a tail call. 285 bool CallBase::isTailCall() const { 286 if (auto *CI = dyn_cast<CallInst>(this)) 287 return CI->isTailCall(); 288 return false; 289 } 290 291 Intrinsic::ID CallBase::getIntrinsicID() const { 292 if (auto *F = getCalledFunction()) 293 return F->getIntrinsicID(); 294 return Intrinsic::not_intrinsic; 295 } 296 297 bool CallBase::isReturnNonNull() const { 298 if (hasRetAttr(Attribute::NonNull)) 299 return true; 300 301 if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 && 302 !NullPointerIsDefined(getCaller(), 303 getType()->getPointerAddressSpace())) 304 return true; 305 306 return false; 307 } 308 309 Value *CallBase::getReturnedArgOperand() const { 310 unsigned Index; 311 312 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index) 313 return getArgOperand(Index - AttributeList::FirstArgIndex); 314 if (const Function *F = getCalledFunction()) 315 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) && 316 Index) 317 return getArgOperand(Index - AttributeList::FirstArgIndex); 318 319 return nullptr; 320 } 321 322 bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const { 323 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind)) 324 return true; 325 326 // Look at the callee, if available. 327 if (const Function *F = getCalledFunction()) 328 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind); 329 return false; 330 } 331 332 /// Determine whether the argument or parameter has the given attribute. 
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  // Call-site parameter attributes take precedence; fall back to the
  // callee's declared attributes when available.
  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

// Check a function attribute on the callee only (enum-kind variant).
bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

// Check a function attribute on the callee only (string-kind variant).
bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

// Materialize every attached operand bundle as an OperandBundleDef, appending
// to Defs in bundle order.
void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

// Copy the bundle inputs into the operand list starting at BeginIndex, and
// fill in this call's BundleOpInfo records (interned tag plus [Begin, End)
// operand range per bundle). Returns the iterator just past the last bundle
// operand written.
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  // First pass: splat all bundle inputs into the operand list.
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  // Second pass: record, for each bundle, its interned tag and the operand
  // index range its inputs occupy.
  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

// Initialize a call with arguments and operand bundles. The operand layout is
// [args..., bundle operands..., callee], so the callee is the final operand.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // +1 accounts for the trailing callee operand.
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

// Initialize a zero-argument call: the only operand is the callee.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

// Copy constructor: operands are co-allocated before the instruction, so the
// operand pointer is computed back from op_end. Bundle info records are
// copied alongside the operands.
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

// Clone CI but with a (possibly different) set of operand bundles OpB,
// preserving tail-call kind, calling convention, attributes and debug info.
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" meta data for call instruction is
// transfered to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  // Only "branch_weights" (call count) and "VP" (value profile) metadata
  // are rewritten; anything else is left untouched.
  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0)); // Keep the kind string.
  // 128-bit arithmetic so Count * S cannot overflow before the divide.
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(ConstantInt::get(
        Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
  } else if (ProfDataName->getString().equals("VP"))
    // VP metadata alternates (key, count) pairs starting at operand 1.
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128,
                mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
                    ->getValue()
                    .getZExtValue());
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

// Shared worker for all CreateMalloc overloads. Exactly one of InsertBefore /
// InsertAtEnd must be non-null; note that in the InsertAtEnd case the final
// bitcast (when one is needed) is NOT inserted into the block — that is the
// caller's responsibility.
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    // Normalize the array size to the pointer-sized integer type.
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      // NOTE: this bitcast is deliberately left un-inserted (see above).
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
642 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, 643 Type *IntPtrTy, Type *AllocTy, 644 Value *AllocSize, Value *ArraySize, 645 Function *MallocF, const Twine &Name) { 646 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, 647 ArraySize, None, MallocF, Name); 648 } 649 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, 650 Type *IntPtrTy, Type *AllocTy, 651 Value *AllocSize, Value *ArraySize, 652 ArrayRef<OperandBundleDef> OpB, 653 Function *MallocF, const Twine &Name) { 654 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, 655 ArraySize, OpB, MallocF, Name); 656 } 657 658 static Instruction *createFree(Value *Source, 659 ArrayRef<OperandBundleDef> Bundles, 660 Instruction *InsertBefore, 661 BasicBlock *InsertAtEnd) { 662 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && 663 "createFree needs either InsertBefore or InsertAtEnd"); 664 assert(Source->getType()->isPointerTy() && 665 "Can not free something of nonpointer type!"); 666 667 BasicBlock *BB = InsertBefore ? 
InsertBefore->getParent() : InsertAtEnd; 668 Module *M = BB->getParent()->getParent(); 669 670 Type *VoidTy = Type::getVoidTy(M->getContext()); 671 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext()); 672 // prototype free as "void free(void*)" 673 FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy); 674 CallInst *Result = nullptr; 675 Value *PtrCast = Source; 676 if (InsertBefore) { 677 if (Source->getType() != IntPtrTy) 678 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore); 679 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore); 680 } else { 681 if (Source->getType() != IntPtrTy) 682 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd); 683 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, ""); 684 } 685 Result->setTailCall(); 686 if (Function *F = dyn_cast<Function>(FreeFunc.getCallee())) 687 Result->setCallingConv(F->getCallingConv()); 688 689 return Result; 690 } 691 692 /// CreateFree - Generate the IR for a call to the builtin free function. 693 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) { 694 return createFree(Source, None, InsertBefore, nullptr); 695 } 696 Instruction *CallInst::CreateFree(Value *Source, 697 ArrayRef<OperandBundleDef> Bundles, 698 Instruction *InsertBefore) { 699 return createFree(Source, Bundles, InsertBefore, nullptr); 700 } 701 702 /// CreateFree - Generate the IR for a call to the builtin free function. 703 /// Note: This function does not add the call to the basic block, that is the 704 /// responsibility of the caller. 
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

// Initialize an invoke. Operand layout is [args..., bundle operands...,
// normal dest, unwind dest, callee] — hence the "+ 3" in the final assert.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

// Copy constructor: operands are co-allocated, so compute the operand pointer
// back from op_end; bundle info records are copied alongside.
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

// Clone II with a (possibly different) set of operand bundles OpB, preserving
// destinations, calling convention, attributes and debug info.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
                                   II->getNormalDest(), II->getUnwindDest(),
                                   Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}


// The landingpad is the first non-PHI instruction of the unwind destination.
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
// CallBrInst Implementation
//===----------------------------------------------------------------------===//

// Initialize a callbr. Operand layout is [args..., bundle operands...,
// default dest, indirect dests..., callee] — hence "+ 2 + IndirectDests.size()"
// in the final assert.
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

// When indirect destination i is retargeted to B, rewrite any blockaddress
// arguments that referred to the old destination so they refer to B instead.
void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}

// Copy constructor: same co-allocated operand scheme as CallInst/InvokeInst,
// plus the indirect-destination count.
CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

// Clone CBI with a (possibly different) set of operand bundles OpB.
CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
                                    CBI->getCalledValue(),
                                    CBI->getDefaultDest(),
                                    CBI->getIndirectDests(),
                                    Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

// !!retVal is 0 or 1: a void return has no operands, a value return has one.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  // Subclass data encodes whether an unwind destination is present.
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

// Bit 0 of the subclass data records the presence of an unwind destination;
// operand 0 is the cleanuppad, operand 1 (if any) the unwind block.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst
Implementation 965 //===----------------------------------------------------------------------===// 966 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) { 967 Op<0>() = CatchPad; 968 Op<1>() = BB; 969 } 970 971 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI) 972 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet, 973 OperandTraits<CatchReturnInst>::op_begin(this), 2) { 974 Op<0>() = CRI.Op<0>(); 975 Op<1>() = CRI.Op<1>(); 976 } 977 978 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB, 979 Instruction *InsertBefore) 980 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet, 981 OperandTraits<CatchReturnInst>::op_begin(this), 2, 982 InsertBefore) { 983 init(CatchPad, BB); 984 } 985 986 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB, 987 BasicBlock *InsertAtEnd) 988 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet, 989 OperandTraits<CatchReturnInst>::op_begin(this), 2, 990 InsertAtEnd) { 991 init(CatchPad, BB); 992 } 993 994 //===----------------------------------------------------------------------===// 995 // CatchSwitchInst Implementation 996 //===----------------------------------------------------------------------===// 997 998 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 999 unsigned NumReservedValues, 1000 const Twine &NameStr, 1001 Instruction *InsertBefore) 1002 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 1003 InsertBefore) { 1004 if (UnwindDest) 1005 ++NumReservedValues; 1006 init(ParentPad, UnwindDest, NumReservedValues + 1); 1007 setName(NameStr); 1008 } 1009 1010 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 1011 unsigned NumReservedValues, 1012 const Twine &NameStr, BasicBlock *InsertAtEnd) 1013 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 1014 InsertAtEnd) { 1015 if (UnwindDest) 1016 ++NumReservedValues; 1017 
init(ParentPad, UnwindDest, NumReservedValues + 1); 1018 setName(NameStr); 1019 } 1020 1021 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI) 1022 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr, 1023 CSI.getNumOperands()) { 1024 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands()); 1025 setNumHungOffUseOperands(ReservedSpace); 1026 Use *OL = getOperandList(); 1027 const Use *InOL = CSI.getOperandList(); 1028 for (unsigned I = 1, E = ReservedSpace; I != E; ++I) 1029 OL[I] = InOL[I]; 1030 } 1031 1032 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest, 1033 unsigned NumReservedValues) { 1034 assert(ParentPad && NumReservedValues); 1035 1036 ReservedSpace = NumReservedValues; 1037 setNumHungOffUseOperands(UnwindDest ? 2 : 1); 1038 allocHungoffUses(ReservedSpace); 1039 1040 Op<0>() = ParentPad; 1041 if (UnwindDest) { 1042 setInstructionSubclassData(getSubclassDataFromInstruction() | 1); 1043 setUnwindDest(UnwindDest); 1044 } 1045 } 1046 1047 /// growOperands - grow operands - This grows the operand list in response to a 1048 /// push_back style of operation. This grows the number of ops by 2 times. 1049 void CatchSwitchInst::growOperands(unsigned Size) { 1050 unsigned NumOperands = getNumOperands(); 1051 assert(NumOperands >= 1); 1052 if (ReservedSpace >= NumOperands + Size) 1053 return; 1054 ReservedSpace = (NumOperands + Size / 2) * 2; 1055 growHungoffUses(ReservedSpace); 1056 } 1057 1058 void CatchSwitchInst::addHandler(BasicBlock *Handler) { 1059 unsigned OpNo = getNumOperands(); 1060 growOperands(1); 1061 assert(OpNo < ReservedSpace && "Growing didn't work!"); 1062 setNumHungOffUseOperands(getNumOperands() + 1); 1063 getOperandList()[OpNo] = Handler; 1064 } 1065 1066 void CatchSwitchInst::removeHandler(handler_iterator HI) { 1067 // Move all subsequent handlers up one. 
1068 Use *EndDst = op_end() - 1; 1069 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst) 1070 *CurDst = *(CurDst + 1); 1071 // Null out the last handler use. 1072 *EndDst = nullptr; 1073 1074 setNumHungOffUseOperands(getNumOperands() - 1); 1075 } 1076 1077 //===----------------------------------------------------------------------===// 1078 // FuncletPadInst Implementation 1079 //===----------------------------------------------------------------------===// 1080 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args, 1081 const Twine &NameStr) { 1082 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?"); 1083 llvm::copy(Args, op_begin()); 1084 setParentPad(ParentPad); 1085 setName(NameStr); 1086 } 1087 1088 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI) 1089 : Instruction(FPI.getType(), FPI.getOpcode(), 1090 OperandTraits<FuncletPadInst>::op_end(this) - 1091 FPI.getNumOperands(), 1092 FPI.getNumOperands()) { 1093 std::copy(FPI.op_begin(), FPI.op_end(), op_begin()); 1094 setParentPad(FPI.getParentPad()); 1095 } 1096 1097 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad, 1098 ArrayRef<Value *> Args, unsigned Values, 1099 const Twine &NameStr, Instruction *InsertBefore) 1100 : Instruction(ParentPad->getType(), Op, 1101 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values, 1102 InsertBefore) { 1103 init(ParentPad, Args, NameStr); 1104 } 1105 1106 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad, 1107 ArrayRef<Value *> Args, unsigned Values, 1108 const Twine &NameStr, BasicBlock *InsertAtEnd) 1109 : Instruction(ParentPad->getType(), Op, 1110 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values, 1111 InsertAtEnd) { 1112 init(ParentPad, Args, NameStr); 1113 } 1114 1115 //===----------------------------------------------------------------------===// 1116 // UnreachableInst Implementation 1117 
//===----------------------------------------------------------------------===//

// An unreachable terminator has no operands.
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

// Unconditional branch.  Operands are stored at the *end* of the slot block,
// hence the negative Op<> indices: Op<-1> is the (only) successor.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch: layout is [cond, false-dest, true-dest] reading from
// Op<-3> up to Op<-1>.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Copy constructor: handles both the 1-operand (unconditional) and 3-operand
// (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

// Normalize the array-size operand: a null Amt means a single element
// (constant i32 1).
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertBefore) {
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/None, Name, InsertAtEnd) {}

// Full constructor: the alloca's value type is a pointer to Ty in AddrSpace;
// the single operand is the (possibly defaulted) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(MaybeAlign(Align));
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       MaybeAlign Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// Alignment is stored log2-encoded in the low 5 bits of the subclass data
// (encode() produces that representation).
void AllocaInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             encode(Align));
  if (Align)
    assert(getAlignment() == Align->value() &&
           "Alignment representation error!");
  else
    assert(getAlignment() == 0 && "Alignment representation error!");
}

// True unless the array size is the constant 1.  A non-constant size is
// conservatively treated as an array allocation.
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

// The delegating constructors below successively fill in default values:
// non-volatile, unspecified alignment, non-atomic, system sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// Full constructor: Ty must match the pointee type of Ptr.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(MaybeAlign(Align));
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// Alignment occupies bits 1-5 of the subclass data (bit 0 is the volatile
// flag), log2-encoded via encode().
void LoadInst::setAlignment(MaybeAlign Align) {
  assert((!Align || *Align <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             (encode(Align) << 1));
  assert(getAlign() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

// The delegating constructors below successively fill in default values:
// non-volatile, unspecified alignment, non-atomic, system sync scope.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

// Full constructor: operand 0 is the stored value, operand 1 the address.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

// Alignment occupies bits 1-5 of the subclass data (bit 0 is the volatile
// flag), log2-encoded via encode().
void StoreInst::setAlignment(MaybeAlign Alignment) {
  assert((!Alignment || *Alignment <= MaximumAlignment) &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             (encode(Alignment) << 1));
  assert(getAlign() == Alignment && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                        AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

// Wire up the three operands (pointer, expected value, replacement value),
// record the orderings and sync scope, and check the cmpxchg invariants:
// both value operands must match the pointee type, both orderings must be
// atomic, and the failure ordering may be neither stronger than the success
// ordering nor carry release semantics.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

// The result type is { <value type>, i1 }: the loaded value plus a success
// flag.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
//                        AtomicRMWInst Implementation
//===----------------------------------------------------------------------===// 1493 1494 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, 1495 AtomicOrdering Ordering, 1496 SyncScope::ID SSID) { 1497 Op<0>() = Ptr; 1498 Op<1>() = Val; 1499 setOperation(Operation); 1500 setOrdering(Ordering); 1501 setSyncScopeID(SSID); 1502 1503 assert(getOperand(0) && getOperand(1) && 1504 "All operands must be non-null!"); 1505 assert(getOperand(0)->getType()->isPointerTy() && 1506 "Ptr must have pointer type!"); 1507 assert(getOperand(1)->getType() == 1508 cast<PointerType>(getOperand(0)->getType())->getElementType() 1509 && "Ptr must be a pointer to Val type!"); 1510 assert(Ordering != AtomicOrdering::NotAtomic && 1511 "AtomicRMW instructions must be atomic!"); 1512 } 1513 1514 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1515 AtomicOrdering Ordering, 1516 SyncScope::ID SSID, 1517 Instruction *InsertBefore) 1518 : Instruction(Val->getType(), AtomicRMW, 1519 OperandTraits<AtomicRMWInst>::op_begin(this), 1520 OperandTraits<AtomicRMWInst>::operands(this), 1521 InsertBefore) { 1522 Init(Operation, Ptr, Val, Ordering, SSID); 1523 } 1524 1525 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1526 AtomicOrdering Ordering, 1527 SyncScope::ID SSID, 1528 BasicBlock *InsertAtEnd) 1529 : Instruction(Val->getType(), AtomicRMW, 1530 OperandTraits<AtomicRMWInst>::op_begin(this), 1531 OperandTraits<AtomicRMWInst>::operands(this), 1532 InsertAtEnd) { 1533 Init(Operation, Ptr, Val, Ordering, SSID); 1534 } 1535 1536 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1537 switch (Op) { 1538 case AtomicRMWInst::Xchg: 1539 return "xchg"; 1540 case AtomicRMWInst::Add: 1541 return "add"; 1542 case AtomicRMWInst::Sub: 1543 return "sub"; 1544 case AtomicRMWInst::And: 1545 return "and"; 1546 case AtomicRMWInst::Nand: 1547 return "nand"; 1548 case AtomicRMWInst::Or: 1549 return "or"; 1550 case AtomicRMWInst::Xor: 1551 return "xor"; 1552 
case AtomicRMWInst::Max: 1553 return "max"; 1554 case AtomicRMWInst::Min: 1555 return "min"; 1556 case AtomicRMWInst::UMax: 1557 return "umax"; 1558 case AtomicRMWInst::UMin: 1559 return "umin"; 1560 case AtomicRMWInst::FAdd: 1561 return "fadd"; 1562 case AtomicRMWInst::FSub: 1563 return "fsub"; 1564 case AtomicRMWInst::BAD_BINOP: 1565 return "<invalid operation>"; 1566 } 1567 1568 llvm_unreachable("invalid atomicrmw operation"); 1569 } 1570 1571 //===----------------------------------------------------------------------===// 1572 // FenceInst Implementation 1573 //===----------------------------------------------------------------------===// 1574 1575 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1576 SyncScope::ID SSID, 1577 Instruction *InsertBefore) 1578 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { 1579 setOrdering(Ordering); 1580 setSyncScopeID(SSID); 1581 } 1582 1583 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1584 SyncScope::ID SSID, 1585 BasicBlock *InsertAtEnd) 1586 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { 1587 setOrdering(Ordering); 1588 setSyncScopeID(SSID); 1589 } 1590 1591 //===----------------------------------------------------------------------===// 1592 // GetElementPtrInst Implementation 1593 //===----------------------------------------------------------------------===// 1594 1595 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, 1596 const Twine &Name) { 1597 assert(getNumOperands() == 1 + IdxList.size() && 1598 "NumOperands not initialized?"); 1599 Op<0>() = Ptr; 1600 llvm::copy(IdxList, op_begin() + 1); 1601 setName(Name); 1602 } 1603 1604 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) 1605 : Instruction(GEPI.getType(), GetElementPtr, 1606 OperandTraits<GetElementPtrInst>::op_end(this) - 1607 GEPI.getNumOperands(), 1608 GEPI.getNumOperands()), 1609 SourceElementType(GEPI.SourceElementType), 1610 
ResultElementType(GEPI.ResultElementType) { 1611 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); 1612 SubclassOptionalData = GEPI.SubclassOptionalData; 1613 } 1614 1615 /// getIndexedType - Returns the type of the element that would be accessed with 1616 /// a gep instruction with the specified parameters. 1617 /// 1618 /// The Idxs pointer should point to a continuous piece of memory containing the 1619 /// indices, either as Value* or uint64_t. 1620 /// 1621 /// A null type is returned if the indices are invalid for the specified 1622 /// pointer type. 1623 /// 1624 template <typename IndexTy> 1625 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) { 1626 // Handle the special case of the empty set index set, which is always valid. 1627 if (IdxList.empty()) 1628 return Agg; 1629 1630 // If there is at least one index, the top level type must be sized, otherwise 1631 // it cannot be 'stepped over'. 1632 if (!Agg->isSized()) 1633 return nullptr; 1634 1635 unsigned CurIdx = 1; 1636 for (; CurIdx != IdxList.size(); ++CurIdx) { 1637 CompositeType *CT = dyn_cast<CompositeType>(Agg); 1638 if (!CT || CT->isPointerTy()) return nullptr; 1639 IndexTy Index = IdxList[CurIdx]; 1640 if (!CT->indexValid(Index)) return nullptr; 1641 Agg = CT->getTypeAtIndex(Index); 1642 } 1643 return CurIdx == IdxList.size() ? Agg : nullptr; 1644 } 1645 1646 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1647 return getIndexedTypeInternal(Ty, IdxList); 1648 } 1649 1650 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1651 ArrayRef<Constant *> IdxList) { 1652 return getIndexedTypeInternal(Ty, IdxList); 1653 } 1654 1655 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1656 return getIndexedTypeInternal(Ty, IdxList); 1657 } 1658 1659 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1660 /// zeros. 
If so, the result pointer and the first operand have the same 1661 /// value, just potentially different types. 1662 bool GetElementPtrInst::hasAllZeroIndices() const { 1663 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1664 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1665 if (!CI->isZero()) return false; 1666 } else { 1667 return false; 1668 } 1669 } 1670 return true; 1671 } 1672 1673 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1674 /// constant integers. If so, the result pointer and the first operand have 1675 /// a constant offset between them. 1676 bool GetElementPtrInst::hasAllConstantIndices() const { 1677 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1678 if (!isa<ConstantInt>(getOperand(i))) 1679 return false; 1680 } 1681 return true; 1682 } 1683 1684 void GetElementPtrInst::setIsInBounds(bool B) { 1685 cast<GEPOperator>(this)->setIsInBounds(B); 1686 } 1687 1688 bool GetElementPtrInst::isInBounds() const { 1689 return cast<GEPOperator>(this)->isInBounds(); 1690 } 1691 1692 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1693 APInt &Offset) const { 1694 // Delegate to the generic GEPOperator implementation. 
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

//===----------------------------------------------------------------------===//
//                           ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type of extractelement is the vector's element type.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

/// Return true if Val is a vector and Index is an integer of any width.
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
//                           InsertElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type of insertelement is the (unchanged) input vector type.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement "
         "instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

/// Return true only if (Vec, Elt, Index) would form a legal insertelement:
/// Vec is a vector, Elt matches Vec's element type, and Index is an integer.
bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false;  // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false;  // Third operand of insertelement must be an integer type.
1780 return true; 1781 } 1782 1783 //===----------------------------------------------------------------------===// 1784 // ShuffleVectorInst Implementation 1785 //===----------------------------------------------------------------------===// 1786 1787 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1788 const Twine &Name, 1789 Instruction *InsertBefore) 1790 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1791 cast<VectorType>(Mask->getType())->getElementCount()), 1792 ShuffleVector, 1793 OperandTraits<ShuffleVectorInst>::op_begin(this), 1794 OperandTraits<ShuffleVectorInst>::operands(this), 1795 InsertBefore) { 1796 assert(isValidOperands(V1, V2, Mask) && 1797 "Invalid shuffle vector instruction operands!"); 1798 Op<0>() = V1; 1799 Op<1>() = V2; 1800 Op<2>() = Mask; 1801 setName(Name); 1802 } 1803 1804 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1805 const Twine &Name, 1806 BasicBlock *InsertAtEnd) 1807 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1808 cast<VectorType>(Mask->getType())->getElementCount()), 1809 ShuffleVector, 1810 OperandTraits<ShuffleVectorInst>::op_begin(this), 1811 OperandTraits<ShuffleVectorInst>::operands(this), 1812 InsertAtEnd) { 1813 assert(isValidOperands(V1, V2, Mask) && 1814 "Invalid shuffle vector instruction operands!"); 1815 1816 Op<0>() = V1; 1817 Op<1>() = V2; 1818 Op<2>() = Mask; 1819 setName(Name); 1820 } 1821 1822 void ShuffleVectorInst::commute() { 1823 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1824 int NumMaskElts = getMask()->getType()->getVectorNumElements(); 1825 SmallVector<Constant*, 16> NewMask(NumMaskElts); 1826 Type *Int32Ty = Type::getInt32Ty(getContext()); 1827 for (int i = 0; i != NumMaskElts; ++i) { 1828 int MaskElt = getMaskValue(i); 1829 if (MaskElt == -1) { 1830 NewMask[i] = UndefValue::get(Int32Ty); 1831 continue; 1832 } 1833 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts 
           && "Out-of-range mask");
    // Shift the element reference across the V1/V2 boundary.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = ConstantInt::get(Int32Ty, MaskElt);
  }
  Op<2>() = ConstantVector::get(NewMask);
  Op<0>().swap(Op<1>());
}

/// Return true if (V1, V2, Mask) form a legal shufflevector: V1 and V2 are
/// vectors of the same type and Mask is a vector of i32 constants (or undef)
/// that index into the concatenation of V1 and V2.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        // Each index must select from the 2*V1Size concatenated elements.
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // The bitcode reader can create a place holder for a forward reference
  // used as the shuffle mask. When this occurs, the shuffle mask will
  // fall into this case and fail. To avoid this error, do this bit of
  // ugliness to allow such a mask pass.
  // NOTE(review): UserOp1 appears to be the bitcode reader's placeholder
  // opcode for a forward-referenced mask — confirm against the reader.
  if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
    if (CE->getOpcode() == Instruction::UserOp1)
      return true;

  return false;
}

/// Return the mask element at position \p i as an integer, or -1 if that
/// element is undef.
int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
  assert(!Mask->getType()->getVectorElementCount().Scalable &&
         "Length of scalable vectors unknown at compile time");
  // Fast path: packed constant data gives direct element access.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
    return CDS->getElementAsInteger(i);
  Constant *C = Mask->getAggregateElement(i);
  if (isa<UndefValue>(C))
    return -1;
  return cast<ConstantInt>(C)->getZExtValue();
}

/// Append the mask's elements to \p Result as integers, using -1 for undef
/// elements.
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  assert(!Mask->getType()->getVectorElementCount().Scalable &&
         "Length of scalable vectors unknown at compile time");
  unsigned NumElts = Mask->getType()->getVectorNumElements();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ?
-1 : 1914 cast<ConstantInt>(C)->getZExtValue()); 1915 } 1916 } 1917 1918 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1919 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1920 bool UsesLHS = false; 1921 bool UsesRHS = false; 1922 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1923 if (Mask[i] == -1) 1924 continue; 1925 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1926 "Out-of-bounds shuffle mask element"); 1927 UsesLHS |= (Mask[i] < NumOpElts); 1928 UsesRHS |= (Mask[i] >= NumOpElts); 1929 if (UsesLHS && UsesRHS) 1930 return false; 1931 } 1932 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1933 return true; 1934 } 1935 1936 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1937 // We don't have vector operand size information, so assume operands are the 1938 // same size as the mask. 1939 return isSingleSourceMaskImpl(Mask, Mask.size()); 1940 } 1941 1942 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1943 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1944 return false; 1945 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1946 if (Mask[i] == -1) 1947 continue; 1948 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1949 return false; 1950 } 1951 return true; 1952 } 1953 1954 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1955 // We don't have vector operand size information, so assume operands are the 1956 // same size as the mask. 
1957 return isIdentityMaskImpl(Mask, Mask.size()); 1958 } 1959 1960 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1961 if (!isSingleSourceMask(Mask)) 1962 return false; 1963 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1964 if (Mask[i] == -1) 1965 continue; 1966 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1967 return false; 1968 } 1969 return true; 1970 } 1971 1972 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1973 if (!isSingleSourceMask(Mask)) 1974 return false; 1975 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1976 if (Mask[i] == -1) 1977 continue; 1978 if (Mask[i] != 0 && Mask[i] != NumElts) 1979 return false; 1980 } 1981 return true; 1982 } 1983 1984 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1985 // Select is differentiated from identity. It requires using both sources. 1986 if (isSingleSourceMask(Mask)) 1987 return false; 1988 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1989 if (Mask[i] == -1) 1990 continue; 1991 if (Mask[i] != i && Mask[i] != (NumElts + i)) 1992 return false; 1993 } 1994 return true; 1995 } 1996 1997 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 1998 // Example masks that will return true: 1999 // v1 = <a, b, c, d> 2000 // v2 = <e, f, g, h> 2001 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 2002 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 2003 2004 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 2005 int NumElts = Mask.size(); 2006 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 2007 return false; 2008 2009 // 2. The first element of the mask must be either a 0 or a 1. 2010 if (Mask[0] != 0 && Mask[0] != 1) 2011 return false; 2012 2013 // 3. The difference between the first 2 elements must be equal to the 2014 // number of elements in the mask. 2015 if ((Mask[1] - Mask[0]) != NumElts) 2016 return false; 2017 2018 // 4. 
// The difference between consecutive even-numbered and odd-numbered
//    elements must be equal to 2.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    // Undef elements break the strict stride-2 pattern, so reject them.
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

/// Return true if the mask extracts a contiguous subvector from a single
/// source of NumSrcElts elements; on success \p Index receives the starting
/// element of the extraction.
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // All defined elements must agree on a single starting offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}

/// Return true if this shuffle lengthens a single source operand by keeping
/// its elements in place and padding the tail with undef.
bool ShuffleVectorInst::isIdentityWithPadding() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  SmallVector<int, 16> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

/// Return true if this shuffle takes a leading identity slice of one (wider)
/// source operand.
bool ShuffleVectorInst::isIdentityWithExtract() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  // The result must be strictly narrower than the inputs.
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

/// Return true if this shuffle concatenates its two (non-undef) source
/// operands back-to-back.
bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  // The inserted value's type must match the type named by the index path.
  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  // Indices are stored out-of-line (they are not operands).
  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
    } else {
      // Not a valid type to index into.
      return nullptr;
    }

    Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
  }
  return const_cast<Type*>(Agg);
}

//===----------------------------------------------------------------------===//
//                             UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

// The result type is always the operand's own type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock
                                     *InsertAtEnd) {
  // Build detached, then append to the block's instruction list.
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Debug-only sanity check of the opcode/operand-type combination.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
//                             BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Debug-only sanity check of the opcode/operand-type combination.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  // Each opcode family enforces its own result/operand type requirements.
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Build detached, then append to the block's instruction list.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Negation is materialized as "sub 0, Op"; getZeroValueForNegation produces
// the appropriate zero for the operand's (possibly vector) type.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

// Negation with the no-signed-wrap flag set.
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero =
ConstantFP::getZeroValueForNegation(Op->getType()); 2381 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2382 } 2383 2384 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2385 Instruction *InsertBefore) { 2386 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2387 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2388 } 2389 2390 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2391 BasicBlock *InsertAtEnd) { 2392 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2393 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2394 } 2395 2396 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2397 Instruction *InsertBefore) { 2398 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2399 return new BinaryOperator(Instruction::FSub, zero, Op, 2400 Op->getType(), Name, InsertBefore); 2401 } 2402 2403 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2404 BasicBlock *InsertAtEnd) { 2405 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2406 return new BinaryOperator(Instruction::FSub, zero, Op, 2407 Op->getType(), Name, InsertAtEnd); 2408 } 2409 2410 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2411 Instruction *InsertBefore) { 2412 Constant *C = Constant::getAllOnesValue(Op->getType()); 2413 return new BinaryOperator(Instruction::Xor, Op, C, 2414 Op->getType(), Name, InsertBefore); 2415 } 2416 2417 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2418 BasicBlock *InsertAtEnd) { 2419 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2420 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2421 Op->getType(), Name, InsertAtEnd); 2422 } 2423 2424 // Exchange the two operands to this instruction. 
// This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction.  If the instruction is order-dependent (SetLT f.e.), the opcode
// is changed.  Returns true (i.e. failure) when the operands CANNOT be
// swapped because the opcode is not commutative.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
//                             FPMathOperator Class
//===----------------------------------------------------------------------===//

// Return the maximum error permitted by !fpmath metadata, or 0.0 (meaning
// "no accuracy requirement recorded") when the metadata is absent.
float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}

//===----------------------------------------------------------------------===//
//                                CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast counts only when both endpoints are integers.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target informations may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      // No-op only when the integer is exactly pointer-sized.
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination.
/// This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC      >        FloatPt      n/a        FloatPt      n/a
  // FPEXT        <        FloatPt      n/a        FloatPt      n/a
  // PTRTOINT    n/a       Pointer      n/a        Integral   Unsigned
  // INTTOPTR    n/a       Integral   Unsigned     Pointer      n/a
  // BITCAST      =        FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST   n/a       Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C  +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP        +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst
*CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2772 const Twine &Name, BasicBlock *InsertAtEnd) { 2773 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2774 // Construct and return the appropriate CastInst subclass 2775 switch (op) { 2776 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2777 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2778 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2779 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2780 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2781 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2782 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2783 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2784 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2785 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2786 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2787 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2788 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2789 default: llvm_unreachable("Invalid opcode provided"); 2790 } 2791 } 2792 2793 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2794 const Twine &Name, 2795 Instruction *InsertBefore) { 2796 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2797 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2798 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2799 } 2800 2801 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2802 const Twine &Name, 2803 BasicBlock *InsertAtEnd) { 2804 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2805 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2806 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2807 } 2808 2809 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2810 const Twine 
&Name, 2811 Instruction *InsertBefore) { 2812 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2813 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2814 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2815 } 2816 2817 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2818 const Twine &Name, 2819 BasicBlock *InsertAtEnd) { 2820 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2821 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2822 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2823 } 2824 2825 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2826 const Twine &Name, 2827 Instruction *InsertBefore) { 2828 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2829 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2830 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2831 } 2832 2833 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2834 const Twine &Name, 2835 BasicBlock *InsertAtEnd) { 2836 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2837 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2838 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2839 } 2840 2841 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2842 const Twine &Name, 2843 BasicBlock *InsertAtEnd) { 2844 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2845 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2846 "Invalid cast"); 2847 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2848 assert((!Ty->isVectorTy() || 2849 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2850 "Invalid cast"); 2851 2852 if (Ty->isIntOrIntVectorTy()) 2853 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2854 2855 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2856 } 2857 2858 /// Create 
a BitCast or a PtrToInt cast instruction 2859 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2860 const Twine &Name, 2861 Instruction *InsertBefore) { 2862 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2863 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2864 "Invalid cast"); 2865 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2866 assert((!Ty->isVectorTy() || 2867 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2868 "Invalid cast"); 2869 2870 if (Ty->isIntOrIntVectorTy()) 2871 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2872 2873 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2874 } 2875 2876 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2877 Value *S, Type *Ty, 2878 const Twine &Name, 2879 BasicBlock *InsertAtEnd) { 2880 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2881 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2882 2883 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2884 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2885 2886 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2887 } 2888 2889 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2890 Value *S, Type *Ty, 2891 const Twine &Name, 2892 Instruction *InsertBefore) { 2893 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2894 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2895 2896 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2897 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2898 2899 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2900 } 2901 2902 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2903 const Twine &Name, 2904 Instruction *InsertBefore) { 2905 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2906 return Create(Instruction::PtrToInt, S, Ty, 
Name, InsertBefore); 2907 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2908 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2909 2910 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2911 } 2912 2913 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2914 bool isSigned, const Twine &Name, 2915 Instruction *InsertBefore) { 2916 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2917 "Invalid integer cast"); 2918 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2919 unsigned DstBits = Ty->getScalarSizeInBits(); 2920 Instruction::CastOps opcode = 2921 (SrcBits == DstBits ? Instruction::BitCast : 2922 (SrcBits > DstBits ? Instruction::Trunc : 2923 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2924 return Create(opcode, C, Ty, Name, InsertBefore); 2925 } 2926 2927 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2928 bool isSigned, const Twine &Name, 2929 BasicBlock *InsertAtEnd) { 2930 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2931 "Invalid cast"); 2932 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2933 unsigned DstBits = Ty->getScalarSizeInBits(); 2934 Instruction::CastOps opcode = 2935 (SrcBits == DstBits ? Instruction::BitCast : 2936 (SrcBits > DstBits ? Instruction::Trunc : 2937 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2938 return Create(opcode, C, Ty, Name, InsertAtEnd); 2939 } 2940 2941 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2942 const Twine &Name, 2943 Instruction *InsertBefore) { 2944 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2945 "Invalid cast"); 2946 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2947 unsigned DstBits = Ty->getScalarSizeInBits(); 2948 Instruction::CastOps opcode = 2949 (SrcBits == DstBits ? Instruction::BitCast : 2950 (SrcBits > DstBits ? 
     Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  // Same width -> bitcast; otherwise truncate or extend as appropriate.
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

// Check whether it is valid to call getCastOpcode for these types.
// This routine must be kept in sync with getCastOpcode.
bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {               // Casting to integral
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
    return SrcTy->isPointerTy();
  }
  if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy())                // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
    return false;
  }
  if (DestTy->isVectorTy())                  // Casting to vector
    return DestBits == SrcBits;
  if (DestTy->isPointerTy()) {               // Casting to pointer
    if (SrcTy->isPointerTy())                // Casting from pointer
      return true;
    return SrcTy->isIntegerTy();             // Casting from integral
  }
  if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy())
      return DestBits == SrcBits;            // 64-bit vector to MMX
    return false;
  }                                          // Casting to something else
  return false;
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
3035 SrcTy = SrcVecTy->getElementType(); 3036 DestTy = DestVecTy->getElementType(); 3037 } 3038 } 3039 } 3040 3041 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 3042 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 3043 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 3044 } 3045 } 3046 3047 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 3048 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 3049 3050 // Could still have vectors of pointers if the number of elements doesn't 3051 // match 3052 if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0) 3053 return false; 3054 3055 if (SrcBits != DestBits) 3056 return false; 3057 3058 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 3059 return false; 3060 3061 return true; 3062 } 3063 3064 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 3065 const DataLayout &DL) { 3066 // ptrtoint and inttoptr are not allowed on non-integral pointers 3067 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 3068 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 3069 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3070 !DL.isNonIntegralPointerType(PtrTy)); 3071 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 3072 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 3073 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 3074 !DL.isNonIntegralPointerType(PtrTy)); 3075 3076 return isBitCastable(SrcTy, DestTy); 3077 } 3078 3079 // Provide a way to get a "cast" where the cast opcode is inferred from the 3080 // types and size of the operand. This, basically, is a parallel of the 3081 // logic in the castIsValid function below. This axiom should hold: 3082 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 3083 // should not assert in castIsValid. In other words, this produces a "correct" 3084 // casting opcode for the arguments passed to it. 
// This routine must be kept in sync with isCastable.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // its an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
//                    CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
  // Check for type sanity on the arguments
  Type *SrcTy = S->getType();

  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  unsigned SrcLength = SrcTy->isVectorTy() ?
    cast<VectorType>(SrcTy)->getNumElements() : 0;
  unsigned DstLength = DstTy->isVectorTy() ?
    cast<VectorType>(DstTy)->getNumElements() : 0;

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize > DstBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength && SrcBitSize > DstBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength;
  case Instruction::PtrToInt:
    if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
      return false;
    if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
      if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
        return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
      return false;
    if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
      if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
        return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
    VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
    if (SrcVecTy && DstVecTy)
      return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
    if (SrcVecTy)
      return SrcVecTy->getNumElements() == 1;
    if (DstVecTy)
      return DstVecTy->getNumElements() == 1;

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    // An addrspacecast must actually change the address space.
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    // Vector-of-pointer casts must preserve the element count.
    if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
      if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
        return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());

      return false;
    }

    return true;
  }
  }
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(),
S, Ty) && "Illegal Trunc");
}

// The constructors below are all trivial: each unary cast instruction simply
// forwards its operands to the CastInst base constructor with its own opcode,
// then asserts (debug builds only) that the source value / destination type
// pair is legal for that cast via castIsValid().  Each cast comes in two
// flavors: insert-before an Instruction, or append to a BasicBlock.

TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
                     BasicBlock *InsertAtEnd)
    : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   Instruction *InsertBefore)
    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   Instruction *InsertBefore)
    : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
                         Instruction *InsertBefore)
    : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
                         BasicBlock *InsertAtEnd)
    : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
                     Instruction *InsertBefore)
    : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
                     BasicBlock *InsertAtEnd)
    : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       Instruction *InsertBefore)
    : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       Instruction *InsertBefore)
    : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
                       Instruction *InsertBefore)
    : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
                       Instruction *InsertBefore)
    : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
                           Instruction *InsertBefore)
    : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
                           BasicBlock *InsertAtEnd)
    : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
                           Instruction *InsertBefore)
    : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
                           BasicBlock *InsertAtEnd)
    : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
                         Instruction *InsertBefore)
    : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
                         BasicBlock *InsertAtEnd)
    : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
                                     Instruction *InsertBefore)
    : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

// CmpInst has exactly two operands (LHS, RHS); the predicate is stored in the
// instruction's subclass data via setPredicate().  The optional FlagsSource
// lets the creator copy IR flags (e.g. fast-math flags) from an existing
// instruction.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
                 Instruction *FlagsSource)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

// Factory: dispatches on the opcode to construct either an ICmpInst or an
// FCmpInst, inserting before InsertBefore when it is non-null.
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

// Factory: as above, but appends to InsertAtEnd (which must be non-null —
// it is dereferenced unconditionally).
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}

// The next three helpers forward to the ICmpInst/FCmpInst implementation,
// depending on the dynamic type of this comparison.
void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

bool CmpInst::isEquality() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isEquality();
  return cast<FCmpInst>(this)->isEquality();
}

// Return the predicate that yields the logical negation of `pred` on the same
// operands.  For FP predicates this toggles ordered <-> unordered as well,
// since !(a OLT b) == (a UGE b) under IEEE NaN semantics.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: return ICMP_NE;
  case ICMP_NE: return ICMP_EQ;
  case ICMP_UGT: return ICMP_ULE;
  case ICMP_ULT: return ICMP_UGE;
  case ICMP_UGE: return ICMP_ULT;
  case ICMP_ULE: return ICMP_UGT;
  case ICMP_SGT: return ICMP_SLE;
  case ICMP_SLT: return ICMP_SGE;
  case ICMP_SGE: return ICMP_SLT;
  case ICMP_SLE: return ICMP_SGT;

  case FCMP_OEQ: return FCMP_UNE;
  case FCMP_ONE: return FCMP_UEQ;
  case FCMP_OGT: return FCMP_ULE;
  case FCMP_OLT: return FCMP_UGE;
  case FCMP_OGE: return FCMP_ULT;
  case FCMP_OLE: return FCMP_UGT;
  case FCMP_UEQ: return FCMP_ONE;
  case FCMP_UNE: return FCMP_OEQ;
  case FCMP_UGT: return FCMP_OLE;
  case FCMP_ULT: return FCMP_OGE;
  case FCMP_UGE: return FCMP_OLT;
  case FCMP_ULE: return FCMP_OGT;
  case FCMP_ORD: return FCMP_UNO;
  case FCMP_UNO: return FCMP_ORD;
  case FCMP_TRUE: return FCMP_FALSE;
  case FCMP_FALSE: return FCMP_TRUE;
  }
}

// Textual (IR assembly) spelling of each predicate.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

// Map an icmp predicate to its signed variant; equality and already-signed
// predicates pass through unchanged.
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
    return pred;
  case ICMP_UGT: return ICMP_SGT;
  case ICMP_ULT: return ICMP_SLT;
  case ICMP_UGE: return ICMP_SGE;
  case ICMP_ULE: return ICMP_SLE;
  }
}

// Map an icmp predicate to its unsigned variant; equality and
// already-unsigned predicates pass through unchanged.
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown icmp predicate!");
  case ICMP_EQ: case ICMP_NE:
  case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
    return pred;
  case ICMP_SGT: return ICMP_UGT;
  case ICMP_SLT: return ICMP_ULT;
  case ICMP_SGE: return ICMP_UGE;
  case ICMP_SLE: return ICMP_ULE;
  }
}

// Toggle strict <-> non-strict (e.g. SGT <-> SGE) while keeping direction
// and signedness/orderedness.  Only relational predicates are supported;
// equality predicates hit the unreachable.
CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown or unsupported cmp predicate!");
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_SGE: return ICMP_SGT;
  case ICMP_SLE: return ICMP_SLT;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case ICMP_UGE: return ICMP_UGT;
  case ICMP_ULE: return ICMP_ULT;

  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_OGE: return FCMP_OGT;
  case FCMP_OLE: return FCMP_OLT;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  case FCMP_UGE: return FCMP_UGT;
  case FCMP_ULE: return FCMP_ULT;
  }
}

// Predicate that holds when the two operands are exchanged, e.g.
// (a SGT b) == (b SLT a).  Symmetric predicates pass through unchanged.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
  default: llvm_unreachable("Unknown cmp predicate!");
  case ICMP_EQ: case ICMP_NE:
    return pred;
  case ICMP_SGT: return ICMP_SLT;
  case ICMP_SLT: return ICMP_SGT;
  case ICMP_SGE: return ICMP_SLE;
  case ICMP_SLE: return ICMP_SGE;
  case ICMP_UGT: return ICMP_ULT;
  case ICMP_ULT: return ICMP_UGT;
  case ICMP_UGE: return ICMP_ULE;
  case ICMP_ULE: return ICMP_UGE;

  case FCMP_FALSE: case FCMP_TRUE:
  case FCMP_OEQ: case FCMP_ONE:
  case FCMP_UEQ: case FCMP_UNE:
  case FCMP_ORD: case FCMP_UNO:
    return pred;
  case FCMP_OGT: return FCMP_OLT;
  case FCMP_OLT: return FCMP_OGT;
  case FCMP_OGE: return FCMP_OLE;
  case FCMP_OLE: return FCMP_OGE;
  case FCMP_UGT: return FCMP_ULT;
  case FCMP_ULT: return FCMP_UGT;
  case FCMP_UGE: return FCMP_ULE;
  case FCMP_ULE: return FCMP_UGE;
  }
}

// Relax a strict relational predicate to its non-strict form (GT -> GE,
// LT -> LE); everything else is returned unchanged.
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
3696 } 3697 3698 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) { 3699 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3700 3701 switch (pred) { 3702 default: 3703 llvm_unreachable("Unknown predicate!"); 3704 case CmpInst::ICMP_ULT: 3705 return CmpInst::ICMP_SLT; 3706 case CmpInst::ICMP_ULE: 3707 return CmpInst::ICMP_SLE; 3708 case CmpInst::ICMP_UGT: 3709 return CmpInst::ICMP_SGT; 3710 case CmpInst::ICMP_UGE: 3711 return CmpInst::ICMP_SGE; 3712 } 3713 } 3714 3715 bool CmpInst::isUnsigned(Predicate predicate) { 3716 switch (predicate) { 3717 default: return false; 3718 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3719 case ICmpInst::ICMP_UGE: return true; 3720 } 3721 } 3722 3723 bool CmpInst::isSigned(Predicate predicate) { 3724 switch (predicate) { 3725 default: return false; 3726 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3727 case ICmpInst::ICMP_SGE: return true; 3728 } 3729 } 3730 3731 bool CmpInst::isOrdered(Predicate predicate) { 3732 switch (predicate) { 3733 default: return false; 3734 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3735 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3736 case FCmpInst::FCMP_ORD: return true; 3737 } 3738 } 3739 3740 bool CmpInst::isUnordered(Predicate predicate) { 3741 switch (predicate) { 3742 default: return false; 3743 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3744 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3745 case FCmpInst::FCMP_UNO: return true; 3746 } 3747 } 3748 3749 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3750 switch(predicate) { 3751 default: return false; 3752 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3753 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3754 } 3755 } 3756 3757 bool CmpInst::isFalseWhenEqual(Predicate 
predicate) { 3758 switch(predicate) { 3759 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT: 3760 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; 3761 default: return false; 3762 } 3763 } 3764 3765 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3766 // If the predicates match, then we know the first condition implies the 3767 // second is true. 3768 if (Pred1 == Pred2) 3769 return true; 3770 3771 switch (Pred1) { 3772 default: 3773 break; 3774 case ICMP_EQ: 3775 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3776 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3777 Pred2 == ICMP_SLE; 3778 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3779 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3780 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3781 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3782 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3783 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3784 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 
3785 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3786 } 3787 return false; 3788 } 3789 3790 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3791 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3792 } 3793 3794 //===----------------------------------------------------------------------===// 3795 // SwitchInst Implementation 3796 //===----------------------------------------------------------------------===// 3797 3798 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3799 assert(Value && Default && NumReserved); 3800 ReservedSpace = NumReserved; 3801 setNumHungOffUseOperands(2); 3802 allocHungoffUses(ReservedSpace); 3803 3804 Op<0>() = Value; 3805 Op<1>() = Default; 3806 } 3807 3808 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3809 /// switch on and a default destination. The number of additional cases can 3810 /// be specified here to make memory allocation more efficient. This 3811 /// constructor can also autoinsert before another instruction. 3812 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3813 Instruction *InsertBefore) 3814 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3815 nullptr, 0, InsertBefore) { 3816 init(Value, Default, 2+NumCases*2); 3817 } 3818 3819 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3820 /// switch on and a default destination. The number of additional cases can 3821 /// be specified here to make memory allocation more efficient. This 3822 /// constructor also autoinserts at the end of the specified BasicBlock. 
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

// Copy constructor: allocates exactly as many hung-off operands as the
// source has and copies the (value, successor) pairs starting at index 2
// (indices 0/1 — condition and default — are set by init()).
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction...
/// Appends a (value, destination) pair, growing the hung-off operand list
/// if the reserved space is exhausted.
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
/// Note: removal is O(1) — the removed case is overwritten with the LAST
/// case, so case order is not preserved.  Returns an iterator at the index
/// of the removed case (now occupied by what was the last case).
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

// Return the switch's !prof metadata node if it exists and its first operand
// names "branch_weights"; otherwise null.
MDNode *
SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
  if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
    if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
      if (MDName->getString() == "branch_weights")
        return ProfileData;
  return nullptr;
}

// Rebuild a branch_weights MDNode from the cached Weights vector, or return
// null when there is nothing meaningful to attach (no weights, all-zero
// weights, or fewer than two successors).
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes =
      all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights.getValue().size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

// Populate the Weights cache from existing !prof metadata, if any.
// Operand 0 of the MDNode is the "branch_weights" tag, so weights start at 1.
void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getProfBranchWeightsMD(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of succesors");
  }

  SmallVector<uint32_t, 8> Weights;
  for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
    ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
    uint32_t CW = C->getValue().getZExtValue();
    Weights.push_back(CW);
  }
  this->Weights = std::move(Weights);
}

// Mirror SwitchInst::removeCase's swap-with-last strategy on the weight
// vector (the +1 skips the default successor's weight at index 0).
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
    Weights.getValue().pop_back();
  }
  return SI.removeCase(I);
}

// Add a case and record its optional weight W.  If no weights existed yet
// and a non-zero weight is supplied, a zero-initialized weight vector is
// created first so existing successors get weight 0.
void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights.getValue().push_back(W ? *W : 0);
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

SymbolTableList<Instruction>::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

// Return the cached weight for successor `idx`, or None when the switch
// carries no branch weights.
SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return None;
  return Weights.getValue()[idx];
}

// Set the weight of successor `idx`.  A missing W is a no-op; a first
// non-zero weight lazily materializes a zero-filled weight vector.
void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = Weights.getValue()[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

// Static variant: read successor `idx`'s weight straight from the !prof
// metadata of an arbitrary SwitchInst, without constructing a wrapper.
SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return None;
}

//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout: Op<0> = target address, followed by the possible
// destination blocks, all stored as hung-off uses.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

// Copy constructor: duplicates the address operand and every destination.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
/// `idx` is a destination index (operand idx+1, since operand 0 is the
/// address); like SwitchInst::removeCase, the hole is filled with the last
/// destination, so order is not preserved.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
//                           FreezeInst Implementation
//===----------------------------------------------------------------------===//

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, Instruction *InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

FreezeInst::FreezeInst(Value *S,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                        cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.
// Each cloneImpl() creates an uninserted copy of the instruction.  Classes
// with a fixed operand count use placement new with getNumOperands() (the
// operands are co-allocated with the User); call-like instructions also
// reserve descriptor bytes for their operand bundles.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  // The clone carries over the inalloca / swifterror bits explicitly; they
  // are not part of the constructor signature.
  AllocaInst *Result =
      new AllocaInst(getAllocatedType(), getType()->getAddressSpace(),
                     (Value *)getOperand(0), MaybeAlign(getAlignment()));
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      MaybeAlign(getAlignment()), getOrdering(),
                      getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       MaybeAlign(getAlignment()), getOrdering(),
                       getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result =
      new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
                            getSuccessOrdering(), getFailureOrdering(),
                            getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}