1 //===-- Instructions.cpp - Implement the LLVM instructions ----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements all of the non-inline methods for the LLVM instruction 11 // classes. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/IR/Instructions.h" 16 #include "LLVMContextImpl.h" 17 #include "llvm/IR/CallSite.h" 18 #include "llvm/IR/ConstantRange.h" 19 #include "llvm/IR/Constants.h" 20 #include "llvm/IR/DataLayout.h" 21 #include "llvm/IR/DerivedTypes.h" 22 #include "llvm/IR/Function.h" 23 #include "llvm/IR/Module.h" 24 #include "llvm/IR/Operator.h" 25 #include "llvm/Support/ErrorHandling.h" 26 #include "llvm/Support/MathExtras.h" 27 using namespace llvm; 28 29 //===----------------------------------------------------------------------===// 30 // CallSite Class 31 //===----------------------------------------------------------------------===// 32 33 User::op_iterator CallSite::getCallee() const { 34 Instruction *II(getInstruction()); 35 return isCall() 36 ? cast<CallInst>(II)->op_end() - 1 // Skip Callee 37 : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee 38 } 39 40 //===----------------------------------------------------------------------===// 41 // TerminatorInst Class 42 //===----------------------------------------------------------------------===// 43 44 // Out of line virtual method, so the vtable, etc has a home. 45 TerminatorInst::~TerminatorInst() { 46 } 47 48 //===----------------------------------------------------------------------===// 49 // UnaryInstruction Class 50 //===----------------------------------------------------------------------===// 51 52 // Out of line virtual method, so the vtable, etc has a home. 
53 UnaryInstruction::~UnaryInstruction() { 54 } 55 56 //===----------------------------------------------------------------------===// 57 // SelectInst Class 58 //===----------------------------------------------------------------------===// 59 60 /// areInvalidOperands - Return a string if the specified operands are invalid 61 /// for a select operation, otherwise return null. 62 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) { 63 if (Op1->getType() != Op2->getType()) 64 return "both values to select must have same type"; 65 66 if (Op1->getType()->isTokenTy()) 67 return "select values cannot have token type"; 68 69 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) { 70 // Vector select. 71 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext())) 72 return "vector select condition element type must be i1"; 73 VectorType *ET = dyn_cast<VectorType>(Op1->getType()); 74 if (!ET) 75 return "selected values for vector select must be vectors"; 76 if (ET->getNumElements() != VT->getNumElements()) 77 return "vector select requires selected vectors to have " 78 "the same vector length as select condition"; 79 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) { 80 return "select condition must be i1 or <n x i1>"; 81 } 82 return nullptr; 83 } 84 85 86 //===----------------------------------------------------------------------===// 87 // PHINode Class 88 //===----------------------------------------------------------------------===// 89 90 void PHINode::anchor() {} 91 92 PHINode::PHINode(const PHINode &PN) 93 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()), 94 ReservedSpace(PN.getNumOperands()) { 95 allocHungoffUses(PN.getNumOperands()); 96 std::copy(PN.op_begin(), PN.op_end(), op_begin()); 97 std::copy(PN.block_begin(), PN.block_end(), block_begin()); 98 SubclassOptionalData = PN.SubclassOptionalData; 99 } 100 101 // removeIncomingValue - Remove an incoming value. 
// This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value: clear the trailing use before shrinking the
  // operand count so no dangling use remains on the removed value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    // Self-references are tolerated; any other mismatch means no single value.
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // All incoming values were this PHI itself: it merges nothing, so any
  // value is as good as another -- return undef.
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  // Clause operands are hung off the instruction; copy them one by one.
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

// Reserve space for NumReservedValues clauses (added later via addClause),
// name the instruction, and default to a non-cleanup landingpad.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

CallInst::~CallInst() {
}

// Operand layout: [args..., bundle operands..., callee]; the callee lives in
// the last slot (Op<-1>).
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  Op<-1>() = Func;

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  // Bundle operands follow the arguments; the iterator returned points just
  // past the last bundle operand, which must be the callee slot.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

// Zero-argument form: derive the function type from Func's pointee type.
void CallInst::init(Value *Func, const Twine &NameStr) {
  FTy =
      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  Op<-1>() = Func;

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
  : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
                                   ->getElementType())->getReturnType(),
                Instruction::Call,
                OperandTraits<CallInst>::op_end(this) - 1,
                1, InsertBefore) {
  init(Func, Name);
}

CallInst::CallInst(Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
  : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
                                   ->getElementType())->getReturnType(),
                Instruction::Call,
                OperandTraits<CallInst>::op_end(this) - 1,
                1, InsertAtEnd) {
  init(Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : Instruction(CI.getType(), Instruction::Call,
                  OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
                  CI.getNumOperands()),
      AttributeList(CI.AttributeList), FTy(CI.FTy) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  // Copy operands and the bundle bookkeeping that describes which operand
  // ranges belong to which bundle.
  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

// Clone CI but with the operand bundles replaced by OpB; all other call
// properties (tail kind, CC, attributes, debug loc) are carried over.
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
                                 InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

void CallInst::addAttribute(unsigned i, Attribute::AttrKind attr) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addAttribute(getContext(), i, attr);
  setAttributes(PAL);
}

void CallInst::addAttribute(unsigned i, StringRef Kind, StringRef Value) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addAttribute(getContext(), i, Kind, Value);
  setAttributes(PAL);
}

void CallInst::removeAttribute(unsigned i, Attribute attr) {
  AttributeSet PAL = getAttributes();
  AttrBuilder B(attr);
  LLVMContext &Context = getContext();
  PAL = PAL.removeAttributes(Context, i,
                             AttributeSet::get(Context, i, B));
  setAttributes(PAL);
}

void CallInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
  setAttributes(PAL);
}

void CallInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
  setAttributes(PAL);
}

// True if parameter index i carries attribute A, either on the call site
// itself or on the called function's own attribute list.
bool CallInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const {
  assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");

  if (AttributeList.hasAttribute(i, A))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(i, A);
  return false;
}

bool CallInst::dataOperandHasImpliedAttr(unsigned i,
                                         Attribute::AttrKind A) const {

  // There are getNumOperands() - 1 data operands.  The last operand is the
  // callee.
  assert(i < getNumOperands() && "Data operand index out of bounds!");

  // The attribute A can either be directly specified, if the operand in
  // question is a call argument; or be indirectly implied by the kind of its
  // containing operand bundle, if the operand is a bundle operand.

  if (i < (getNumArgOperands() + 1))
    return paramHasAttr(i, A);

  assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
         "Must be either a call argument or an operand bundle!");
  return bundleOperandHasAttr(i - 1, A);
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

// Shared worker for the two CreateMalloc overloads; exactly one of
// InsertBefore/InsertAtEnd must be non-null.
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize, Function *MallocF,
                                 const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8 *malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    // Normalize the array size to the pointer-sized integer type.
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module* M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  Value *MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy, nullptr);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall", InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      // NOTE: in this path the bitcast is NOT inserted into a block; the
      // caller is responsible for placing it (see CreateMalloc's comment).
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc)) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->doesNotAlias(0)) F->setDoesNotAlias(0);
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function * MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
487 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, 488 Type *IntPtrTy, Type *AllocTy, 489 Value *AllocSize, Value *ArraySize, 490 Function *MallocF, const Twine &Name) { 491 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, 492 ArraySize, MallocF, Name); 493 } 494 495 static Instruction* createFree(Value* Source, Instruction *InsertBefore, 496 BasicBlock *InsertAtEnd) { 497 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && 498 "createFree needs either InsertBefore or InsertAtEnd"); 499 assert(Source->getType()->isPointerTy() && 500 "Can not free something of nonpointer type!"); 501 502 BasicBlock* BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd; 503 Module* M = BB->getParent()->getParent(); 504 505 Type *VoidTy = Type::getVoidTy(M->getContext()); 506 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext()); 507 // prototype free as "void free(void*)" 508 Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy, nullptr); 509 CallInst* Result = nullptr; 510 Value *PtrCast = Source; 511 if (InsertBefore) { 512 if (Source->getType() != IntPtrTy) 513 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore); 514 Result = CallInst::Create(FreeFunc, PtrCast, "", InsertBefore); 515 } else { 516 if (Source->getType() != IntPtrTy) 517 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd); 518 Result = CallInst::Create(FreeFunc, PtrCast, ""); 519 } 520 Result->setTailCall(); 521 if (Function *F = dyn_cast<Function>(FreeFunc)) 522 Result->setCallingConv(F->getCallingConv()); 523 524 return Result; 525 } 526 527 /// CreateFree - Generate the IR for a call to the builtin free function. 528 Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) { 529 return createFree(Source, InsertBefore, nullptr); 530 } 531 532 /// CreateFree - Generate the IR for a call to the builtin free function. 
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout: [args..., bundle operands..., callee, normal dest,
// unwind dest]; the last three slots are Op<-3>/Op<-2>/Op<-1>.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
         "NumOperands not set up?");
  Op<-3>() = Fn;
  Op<-2>() = IfNormal;
  Op<-1>() = IfException;

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  // Bundle operands follow the arguments; the returned iterator must land
  // exactly on the three trailing (callee/dest) slots.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : TerminatorInst(II.getType(), Instruction::Invoke,
                     OperandTraits<InvokeInst>::op_end(this) -
                         II.getNumOperands(),
                     II.getNumOperands()),
      AttributeList(II.AttributeList), FTy(II.FTy) {
  setCallingConv(II.getCallingConv());
  // Copy operands plus the bundle bookkeeping describing bundle ranges.
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

// Clone II but with the operand bundles replaced by OpB; CC, attributes and
// debug location are carried over.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpB,
                                   II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const {
  return getSuccessor(idx);
}
unsigned InvokeInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}
void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
  return setSuccessor(idx, B);
}

// True if parameter index i carries attribute A, either on the invoke itself
// or on the called function's own attribute list.
bool InvokeInst::paramHasAttr(unsigned i, Attribute::AttrKind A) const {
  assert(i < (getNumArgOperands() + 1) && "Param index out of bounds!");

  if (AttributeList.hasAttribute(i, A))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(i, A);
  return false;
}

bool InvokeInst::dataOperandHasImpliedAttr(unsigned i,
                                           Attribute::AttrKind A) const {
  // There are getNumOperands() - 3 data operands.  The last three operands are
  // the callee and the two successor basic blocks.
  assert(i < (getNumOperands() - 2) && "Data operand index out of bounds!");

  // The attribute A can either be directly specified, if the operand in
  // question is an invoke argument; or be indirectly implied by the kind of its
  // containing operand bundle, if the operand is a bundle operand.

  if (i < (getNumArgOperands() + 1))
    return paramHasAttr(i, A);

  assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
         "Must be either an invoke argument or an operand bundle!");
  return bundleOperandHasAttr(i - 1, A);
}

void InvokeInst::addAttribute(unsigned i, Attribute::AttrKind attr) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addAttribute(getContext(), i, attr);
  setAttributes(PAL);
}

void InvokeInst::removeAttribute(unsigned i, Attribute attr) {
  AttributeSet PAL = getAttributes();
  AttrBuilder B(attr);
  PAL = PAL.removeAttributes(getContext(), i,
                             AttributeSet::get(getContext(), i, B));
  setAttributes(PAL);
}

void InvokeInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
  setAttributes(PAL);
}

void InvokeInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
  AttributeSet PAL = getAttributes();
  PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
  setAttributes(PAL);
}

// The landingpad is required to be the first non-PHI in the unwind block.
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) -
                         RI.getNumOperands(),
                     RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

// !!retVal yields 1 operand for a value return, 0 for a void return.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                     InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                     InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
}

unsigned ReturnInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}

/// Out-of-line ReturnInst method, put here so the C++ compiler can choose to
/// emit the vtable for the class in this translation unit.
void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
  llvm_unreachable("ReturnInst has no successors!");
}

BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
  llvm_unreachable("ReturnInst has no successors!");
}

ReturnInst::~ReturnInst() {
}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

unsigned ResumeInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}

void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
  llvm_unreachable("ResumeInst has no successors!");
}

BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
  llvm_unreachable("ResumeInst has no successors!");
}

//===----------------------------------------------------------------------===//
//                        CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : TerminatorInst(CRI.getType(), Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) -
                         CRI.getNumOperands(),
                     CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

// Bit 0 of the subclass data records whether an unwind destination exists;
// when it does, it occupies operand slot 1.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
                     Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                     Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
                     Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                     Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

BasicBlock *CleanupReturnInst::getSuccessorV(unsigned Idx) const {
  assert(Idx == 0);
  return getUnwindDest();
}
unsigned CleanupReturnInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}
void CleanupReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
  assert(Idx == 0);
  setUnwindDest(B);
}

//===----------------------------------------------------------------------===//
//                        CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : TerminatorInst(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2,
                     InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2,
                     InsertAtEnd) {
  init(CatchPad, BB);
}

BasicBlock *CatchReturnInst::getSuccessorV(unsigned Idx) const {
  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
  return getSuccessor();
}
unsigned CatchReturnInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}
void CatchReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
  setSuccessor(B);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

// NumReservedValues counts the expected number of handler clauses; one extra
// slot is reserved when an unwind destination is supplied, and init() adds
// one more for the parent-pad operand (Op<0>). Operands are hung off the
// instruction (nullptr/0 passed to TerminatorInst), not co-allocated.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                     InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                     InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : TerminatorInst(CSI.getType(), Instruction::CatchSwitch, nullptr,
                     CSI.getNumOperands()) {
  // init() allocates the hung-off use array and fills in Op<0> (parent pad)
  // and the unwind destination; copy the remaining operands starting at 1.
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Initial operand count: parent pad, plus the unwind dest if present.
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // Subclass-data bit 0 flags the presence of an unwind destination.
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// Append a handler block, growing the hung-off use array if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

// Remove the handler pointed to by HI, compacting the operand list in place.
void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

BasicBlock *CatchSwitchInst::getSuccessorV(unsigned idx) const {
  return getSuccessor(idx);
}
unsigned CatchSwitchInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}
void CatchSwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
  setSuccessor(idx, B);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout: the Args occupy the leading slots; the one remaining slot
// (presumably the last operand, written via setParentPad) holds ParentPad.
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  std::copy(Args.begin(), Args.end(), op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

// 'Values' must equal 1 + Args.size(); operands are co-allocated at the end
// of the object, hence op_end(this) - Values as the operand base.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
  : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
                   nullptr, 0, InsertBefore) {
}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
  : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
                   nullptr, 0, InsertAtEnd) {
}

// 'unreachable' has no operands and no successors; the set/get stubs must
// never be reached.
unsigned UnreachableInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}

void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
  llvm_unreachable("UnreachableInst has no successors!");
}

BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
  llvm_unreachable("UnreachableInst has no successors!");
}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

// Sanity check: a conditional branch may only test a single i1 value.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

// Branch operands live at the *end* of the co-allocated operand array, hence
// the op_end(this) - N base and the negative Op<> indices below. Layout
// (from the end): Op<-1> = true dest, Op<-2> = false dest, Op<-3> = cond.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
  : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                   OperandTraits<BranchInst>::op_end(this) - 1,
                   1, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
  : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                   OperandTraits<BranchInst>::op_end(this) - 3,
                   3, InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
  : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                   OperandTraits<BranchInst>::op_end(this) - 1,
                   1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
  : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                   OperandTraits<BranchInst>::op_end(this) - 3,
                   3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}


BranchInst::BranchInst(const BranchInst &BI) :
  TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
                 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                 BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

// Exchange the true and false destinations of a conditional branch, and keep
// any branch-weight profile metadata consistent by swapping its two weights.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
  return getSuccessor(idx);
}
unsigned BranchInst::getNumSuccessorsV() const {
  return getNumSuccessors();
}
void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
  setSuccessor(idx, B);
}


//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

// Normalize the array-size operand of an alloca: a null Amt means a scalar
// allocation, represented as the i32 constant 1.
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

// Convenience constructors delegate down to the fully-specified form,
// defaulting ArraySize to null (scalar) and Align to 0 (unspecified).
AllocaInst::AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore)
  : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
                       Instruction *InsertBefore)
  : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name,
                       BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}

// The result type is a pointer (default address space) to the allocated type.
AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
                       const Twine &Name, Instruction *InsertBefore)
  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
                     getAISize(Ty->getContext(), ArraySize), InsertBefore),
    AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
                       const Twine &Name, BasicBlock *InsertAtEnd)
  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
                     getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
    AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// Out of line virtual method, so the vtable, etc has a home.
AllocaInst::~AllocaInst() {
}

// Alignment is stored as Log2_32(Align) + 1 in the low 5 bits of the
// subclass data; the encoded value 0 therefore means "no alignment".
void AllocaInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             (Log2_32(Align) + 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

// An alloca is an array allocation unless its array-size operand is the
// constant 1; a non-constant size is conservatively treated as an array.
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

// The delegating constructors below progressively default isVolatile to
// false, Align to 0, and the ordering to NotAtomic/CrossThread.
LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
               InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
}

// Fully-specified constructor; Ty must match the pointee type of Ptr.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SynchronizationScope SynchScope, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SynchronizationScope SynchScope,
                   BasicBlock *InsertAE)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(NotAtomic);
  AssertOK();
  setName(Name);
}

// Legacy 'const char *Name' constructors: non-volatile, unaligned,
// non-atomic defaults, with the result type derived from the pointer.
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
  : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                     Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

// Alignment is stored as Log2_32(Align) + 1 in 5 bits of the subclass data,
// shifted left one bit (bit 0 holds another flag — presumably volatile; see
// setVolatile usage above).
void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
                 cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

// Delegating constructors mirror LoadInst's: default isVolatile to false,
// Align to 0, ordering to NotAtomic/CrossThread.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
                InsertAtEnd) {}

// A store produces no value, so the instruction's type is void.
// Operand layout: Op<0> = value, Op<1> = address.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SynchronizationScope SynchScope,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(val->getContext()), Store,
                OperandTraits<StoreInst>::op_begin(this),
                OperandTraits<StoreInst>::operands(this),
                InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SynchronizationScope SynchScope,
                     BasicBlock *InsertAtEnd)
  : Instruction(Type::getVoidTy(val->getContext()), Store,
                OperandTraits<StoreInst>::op_begin(this),
                OperandTraits<StoreInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SynchScope);
  AssertOK();
}

// Same Log2+1 encoding as LoadInst::setAlignment, in bits 1-5 of the
// subclass data.
void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout: Op<0> = pointer, Op<1> = compare value, Op<2> = new value.
// Both orderings must be atomic, the failure ordering may not be stronger
// than the success ordering, and it may not carry release semantics.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SynchronizationScope SynchScope) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSynchScope(SynchScope);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(SuccessOrdering >= FailureOrdering &&
         "AtomicCmpXchg success ordering must be at least as strong as fail");
  assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

// The result is a { ValueType, i1 } pair: the loaded value plus a success flag.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SynchronizationScope SynchScope,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
                          nullptr),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SynchronizationScope SynchScope,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext()),
                          nullptr),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout: Op<0> = pointer, Op<1> = operand value. The ordering must
// be atomic and Ptr must point to a value of Val's type.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SynchronizationScope SynchScope) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSynchScope(SynchScope);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

// The result type is the type of the value read/written (Val's type).
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SynchronizationScope SynchScope,
                             Instruction *InsertBefore)
  : Instruction(Val->getType(), AtomicRMW,
                OperandTraits<AtomicRMWInst>::op_begin(this),
                OperandTraits<AtomicRMWInst>::operands(this),
                InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SynchScope);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SynchronizationScope SynchScope,
                             BasicBlock *InsertAtEnd)
  : Instruction(Val->getType(), AtomicRMW,
                OperandTraits<AtomicRMWInst>::op_begin(this),
                OperandTraits<AtomicRMWInst>::operands(this),
                InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SynchScope);
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

// A fence has no operands; only the ordering and scope are recorded.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SynchronizationScope SynchScope,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSynchScope(SynchScope);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SynchronizationScope SynchScope,
                     BasicBlock *InsertAtEnd)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSynchScope(SynchScope);
}

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::anchor() {}

// Operand layout: Op<0> = base pointer, followed by the index operands.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a continuous piece of memory containing the
/// indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of the empty set index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized, otherwise
  // it cannot be 'stepped over'.
  if (!Agg->isSized())
    return nullptr;

  // The first index just steps over the (sized) top-level type, so the walk
  // through composite types starts at index 1.
  unsigned CurIdx = 1;
  for (; CurIdx != IdxList.size(); ++CurIdx) {
    CompositeType *CT = dyn_cast<CompositeType>(Agg);
    if (!CT || CT->isPointerTy()) return nullptr;
    IndexTy Index = IdxList[CurIdx];
    if (!CT->indexValid(Index)) return nullptr;
    Agg = CT->getTypeAtIndex(Index);
  }
  return CurIdx == IdxList.size() ? Agg : nullptr;
}

// Public wrappers instantiating the template for each supported index kind.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  // Operand 0 is the base pointer; indices start at operand 1.
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

// The inbounds flag lives in SubclassOptionalData; delegate to GEPOperator,
// which owns that encoding.
void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

//===----------------------------------------------------------------------===//
// ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type is the element type of the vector being indexed.
// Operand layout: Op<0> = vector, Op<1> = index.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}


// Valid operands: any vector and any integer index.
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}


//===----------------------------------------------------------------------===//
// InsertElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type matches the input vector's type.
// Operand layout: Op<0> = vector, Op<1> = element, Op<2> = index.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false;   // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false;  // Third operand of insertelement must be i32.
  return true;
}


//===----------------------------------------------------------------------===//
// ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

// The result vector has V1's element type and as many elements as the mask;
// it need not match the input vectors' length.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                cast<VectorType>(Mask->getType())->getNumElements()),
              ShuffleVector,
              OperandTraits<ShuffleVectorInst>::op_begin(this),
              OperandTraits<ShuffleVectorInst>::operands(this),
              InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                cast<VectorType>(Mask->getType())->getNumElements()),
              ShuffleVector,
              OperandTraits<ShuffleVectorInst>::op_begin(this),
              OperandTraits<ShuffleVectorInst>::operands(this),
              InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

// Valid operands: V1 and V2 are vectors of the same type, and Mask is a
// vector of i32 whose constant entries (if any) are each undef or less than
// twice the input vector length (indices select from the concatenation of
// V1 and V2).
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32.
  VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const ConstantDataSequential *CDS =
        dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // The bitcode reader can create a place holder for a forward reference
  // used as the shuffle mask. When this occurs, the shuffle mask will
  // fall into this case and fail. To avoid this error, do this bit of
  // ugliness to allow such a mask pass.
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
    if (CE->getOpcode() == Instruction::UserOp1)
      return true;

  return false;
}

/// getMaskValue - Return the index from the shuffle mask for the specified
/// output result. This is either -1 if the element is undef or a number less
/// than 2*numelements.
int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
  // Packed constant data: read the element directly.
  if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
    return CDS->getElementAsInteger(i);
  // General constant aggregate: undef elements map to -1.
  Constant *C = Mask->getAggregateElement(i);
  if (isa<UndefValue>(C))
    return -1;
  return cast<ConstantInt>(C)->getZExtValue();
}

/// getShuffleMask - Return the full mask for this instruction, where each
/// element is the element number and undef's are returned as -1.
void ShuffleVectorInst::getShuffleMask(Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  unsigned NumElts = Mask->getType()->getVectorNumElements();

  // Fast path for packed constant data.
  if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // Slow path: pull each element out of the aggregate.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}


//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

/// Initialize the two operands (aggregate, value) and the index list,
/// checking that the indices name a slot whose type matches \p Val.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(Idxs.size() > 0 && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

/// Copy constructor: duplicates operands, indices, and optional flags.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// Record the index list and name; the single aggregate operand is set by
/// the UnaryInstruction base.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(Idxs.size() > 0 && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

/// Copy constructor: duplicates the operand, indices, and optional flags.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
1877 // 1878 Type *ExtractValueInst::getIndexedType(Type *Agg, 1879 ArrayRef<unsigned> Idxs) { 1880 for (unsigned Index : Idxs) { 1881 // We can't use CompositeType::indexValid(Index) here. 1882 // indexValid() always returns true for arrays because getelementptr allows 1883 // out-of-bounds indices. Since we don't allow those for extractvalue and 1884 // insertvalue we need to check array indexing manually. 1885 // Since the only other types we can index into are struct types it's just 1886 // as easy to check those manually as well. 1887 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 1888 if (Index >= AT->getNumElements()) 1889 return nullptr; 1890 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 1891 if (Index >= ST->getNumElements()) 1892 return nullptr; 1893 } else { 1894 // Not a valid type to index into. 1895 return nullptr; 1896 } 1897 1898 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 1899 } 1900 return const_cast<Type*>(Agg); 1901 } 1902 1903 //===----------------------------------------------------------------------===// 1904 // BinaryOperator Class 1905 //===----------------------------------------------------------------------===// 1906 1907 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1908 Type *Ty, const Twine &Name, 1909 Instruction *InsertBefore) 1910 : Instruction(Ty, iType, 1911 OperandTraits<BinaryOperator>::op_begin(this), 1912 OperandTraits<BinaryOperator>::operands(this), 1913 InsertBefore) { 1914 Op<0>() = S1; 1915 Op<1>() = S2; 1916 init(iType); 1917 setName(Name); 1918 } 1919 1920 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1921 Type *Ty, const Twine &Name, 1922 BasicBlock *InsertAtEnd) 1923 : Instruction(Ty, iType, 1924 OperandTraits<BinaryOperator>::op_begin(this), 1925 OperandTraits<BinaryOperator>::operands(this), 1926 InsertAtEnd) { 1927 Op<0>() = S1; 1928 Op<1>() = S2; 1929 init(iType); 1930 setName(Name); 1931 } 1932 1933 1934 void BinaryOperator::init(BinaryOps 
iType) { 1935 Value *LHS = getOperand(0), *RHS = getOperand(1); 1936 (void)LHS; (void)RHS; // Silence warnings. 1937 assert(LHS->getType() == RHS->getType() && 1938 "Binary operator operand types must match!"); 1939 #ifndef NDEBUG 1940 switch (iType) { 1941 case Add: case Sub: 1942 case Mul: 1943 assert(getType() == LHS->getType() && 1944 "Arithmetic operation should return same type as operands!"); 1945 assert(getType()->isIntOrIntVectorTy() && 1946 "Tried to create an integer operation on a non-integer type!"); 1947 break; 1948 case FAdd: case FSub: 1949 case FMul: 1950 assert(getType() == LHS->getType() && 1951 "Arithmetic operation should return same type as operands!"); 1952 assert(getType()->isFPOrFPVectorTy() && 1953 "Tried to create a floating-point operation on a " 1954 "non-floating-point type!"); 1955 break; 1956 case UDiv: 1957 case SDiv: 1958 assert(getType() == LHS->getType() && 1959 "Arithmetic operation should return same type as operands!"); 1960 assert((getType()->isIntegerTy() || (getType()->isVectorTy() && 1961 cast<VectorType>(getType())->getElementType()->isIntegerTy())) && 1962 "Incorrect operand type (not integer) for S/UDIV"); 1963 break; 1964 case FDiv: 1965 assert(getType() == LHS->getType() && 1966 "Arithmetic operation should return same type as operands!"); 1967 assert(getType()->isFPOrFPVectorTy() && 1968 "Incorrect operand type (not floating point) for FDIV"); 1969 break; 1970 case URem: 1971 case SRem: 1972 assert(getType() == LHS->getType() && 1973 "Arithmetic operation should return same type as operands!"); 1974 assert((getType()->isIntegerTy() || (getType()->isVectorTy() && 1975 cast<VectorType>(getType())->getElementType()->isIntegerTy())) && 1976 "Incorrect operand type (not integer) for S/UREM"); 1977 break; 1978 case FRem: 1979 assert(getType() == LHS->getType() && 1980 "Arithmetic operation should return same type as operands!"); 1981 assert(getType()->isFPOrFPVectorTy() && 1982 "Incorrect operand type (not floating point) 
for FREM"); 1983 break; 1984 case Shl: 1985 case LShr: 1986 case AShr: 1987 assert(getType() == LHS->getType() && 1988 "Shift operation should return same type as operands!"); 1989 assert((getType()->isIntegerTy() || 1990 (getType()->isVectorTy() && 1991 cast<VectorType>(getType())->getElementType()->isIntegerTy())) && 1992 "Tried to create a shift operation on a non-integral type!"); 1993 break; 1994 case And: case Or: 1995 case Xor: 1996 assert(getType() == LHS->getType() && 1997 "Logical operation should return same type as operands!"); 1998 assert((getType()->isIntegerTy() || 1999 (getType()->isVectorTy() && 2000 cast<VectorType>(getType())->getElementType()->isIntegerTy())) && 2001 "Tried to create a logical operation on a non-integral type!"); 2002 break; 2003 default: 2004 break; 2005 } 2006 #endif 2007 } 2008 2009 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2010 const Twine &Name, 2011 Instruction *InsertBefore) { 2012 assert(S1->getType() == S2->getType() && 2013 "Cannot create binary operator with two operands of differing type!"); 2014 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 2015 } 2016 2017 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2018 const Twine &Name, 2019 BasicBlock *InsertAtEnd) { 2020 BinaryOperator *Res = Create(Op, S1, S2, Name); 2021 InsertAtEnd->getInstList().push_back(Res); 2022 return Res; 2023 } 2024 2025 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2026 Instruction *InsertBefore) { 2027 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2028 return new BinaryOperator(Instruction::Sub, 2029 zero, Op, 2030 Op->getType(), Name, InsertBefore); 2031 } 2032 2033 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2034 BasicBlock *InsertAtEnd) { 2035 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2036 return new BinaryOperator(Instruction::Sub, 2037 zero, Op, 2038 
Op->getType(), Name, InsertAtEnd); 2039 } 2040 2041 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2042 Instruction *InsertBefore) { 2043 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2044 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 2045 } 2046 2047 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2048 BasicBlock *InsertAtEnd) { 2049 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2050 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2051 } 2052 2053 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2054 Instruction *InsertBefore) { 2055 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2056 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2057 } 2058 2059 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2060 BasicBlock *InsertAtEnd) { 2061 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2062 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2063 } 2064 2065 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2066 Instruction *InsertBefore) { 2067 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2068 return new BinaryOperator(Instruction::FSub, zero, Op, 2069 Op->getType(), Name, InsertBefore); 2070 } 2071 2072 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2073 BasicBlock *InsertAtEnd) { 2074 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2075 return new BinaryOperator(Instruction::FSub, zero, Op, 2076 Op->getType(), Name, InsertAtEnd); 2077 } 2078 2079 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2080 Instruction *InsertBefore) { 2081 Constant *C = Constant::getAllOnesValue(Op->getType()); 2082 return new BinaryOperator(Instruction::Xor, Op, C, 2083 Op->getType(), Name, InsertBefore); 2084 } 2085 2086 
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2087 BasicBlock *InsertAtEnd) { 2088 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2089 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2090 Op->getType(), Name, InsertAtEnd); 2091 } 2092 2093 2094 // isConstantAllOnes - Helper function for several functions below 2095 static inline bool isConstantAllOnes(const Value *V) { 2096 if (const Constant *C = dyn_cast<Constant>(V)) 2097 return C->isAllOnesValue(); 2098 return false; 2099 } 2100 2101 bool BinaryOperator::isNeg(const Value *V) { 2102 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2103 if (Bop->getOpcode() == Instruction::Sub) 2104 if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0))) 2105 return C->isNegativeZeroValue(); 2106 return false; 2107 } 2108 2109 bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) { 2110 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2111 if (Bop->getOpcode() == Instruction::FSub) 2112 if (Constant* C = dyn_cast<Constant>(Bop->getOperand(0))) { 2113 if (!IgnoreZeroSign) 2114 IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros(); 2115 return !IgnoreZeroSign ? 
C->isNegativeZeroValue() : C->isZeroValue(); 2116 } 2117 return false; 2118 } 2119 2120 bool BinaryOperator::isNot(const Value *V) { 2121 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2122 return (Bop->getOpcode() == Instruction::Xor && 2123 (isConstantAllOnes(Bop->getOperand(1)) || 2124 isConstantAllOnes(Bop->getOperand(0)))); 2125 return false; 2126 } 2127 2128 Value *BinaryOperator::getNegArgument(Value *BinOp) { 2129 return cast<BinaryOperator>(BinOp)->getOperand(1); 2130 } 2131 2132 const Value *BinaryOperator::getNegArgument(const Value *BinOp) { 2133 return getNegArgument(const_cast<Value*>(BinOp)); 2134 } 2135 2136 Value *BinaryOperator::getFNegArgument(Value *BinOp) { 2137 return cast<BinaryOperator>(BinOp)->getOperand(1); 2138 } 2139 2140 const Value *BinaryOperator::getFNegArgument(const Value *BinOp) { 2141 return getFNegArgument(const_cast<Value*>(BinOp)); 2142 } 2143 2144 Value *BinaryOperator::getNotArgument(Value *BinOp) { 2145 assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!"); 2146 BinaryOperator *BO = cast<BinaryOperator>(BinOp); 2147 Value *Op0 = BO->getOperand(0); 2148 Value *Op1 = BO->getOperand(1); 2149 if (isConstantAllOnes(Op0)) return Op1; 2150 2151 assert(isConstantAllOnes(Op1)); 2152 return Op0; 2153 } 2154 2155 const Value *BinaryOperator::getNotArgument(const Value *BinOp) { 2156 return getNotArgument(const_cast<Value*>(BinOp)); 2157 } 2158 2159 2160 // swapOperands - Exchange the two operands to this instruction. This 2161 // instruction is safe to use on any binary instruction and does not 2162 // modify the semantics of the instruction. If the instruction is 2163 // order dependent (SetLT f.e.) the opcode is changed. 
//
/// Returns true (failure) when the opcode is non-commutative and the swap
/// cannot be performed; returns false on success.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

// The flag accessors below forward to the Operator views of this
// instruction, which own the subclass flag bits.

void BinaryOperator::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void BinaryOperator::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void BinaryOperator::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool BinaryOperator::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool BinaryOperator::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool BinaryOperator::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

/// Copy all poison-generating flags (nuw/nsw, exact, fast-math) from \p V
/// onto this instruction, for whichever operator views \p V supports.
void BinaryOperator::copyIRFlags(const Value *V) {
  // Copy the wrapping flags.
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    setHasNoSignedWrap(OB->hasNoSignedWrap());
    setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    copyFastMathFlags(FP->getFastMathFlags());
}

/// Intersect this instruction's flags with those of \p V: a flag survives
/// only if both instructions carry it (used when merging instructions).
void BinaryOperator::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
    setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    setIsExact(isExact() & PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    FastMathFlags FM = getFastMathFlags();
    FM &= FP->getFastMathFlags();
    copyFastMathFlags(FM);
  }
}


//===----------------------------------------------------------------------===//
//                             FPMathOperator Class
//===----------------------------------------------------------------------===//

/// getFPAccuracy - Get the maximum error permitted by this operation in ULPs.
/// An accuracy of 0.0 means that the operation should be performed with the
/// default precision.  The value is read from the !fpmath metadata attached
/// to the instruction.
float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
    cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}


//===----------------------------------------------------------------------===//
//                                CastInst Class
//===----------------------------------------------------------------------===//

void CastInst::anchor() {}

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
    default: return false;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::Trunc:
      return true;
    case Instruction::BitCast:
      // A bitcast counts only when both sides are integers.
      return getOperand(0)->getType()->isIntegerTy() &&
        getType()->isIntegerTy();
  }
}

/// A cast is lossless iff it is a bitcast that is either an identity cast
/// or a pointer-to-pointer cast.
bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type* SrcTy = getOperand(0)->getType();
  Type* DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// @brief Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          Type *IntPtrTy) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target information may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      // No-op when the integer width equals the pointer width.
      return IntPtrTy->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return IntPtrTy->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

/// @brief Determine if a cast is a no-op.
bool CastInst::isNoopCast(Type *IntPtrTy) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}

/// DataLayout-aware variant: derives the correct pointer-sized integer type
/// (honoring the address space of the pointer operand) before delegating.
bool CastInst::isNoopCast(const DataLayout &DL) const {
  Type *PtrOpTy = nullptr;
  if (getOpcode() == Instruction::PtrToInt)
    PtrOpTy = getOperand(0)->getType();
  else if (getOpcode() == Instruction::IntToPtr)
    PtrOpTy = getType();

  Type *IntPtrTy =
      PtrOpTy ? DL.getIntPtrType(PtrOpTy) : DL.getIntPtrType(getContext(), 0);

  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99,10, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 10:
      // fpext followed by ftrunc is allowed if the bit size returned to is
      // the same as the original, in which case its just a bitcast
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      return 0; // If the types are not the same we can't eliminate it.
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12: {
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    }
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getPointerElementType() == DstTy->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;

    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

/// Factory that constructs the concrete CastInst subclass for \p op,
/// inserted before \p InsertBefore.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

/// As above, but appended to the end of basic block \p InsertAtEnd.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2603 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2604 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2605 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2606 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2607 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2608 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2609 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2610 default: llvm_unreachable("Invalid opcode provided"); 2611 } 2612 } 2613 2614 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2615 const Twine &Name, 2616 Instruction *InsertBefore) { 2617 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2618 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2619 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2620 } 2621 2622 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2623 const Twine &Name, 2624 BasicBlock *InsertAtEnd) { 2625 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2626 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2627 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2628 } 2629 2630 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2631 const Twine &Name, 2632 Instruction *InsertBefore) { 2633 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2634 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2635 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2636 } 2637 2638 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2639 const Twine &Name, 2640 BasicBlock *InsertAtEnd) { 2641 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2642 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2643 return Create(Instruction::SExt, S, Ty, Name, 
InsertAtEnd); 2644 } 2645 2646 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2647 const Twine &Name, 2648 Instruction *InsertBefore) { 2649 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2650 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2651 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2652 } 2653 2654 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2655 const Twine &Name, 2656 BasicBlock *InsertAtEnd) { 2657 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2658 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2659 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2660 } 2661 2662 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2663 const Twine &Name, 2664 BasicBlock *InsertAtEnd) { 2665 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2666 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2667 "Invalid cast"); 2668 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2669 assert((!Ty->isVectorTy() || 2670 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2671 "Invalid cast"); 2672 2673 if (Ty->isIntOrIntVectorTy()) 2674 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2675 2676 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2677 } 2678 2679 /// @brief Create a BitCast or a PtrToInt cast instruction 2680 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2681 const Twine &Name, 2682 Instruction *InsertBefore) { 2683 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2684 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2685 "Invalid cast"); 2686 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2687 assert((!Ty->isVectorTy() || 2688 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2689 "Invalid cast"); 2690 2691 if 
(Ty->isIntOrIntVectorTy()) 2692 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2693 2694 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2695 } 2696 2697 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2698 Value *S, Type *Ty, 2699 const Twine &Name, 2700 BasicBlock *InsertAtEnd) { 2701 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2702 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2703 2704 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2705 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2706 2707 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2708 } 2709 2710 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2711 Value *S, Type *Ty, 2712 const Twine &Name, 2713 Instruction *InsertBefore) { 2714 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2715 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2716 2717 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2718 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2719 2720 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2721 } 2722 2723 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2724 const Twine &Name, 2725 Instruction *InsertBefore) { 2726 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2727 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2728 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2729 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2730 2731 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2732 } 2733 2734 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2735 bool isSigned, const Twine &Name, 2736 Instruction *InsertBefore) { 2737 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2738 "Invalid integer cast"); 2739 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2740 
unsigned DstBits = Ty->getScalarSizeInBits(); 2741 Instruction::CastOps opcode = 2742 (SrcBits == DstBits ? Instruction::BitCast : 2743 (SrcBits > DstBits ? Instruction::Trunc : 2744 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2745 return Create(opcode, C, Ty, Name, InsertBefore); 2746 } 2747 2748 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2749 bool isSigned, const Twine &Name, 2750 BasicBlock *InsertAtEnd) { 2751 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2752 "Invalid cast"); 2753 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2754 unsigned DstBits = Ty->getScalarSizeInBits(); 2755 Instruction::CastOps opcode = 2756 (SrcBits == DstBits ? Instruction::BitCast : 2757 (SrcBits > DstBits ? Instruction::Trunc : 2758 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2759 return Create(opcode, C, Ty, Name, InsertAtEnd); 2760 } 2761 2762 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2763 const Twine &Name, 2764 Instruction *InsertBefore) { 2765 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2766 "Invalid cast"); 2767 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2768 unsigned DstBits = Ty->getScalarSizeInBits(); 2769 Instruction::CastOps opcode = 2770 (SrcBits == DstBits ? Instruction::BitCast : 2771 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2772 return Create(opcode, C, Ty, Name, InsertBefore); 2773 } 2774 2775 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2776 const Twine &Name, 2777 BasicBlock *InsertAtEnd) { 2778 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2779 "Invalid cast"); 2780 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2781 unsigned DstBits = Ty->getScalarSizeInBits(); 2782 Instruction::CastOps opcode = 2783 (SrcBits == DstBits ? Instruction::BitCast : 2784 (SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt)); 2785 return Create(opcode, C, Ty, Name, InsertAtEnd); 2786 } 2787 2788 // Check whether it is valid to call getCastOpcode for these types. 2789 // This routine must be kept in sync with getCastOpcode. 2790 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2791 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2792 return false; 2793 2794 if (SrcTy == DestTy) 2795 return true; 2796 2797 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2798 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2799 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2800 // An element by element cast. Valid if casting the elements is valid. 2801 SrcTy = SrcVecTy->getElementType(); 2802 DestTy = DestVecTy->getElementType(); 2803 } 2804 2805 // Get the bit sizes, we'll need these 2806 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2807 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2808 2809 // Run through the possibilities ... 
2810 if (DestTy->isIntegerTy()) { // Casting to integral 2811 if (SrcTy->isIntegerTy()) // Casting from integral 2812 return true; 2813 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2814 return true; 2815 if (SrcTy->isVectorTy()) // Casting from vector 2816 return DestBits == SrcBits; 2817 // Casting from something else 2818 return SrcTy->isPointerTy(); 2819 } 2820 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2821 if (SrcTy->isIntegerTy()) // Casting from integral 2822 return true; 2823 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2824 return true; 2825 if (SrcTy->isVectorTy()) // Casting from vector 2826 return DestBits == SrcBits; 2827 // Casting from something else 2828 return false; 2829 } 2830 if (DestTy->isVectorTy()) // Casting to vector 2831 return DestBits == SrcBits; 2832 if (DestTy->isPointerTy()) { // Casting to pointer 2833 if (SrcTy->isPointerTy()) // Casting from pointer 2834 return true; 2835 return SrcTy->isIntegerTy(); // Casting from integral 2836 } 2837 if (DestTy->isX86_MMXTy()) { 2838 if (SrcTy->isVectorTy()) 2839 return DestBits == SrcBits; // 64-bit vector to MMX 2840 return false; 2841 } // Casting to something else 2842 return false; 2843 } 2844 2845 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2846 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2847 return false; 2848 2849 if (SrcTy == DestTy) 2850 return true; 2851 2852 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2853 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2854 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2855 // An element by element cast. Valid if casting the elements is valid. 
2856 SrcTy = SrcVecTy->getElementType(); 2857 DestTy = DestVecTy->getElementType(); 2858 } 2859 } 2860 } 2861 2862 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2863 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2864 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2865 } 2866 } 2867 2868 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2869 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2870 2871 // Could still have vectors of pointers if the number of elements doesn't 2872 // match 2873 if (SrcBits == 0 || DestBits == 0) 2874 return false; 2875 2876 if (SrcBits != DestBits) 2877 return false; 2878 2879 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2880 return false; 2881 2882 return true; 2883 } 2884 2885 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2886 const DataLayout &DL) { 2887 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2888 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2889 return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy); 2890 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2891 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2892 return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy); 2893 2894 return isBitCastable(SrcTy, DestTy); 2895 } 2896 2897 // Provide a way to get a "cast" where the cast opcode is inferred from the 2898 // types and size of the operand. This, basically, is a parallel of the 2899 // logic in the castIsValid function below. This axiom should hold: 2900 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2901 // should not assert in castIsValid. In other words, this produces a "correct" 2902 // casting opcode for the arguments passed to it. 2903 // This routine must be kept in sync with isCastable. 
2904 Instruction::CastOps 2905 CastInst::getCastOpcode( 2906 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 2907 Type *SrcTy = Src->getType(); 2908 2909 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 2910 "Only first class types are castable!"); 2911 2912 if (SrcTy == DestTy) 2913 return BitCast; 2914 2915 // FIXME: Check address space sizes here 2916 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2917 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2918 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2919 // An element by element cast. Find the appropriate opcode based on the 2920 // element types. 2921 SrcTy = SrcVecTy->getElementType(); 2922 DestTy = DestVecTy->getElementType(); 2923 } 2924 2925 // Get the bit sizes, we'll need these 2926 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2927 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2928 2929 // Run through the possibilities ... 
2930 if (DestTy->isIntegerTy()) { // Casting to integral 2931 if (SrcTy->isIntegerTy()) { // Casting from integral 2932 if (DestBits < SrcBits) 2933 return Trunc; // int -> smaller int 2934 else if (DestBits > SrcBits) { // its an extension 2935 if (SrcIsSigned) 2936 return SExt; // signed -> SEXT 2937 else 2938 return ZExt; // unsigned -> ZEXT 2939 } else { 2940 return BitCast; // Same size, No-op cast 2941 } 2942 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2943 if (DestIsSigned) 2944 return FPToSI; // FP -> sint 2945 else 2946 return FPToUI; // FP -> uint 2947 } else if (SrcTy->isVectorTy()) { 2948 assert(DestBits == SrcBits && 2949 "Casting vector to integer of different width"); 2950 return BitCast; // Same size, no-op cast 2951 } else { 2952 assert(SrcTy->isPointerTy() && 2953 "Casting from a value that is not first-class type"); 2954 return PtrToInt; // ptr -> int 2955 } 2956 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2957 if (SrcTy->isIntegerTy()) { // Casting from integral 2958 if (SrcIsSigned) 2959 return SIToFP; // sint -> FP 2960 else 2961 return UIToFP; // uint -> FP 2962 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2963 if (DestBits < SrcBits) { 2964 return FPTrunc; // FP -> smaller FP 2965 } else if (DestBits > SrcBits) { 2966 return FPExt; // FP -> larger FP 2967 } else { 2968 return BitCast; // same size, no-op cast 2969 } 2970 } else if (SrcTy->isVectorTy()) { 2971 assert(DestBits == SrcBits && 2972 "Casting vector to floating point of different width"); 2973 return BitCast; // same size, no-op cast 2974 } 2975 llvm_unreachable("Casting pointer or non-first class to float"); 2976 } else if (DestTy->isVectorTy()) { 2977 assert(DestBits == SrcBits && 2978 "Illegal cast to vector (wrong type or size)"); 2979 return BitCast; 2980 } else if (DestTy->isPointerTy()) { 2981 if (SrcTy->isPointerTy()) { 2982 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 2983 
return AddrSpaceCast; 2984 return BitCast; // ptr -> ptr 2985 } else if (SrcTy->isIntegerTy()) { 2986 return IntToPtr; // int -> ptr 2987 } 2988 llvm_unreachable("Casting pointer to other than pointer or int"); 2989 } else if (DestTy->isX86_MMXTy()) { 2990 if (SrcTy->isVectorTy()) { 2991 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 2992 return BitCast; // 64-bit vector to MMX 2993 } 2994 llvm_unreachable("Illegal cast to X86_MMX"); 2995 } 2996 llvm_unreachable("Casting to type that is not first-class"); 2997 } 2998 2999 //===----------------------------------------------------------------------===// 3000 // CastInst SubClass Constructors 3001 //===----------------------------------------------------------------------===// 3002 3003 /// Check that the construction parameters for a CastInst are correct. This 3004 /// could be broken out into the separate constructors but it is useful to have 3005 /// it in one place and to eliminate the redundant code for getting the sizes 3006 /// of the types involved. 3007 bool 3008 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 3009 3010 // Check for type sanity on the arguments 3011 Type *SrcTy = S->getType(); 3012 3013 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 3014 SrcTy->isAggregateType() || DstTy->isAggregateType()) 3015 return false; 3016 3017 // Get the size of the types in bits, we'll need this later 3018 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 3019 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 3020 3021 // If these are vector types, get the lengths of the vectors (using zero for 3022 // scalar types means that checking that vector lengths match also checks that 3023 // scalars are not being converted to vectors or vectors to scalars). 3024 unsigned SrcLength = SrcTy->isVectorTy() ? 3025 cast<VectorType>(SrcTy)->getNumElements() : 0; 3026 unsigned DstLength = DstTy->isVectorTy() ? 
3027 cast<VectorType>(DstTy)->getNumElements() : 0; 3028 3029 // Switch on the opcode provided 3030 switch (op) { 3031 default: return false; // This is an input error 3032 case Instruction::Trunc: 3033 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3034 SrcLength == DstLength && SrcBitSize > DstBitSize; 3035 case Instruction::ZExt: 3036 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3037 SrcLength == DstLength && SrcBitSize < DstBitSize; 3038 case Instruction::SExt: 3039 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3040 SrcLength == DstLength && SrcBitSize < DstBitSize; 3041 case Instruction::FPTrunc: 3042 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3043 SrcLength == DstLength && SrcBitSize > DstBitSize; 3044 case Instruction::FPExt: 3045 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3046 SrcLength == DstLength && SrcBitSize < DstBitSize; 3047 case Instruction::UIToFP: 3048 case Instruction::SIToFP: 3049 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3050 SrcLength == DstLength; 3051 case Instruction::FPToUI: 3052 case Instruction::FPToSI: 3053 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3054 SrcLength == DstLength; 3055 case Instruction::PtrToInt: 3056 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3057 return false; 3058 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3059 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3060 return false; 3061 return SrcTy->getScalarType()->isPointerTy() && 3062 DstTy->getScalarType()->isIntegerTy(); 3063 case Instruction::IntToPtr: 3064 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3065 return false; 3066 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3067 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3068 return false; 3069 return SrcTy->getScalarType()->isIntegerTy() && 3070 
DstTy->getScalarType()->isPointerTy(); 3071 case Instruction::BitCast: { 3072 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3073 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3074 3075 // BitCast implies a no-op cast of type only. No bits change. 3076 // However, you can't cast pointers to anything but pointers. 3077 if (!SrcPtrTy != !DstPtrTy) 3078 return false; 3079 3080 // For non-pointer cases, the cast is okay if the source and destination bit 3081 // widths are identical. 3082 if (!SrcPtrTy) 3083 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3084 3085 // If both are pointers then the address spaces must match. 3086 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3087 return false; 3088 3089 // A vector of pointers must have the same number of elements. 3090 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3091 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3092 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3093 3094 return false; 3095 } 3096 3097 return true; 3098 } 3099 case Instruction::AddrSpaceCast: { 3100 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3101 if (!SrcPtrTy) 3102 return false; 3103 3104 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3105 if (!DstPtrTy) 3106 return false; 3107 3108 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3109 return false; 3110 3111 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3112 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3113 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3114 3115 return false; 3116 } 3117 3118 return true; 3119 } 3120 } 3121 } 3122 3123 TruncInst::TruncInst( 3124 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3125 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3126 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3127 } 3128 
3129 TruncInst::TruncInst( 3130 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3131 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3132 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3133 } 3134 3135 ZExtInst::ZExtInst( 3136 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3137 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3138 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3139 } 3140 3141 ZExtInst::ZExtInst( 3142 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3143 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3144 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3145 } 3146 SExtInst::SExtInst( 3147 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3148 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3149 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3150 } 3151 3152 SExtInst::SExtInst( 3153 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3154 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3155 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3156 } 3157 3158 FPTruncInst::FPTruncInst( 3159 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3160 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3161 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3162 } 3163 3164 FPTruncInst::FPTruncInst( 3165 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3166 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3167 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3168 } 3169 3170 FPExtInst::FPExtInst( 3171 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3172 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3173 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3174 } 3175 3176 FPExtInst::FPExtInst( 3177 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3178 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3179 assert(castIsValid(getOpcode(), S, Ty) 
&& "Illegal FPExt"); 3180 } 3181 3182 UIToFPInst::UIToFPInst( 3183 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3184 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3185 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3186 } 3187 3188 UIToFPInst::UIToFPInst( 3189 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3190 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3191 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3192 } 3193 3194 SIToFPInst::SIToFPInst( 3195 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3196 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3197 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3198 } 3199 3200 SIToFPInst::SIToFPInst( 3201 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3202 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3203 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3204 } 3205 3206 FPToUIInst::FPToUIInst( 3207 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3208 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3209 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3210 } 3211 3212 FPToUIInst::FPToUIInst( 3213 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3214 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3215 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3216 } 3217 3218 FPToSIInst::FPToSIInst( 3219 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3220 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3221 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3222 } 3223 3224 FPToSIInst::FPToSIInst( 3225 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3226 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3227 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3228 } 3229 3230 PtrToIntInst::PtrToIntInst( 3231 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3232 ) : 
CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3233 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3234 } 3235 3236 PtrToIntInst::PtrToIntInst( 3237 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3238 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3239 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3240 } 3241 3242 IntToPtrInst::IntToPtrInst( 3243 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3244 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3245 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3246 } 3247 3248 IntToPtrInst::IntToPtrInst( 3249 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3250 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3251 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3252 } 3253 3254 BitCastInst::BitCastInst( 3255 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3256 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3257 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3258 } 3259 3260 BitCastInst::BitCastInst( 3261 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3262 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3263 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3264 } 3265 3266 AddrSpaceCastInst::AddrSpaceCastInst( 3267 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3268 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3269 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3270 } 3271 3272 AddrSpaceCastInst::AddrSpaceCastInst( 3273 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3274 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) { 3275 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3276 } 3277 3278 //===----------------------------------------------------------------------===// 3279 // CmpInst Classes 3280 
//===----------------------------------------------------------------------===// 3281 3282 void CmpInst::anchor() {} 3283 3284 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3285 Value *RHS, const Twine &Name, Instruction *InsertBefore) 3286 : Instruction(ty, op, 3287 OperandTraits<CmpInst>::op_begin(this), 3288 OperandTraits<CmpInst>::operands(this), 3289 InsertBefore) { 3290 Op<0>() = LHS; 3291 Op<1>() = RHS; 3292 setPredicate((Predicate)predicate); 3293 setName(Name); 3294 } 3295 3296 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3297 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3298 : Instruction(ty, op, 3299 OperandTraits<CmpInst>::op_begin(this), 3300 OperandTraits<CmpInst>::operands(this), 3301 InsertAtEnd) { 3302 Op<0>() = LHS; 3303 Op<1>() = RHS; 3304 setPredicate((Predicate)predicate); 3305 setName(Name); 3306 } 3307 3308 CmpInst * 3309 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3310 const Twine &Name, Instruction *InsertBefore) { 3311 if (Op == Instruction::ICmp) { 3312 if (InsertBefore) 3313 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3314 S1, S2, Name); 3315 else 3316 return new ICmpInst(CmpInst::Predicate(predicate), 3317 S1, S2, Name); 3318 } 3319 3320 if (InsertBefore) 3321 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3322 S1, S2, Name); 3323 else 3324 return new FCmpInst(CmpInst::Predicate(predicate), 3325 S1, S2, Name); 3326 } 3327 3328 CmpInst * 3329 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3330 const Twine &Name, BasicBlock *InsertAtEnd) { 3331 if (Op == Instruction::ICmp) { 3332 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3333 S1, S2, Name); 3334 } 3335 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3336 S1, S2, Name); 3337 } 3338 3339 void CmpInst::swapOperands() { 3340 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3341 IC->swapOperands(); 3342 
else 3343 cast<FCmpInst>(this)->swapOperands(); 3344 } 3345 3346 bool CmpInst::isCommutative() const { 3347 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3348 return IC->isCommutative(); 3349 return cast<FCmpInst>(this)->isCommutative(); 3350 } 3351 3352 bool CmpInst::isEquality() const { 3353 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3354 return IC->isEquality(); 3355 return cast<FCmpInst>(this)->isEquality(); 3356 } 3357 3358 3359 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3360 switch (pred) { 3361 default: llvm_unreachable("Unknown cmp predicate!"); 3362 case ICMP_EQ: return ICMP_NE; 3363 case ICMP_NE: return ICMP_EQ; 3364 case ICMP_UGT: return ICMP_ULE; 3365 case ICMP_ULT: return ICMP_UGE; 3366 case ICMP_UGE: return ICMP_ULT; 3367 case ICMP_ULE: return ICMP_UGT; 3368 case ICMP_SGT: return ICMP_SLE; 3369 case ICMP_SLT: return ICMP_SGE; 3370 case ICMP_SGE: return ICMP_SLT; 3371 case ICMP_SLE: return ICMP_SGT; 3372 3373 case FCMP_OEQ: return FCMP_UNE; 3374 case FCMP_ONE: return FCMP_UEQ; 3375 case FCMP_OGT: return FCMP_ULE; 3376 case FCMP_OLT: return FCMP_UGE; 3377 case FCMP_OGE: return FCMP_ULT; 3378 case FCMP_OLE: return FCMP_UGT; 3379 case FCMP_UEQ: return FCMP_ONE; 3380 case FCMP_UNE: return FCMP_OEQ; 3381 case FCMP_UGT: return FCMP_OLE; 3382 case FCMP_ULT: return FCMP_OGE; 3383 case FCMP_UGE: return FCMP_OLT; 3384 case FCMP_ULE: return FCMP_OGT; 3385 case FCMP_ORD: return FCMP_UNO; 3386 case FCMP_UNO: return FCMP_ORD; 3387 case FCMP_TRUE: return FCMP_FALSE; 3388 case FCMP_FALSE: return FCMP_TRUE; 3389 } 3390 } 3391 3392 void ICmpInst::anchor() {} 3393 3394 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3395 switch (pred) { 3396 default: llvm_unreachable("Unknown icmp predicate!"); 3397 case ICMP_EQ: case ICMP_NE: 3398 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3399 return pred; 3400 case ICMP_UGT: return ICMP_SGT; 3401 case ICMP_ULT: return ICMP_SLT; 3402 case ICMP_UGE: return ICMP_SGE; 
3403 case ICMP_ULE: return ICMP_SLE; 3404 } 3405 } 3406 3407 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3408 switch (pred) { 3409 default: llvm_unreachable("Unknown icmp predicate!"); 3410 case ICMP_EQ: case ICMP_NE: 3411 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3412 return pred; 3413 case ICMP_SGT: return ICMP_UGT; 3414 case ICMP_SLT: return ICMP_ULT; 3415 case ICMP_SGE: return ICMP_UGE; 3416 case ICMP_SLE: return ICMP_ULE; 3417 } 3418 } 3419 3420 /// Initialize a set of values that all satisfy the condition with C. 3421 /// 3422 ConstantRange 3423 ICmpInst::makeConstantRange(Predicate pred, const APInt &C) { 3424 APInt Lower(C); 3425 APInt Upper(C); 3426 uint32_t BitWidth = C.getBitWidth(); 3427 switch (pred) { 3428 default: llvm_unreachable("Invalid ICmp opcode to ConstantRange ctor!"); 3429 case ICmpInst::ICMP_EQ: ++Upper; break; 3430 case ICmpInst::ICMP_NE: ++Lower; break; 3431 case ICmpInst::ICMP_ULT: 3432 Lower = APInt::getMinValue(BitWidth); 3433 // Check for an empty-set condition. 3434 if (Lower == Upper) 3435 return ConstantRange(BitWidth, /*isFullSet=*/false); 3436 break; 3437 case ICmpInst::ICMP_SLT: 3438 Lower = APInt::getSignedMinValue(BitWidth); 3439 // Check for an empty-set condition. 3440 if (Lower == Upper) 3441 return ConstantRange(BitWidth, /*isFullSet=*/false); 3442 break; 3443 case ICmpInst::ICMP_UGT: 3444 ++Lower; Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) 3445 // Check for an empty-set condition. 3446 if (Lower == Upper) 3447 return ConstantRange(BitWidth, /*isFullSet=*/false); 3448 break; 3449 case ICmpInst::ICMP_SGT: 3450 ++Lower; Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) 3451 // Check for an empty-set condition. 3452 if (Lower == Upper) 3453 return ConstantRange(BitWidth, /*isFullSet=*/false); 3454 break; 3455 case ICmpInst::ICMP_ULE: 3456 Lower = APInt::getMinValue(BitWidth); ++Upper; 3457 // Check for a full-set condition. 
3458 if (Lower == Upper) 3459 return ConstantRange(BitWidth, /*isFullSet=*/true); 3460 break; 3461 case ICmpInst::ICMP_SLE: 3462 Lower = APInt::getSignedMinValue(BitWidth); ++Upper; 3463 // Check for a full-set condition. 3464 if (Lower == Upper) 3465 return ConstantRange(BitWidth, /*isFullSet=*/true); 3466 break; 3467 case ICmpInst::ICMP_UGE: 3468 Upper = APInt::getMinValue(BitWidth); // Min = Next(Max) 3469 // Check for a full-set condition. 3470 if (Lower == Upper) 3471 return ConstantRange(BitWidth, /*isFullSet=*/true); 3472 break; 3473 case ICmpInst::ICMP_SGE: 3474 Upper = APInt::getSignedMinValue(BitWidth); // Min = Next(Max) 3475 // Check for a full-set condition. 3476 if (Lower == Upper) 3477 return ConstantRange(BitWidth, /*isFullSet=*/true); 3478 break; 3479 } 3480 return ConstantRange(Lower, Upper); 3481 } 3482 3483 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3484 switch (pred) { 3485 default: llvm_unreachable("Unknown cmp predicate!"); 3486 case ICMP_EQ: case ICMP_NE: 3487 return pred; 3488 case ICMP_SGT: return ICMP_SLT; 3489 case ICMP_SLT: return ICMP_SGT; 3490 case ICMP_SGE: return ICMP_SLE; 3491 case ICMP_SLE: return ICMP_SGE; 3492 case ICMP_UGT: return ICMP_ULT; 3493 case ICMP_ULT: return ICMP_UGT; 3494 case ICMP_UGE: return ICMP_ULE; 3495 case ICMP_ULE: return ICMP_UGE; 3496 3497 case FCMP_FALSE: case FCMP_TRUE: 3498 case FCMP_OEQ: case FCMP_ONE: 3499 case FCMP_UEQ: case FCMP_UNE: 3500 case FCMP_ORD: case FCMP_UNO: 3501 return pred; 3502 case FCMP_OGT: return FCMP_OLT; 3503 case FCMP_OLT: return FCMP_OGT; 3504 case FCMP_OGE: return FCMP_OLE; 3505 case FCMP_OLE: return FCMP_OGE; 3506 case FCMP_UGT: return FCMP_ULT; 3507 case FCMP_ULT: return FCMP_UGT; 3508 case FCMP_UGE: return FCMP_ULE; 3509 case FCMP_ULE: return FCMP_UGE; 3510 } 3511 } 3512 3513 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) { 3514 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3515 3516 switch (pred) { 3517 
default: 3518 llvm_unreachable("Unknown predicate!"); 3519 case CmpInst::ICMP_ULT: 3520 return CmpInst::ICMP_SLT; 3521 case CmpInst::ICMP_ULE: 3522 return CmpInst::ICMP_SLE; 3523 case CmpInst::ICMP_UGT: 3524 return CmpInst::ICMP_SGT; 3525 case CmpInst::ICMP_UGE: 3526 return CmpInst::ICMP_SGE; 3527 } 3528 } 3529 3530 bool CmpInst::isUnsigned(Predicate predicate) { 3531 switch (predicate) { 3532 default: return false; 3533 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3534 case ICmpInst::ICMP_UGE: return true; 3535 } 3536 } 3537 3538 bool CmpInst::isSigned(Predicate predicate) { 3539 switch (predicate) { 3540 default: return false; 3541 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3542 case ICmpInst::ICMP_SGE: return true; 3543 } 3544 } 3545 3546 bool CmpInst::isOrdered(Predicate predicate) { 3547 switch (predicate) { 3548 default: return false; 3549 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3550 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3551 case FCmpInst::FCMP_ORD: return true; 3552 } 3553 } 3554 3555 bool CmpInst::isUnordered(Predicate predicate) { 3556 switch (predicate) { 3557 default: return false; 3558 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3559 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3560 case FCmpInst::FCMP_UNO: return true; 3561 } 3562 } 3563 3564 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3565 switch(predicate) { 3566 default: return false; 3567 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3568 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3569 } 3570 } 3571 3572 bool CmpInst::isFalseWhenEqual(Predicate predicate) { 3573 switch(predicate) { 3574 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT: 3575 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; 
3576 default: return false; 3577 } 3578 } 3579 3580 3581 //===----------------------------------------------------------------------===// 3582 // SwitchInst Implementation 3583 //===----------------------------------------------------------------------===// 3584 3585 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3586 assert(Value && Default && NumReserved); 3587 ReservedSpace = NumReserved; 3588 setNumHungOffUseOperands(2); 3589 allocHungoffUses(ReservedSpace); 3590 3591 Op<0>() = Value; 3592 Op<1>() = Default; 3593 } 3594 3595 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3596 /// switch on and a default destination. The number of additional cases can 3597 /// be specified here to make memory allocation more efficient. This 3598 /// constructor can also autoinsert before another instruction. 3599 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3600 Instruction *InsertBefore) 3601 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3602 nullptr, 0, InsertBefore) { 3603 init(Value, Default, 2+NumCases*2); 3604 } 3605 3606 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3607 /// switch on and a default destination. The number of additional cases can 3608 /// be specified here to make memory allocation more efficient. This 3609 /// constructor also autoinserts at the end of the specified BasicBlock. 
3610 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3611 BasicBlock *InsertAtEnd) 3612 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3613 nullptr, 0, InsertAtEnd) { 3614 init(Value, Default, 2+NumCases*2); 3615 } 3616 3617 SwitchInst::SwitchInst(const SwitchInst &SI) 3618 : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) { 3619 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3620 setNumHungOffUseOperands(SI.getNumOperands()); 3621 Use *OL = getOperandList(); 3622 const Use *InOL = SI.getOperandList(); 3623 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3624 OL[i] = InOL[i]; 3625 OL[i+1] = InOL[i+1]; 3626 } 3627 SubclassOptionalData = SI.SubclassOptionalData; 3628 } 3629 3630 3631 /// addCase - Add an entry to the switch instruction... 3632 /// 3633 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3634 unsigned NewCaseIdx = getNumCases(); 3635 unsigned OpNo = getNumOperands(); 3636 if (OpNo+2 > ReservedSpace) 3637 growOperands(); // Get more space! 3638 // Initialize some new operands. 3639 assert(OpNo+1 < ReservedSpace && "Growing didn't work!"); 3640 setNumHungOffUseOperands(OpNo+2); 3641 CaseIt Case(this, NewCaseIdx); 3642 Case.setValue(OnVal); 3643 Case.setSuccessor(Dest); 3644 } 3645 3646 /// removeCase - This method removes the specified case and its successor 3647 /// from the switch instruction. 3648 void SwitchInst::removeCase(CaseIt i) { 3649 unsigned idx = i.getCaseIndex(); 3650 3651 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!"); 3652 3653 unsigned NumOps = getNumOperands(); 3654 Use *OL = getOperandList(); 3655 3656 // Overwrite this case with the end of the list. 3657 if (2 + (idx + 1) * 2 != NumOps) { 3658 OL[2 + idx * 2] = OL[NumOps - 2]; 3659 OL[2 + idx * 2 + 1] = OL[NumOps - 1]; 3660 } 3661 3662 // Nuke the last value. 
3663 OL[NumOps-2].set(nullptr); 3664 OL[NumOps-2+1].set(nullptr); 3665 setNumHungOffUseOperands(NumOps-2); 3666 } 3667 3668 /// growOperands - grow operands - This grows the operand list in response 3669 /// to a push_back style of operation. This grows the number of ops by 3 times. 3670 /// 3671 void SwitchInst::growOperands() { 3672 unsigned e = getNumOperands(); 3673 unsigned NumOps = e*3; 3674 3675 ReservedSpace = NumOps; 3676 growHungoffUses(ReservedSpace); 3677 } 3678 3679 3680 BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const { 3681 return getSuccessor(idx); 3682 } 3683 unsigned SwitchInst::getNumSuccessorsV() const { 3684 return getNumSuccessors(); 3685 } 3686 void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) { 3687 setSuccessor(idx, B); 3688 } 3689 3690 //===----------------------------------------------------------------------===// 3691 // IndirectBrInst Implementation 3692 //===----------------------------------------------------------------------===// 3693 3694 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 3695 assert(Address && Address->getType()->isPointerTy() && 3696 "Address of indirectbr must be a pointer"); 3697 ReservedSpace = 1+NumDests; 3698 setNumHungOffUseOperands(1); 3699 allocHungoffUses(ReservedSpace); 3700 3701 Op<0>() = Address; 3702 } 3703 3704 3705 /// growOperands - grow operands - This grows the operand list in response 3706 /// to a push_back style of operation. This grows the number of ops by 2 times. 
3707 /// 3708 void IndirectBrInst::growOperands() { 3709 unsigned e = getNumOperands(); 3710 unsigned NumOps = e*2; 3711 3712 ReservedSpace = NumOps; 3713 growHungoffUses(ReservedSpace); 3714 } 3715 3716 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3717 Instruction *InsertBefore) 3718 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr, 3719 nullptr, 0, InsertBefore) { 3720 init(Address, NumCases); 3721 } 3722 3723 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3724 BasicBlock *InsertAtEnd) 3725 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr, 3726 nullptr, 0, InsertAtEnd) { 3727 init(Address, NumCases); 3728 } 3729 3730 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 3731 : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 3732 nullptr, IBI.getNumOperands()) { 3733 allocHungoffUses(IBI.getNumOperands()); 3734 Use *OL = getOperandList(); 3735 const Use *InOL = IBI.getOperandList(); 3736 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 3737 OL[i] = InOL[i]; 3738 SubclassOptionalData = IBI.SubclassOptionalData; 3739 } 3740 3741 /// addDestination - Add a destination. 3742 /// 3743 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 3744 unsigned OpNo = getNumOperands(); 3745 if (OpNo+1 > ReservedSpace) 3746 growOperands(); // Get more space! 3747 // Initialize some new operands. 3748 assert(OpNo < ReservedSpace && "Growing didn't work!"); 3749 setNumHungOffUseOperands(OpNo+1); 3750 getOperandList()[OpNo] = DestBB; 3751 } 3752 3753 /// removeDestination - This method removes the specified successor from the 3754 /// indirectbr instruction. 3755 void IndirectBrInst::removeDestination(unsigned idx) { 3756 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 3757 3758 unsigned NumOps = getNumOperands(); 3759 Use *OL = getOperandList(); 3760 3761 // Replace this value with the last one. 
3762 OL[idx+1] = OL[NumOps-1]; 3763 3764 // Nuke the last value. 3765 OL[NumOps-1].set(nullptr); 3766 setNumHungOffUseOperands(NumOps-1); 3767 } 3768 3769 BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const { 3770 return getSuccessor(idx); 3771 } 3772 unsigned IndirectBrInst::getNumSuccessorsV() const { 3773 return getNumSuccessors(); 3774 } 3775 void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) { 3776 setSuccessor(idx, B); 3777 } 3778 3779 //===----------------------------------------------------------------------===// 3780 // cloneImpl() implementations 3781 //===----------------------------------------------------------------------===// 3782 3783 // Define these methods here so vtables don't get emitted into every translation 3784 // unit that uses these classes. 3785 3786 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 3787 return new (getNumOperands()) GetElementPtrInst(*this); 3788 } 3789 3790 BinaryOperator *BinaryOperator::cloneImpl() const { 3791 return Create(getOpcode(), Op<0>(), Op<1>()); 3792 } 3793 3794 FCmpInst *FCmpInst::cloneImpl() const { 3795 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 3796 } 3797 3798 ICmpInst *ICmpInst::cloneImpl() const { 3799 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 3800 } 3801 3802 ExtractValueInst *ExtractValueInst::cloneImpl() const { 3803 return new ExtractValueInst(*this); 3804 } 3805 3806 InsertValueInst *InsertValueInst::cloneImpl() const { 3807 return new InsertValueInst(*this); 3808 } 3809 3810 AllocaInst *AllocaInst::cloneImpl() const { 3811 AllocaInst *Result = new AllocaInst(getAllocatedType(), 3812 (Value *)getOperand(0), getAlignment()); 3813 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 3814 return Result; 3815 } 3816 3817 LoadInst *LoadInst::cloneImpl() const { 3818 return new LoadInst(getOperand(0), Twine(), isVolatile(), 3819 getAlignment(), getOrdering(), getSynchScope()); 3820 } 3821 3822 StoreInst *StoreInst::cloneImpl() const { 3823 return 
new StoreInst(getOperand(0), getOperand(1), isVolatile(), 3824 getAlignment(), getOrdering(), getSynchScope()); 3825 3826 } 3827 3828 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 3829 AtomicCmpXchgInst *Result = 3830 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 3831 getSuccessOrdering(), getFailureOrdering(), 3832 getSynchScope()); 3833 Result->setVolatile(isVolatile()); 3834 Result->setWeak(isWeak()); 3835 return Result; 3836 } 3837 3838 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 3839 AtomicRMWInst *Result = 3840 new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1), 3841 getOrdering(), getSynchScope()); 3842 Result->setVolatile(isVolatile()); 3843 return Result; 3844 } 3845 3846 FenceInst *FenceInst::cloneImpl() const { 3847 return new FenceInst(getContext(), getOrdering(), getSynchScope()); 3848 } 3849 3850 TruncInst *TruncInst::cloneImpl() const { 3851 return new TruncInst(getOperand(0), getType()); 3852 } 3853 3854 ZExtInst *ZExtInst::cloneImpl() const { 3855 return new ZExtInst(getOperand(0), getType()); 3856 } 3857 3858 SExtInst *SExtInst::cloneImpl() const { 3859 return new SExtInst(getOperand(0), getType()); 3860 } 3861 3862 FPTruncInst *FPTruncInst::cloneImpl() const { 3863 return new FPTruncInst(getOperand(0), getType()); 3864 } 3865 3866 FPExtInst *FPExtInst::cloneImpl() const { 3867 return new FPExtInst(getOperand(0), getType()); 3868 } 3869 3870 UIToFPInst *UIToFPInst::cloneImpl() const { 3871 return new UIToFPInst(getOperand(0), getType()); 3872 } 3873 3874 SIToFPInst *SIToFPInst::cloneImpl() const { 3875 return new SIToFPInst(getOperand(0), getType()); 3876 } 3877 3878 FPToUIInst *FPToUIInst::cloneImpl() const { 3879 return new FPToUIInst(getOperand(0), getType()); 3880 } 3881 3882 FPToSIInst *FPToSIInst::cloneImpl() const { 3883 return new FPToSIInst(getOperand(0), getType()); 3884 } 3885 3886 PtrToIntInst *PtrToIntInst::cloneImpl() const { 3887 return new PtrToIntInst(getOperand(0), getType()); 
3888 } 3889 3890 IntToPtrInst *IntToPtrInst::cloneImpl() const { 3891 return new IntToPtrInst(getOperand(0), getType()); 3892 } 3893 3894 BitCastInst *BitCastInst::cloneImpl() const { 3895 return new BitCastInst(getOperand(0), getType()); 3896 } 3897 3898 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 3899 return new AddrSpaceCastInst(getOperand(0), getType()); 3900 } 3901 3902 CallInst *CallInst::cloneImpl() const { 3903 if (hasOperandBundles()) { 3904 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 3905 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 3906 } 3907 return new(getNumOperands()) CallInst(*this); 3908 } 3909 3910 SelectInst *SelectInst::cloneImpl() const { 3911 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3912 } 3913 3914 VAArgInst *VAArgInst::cloneImpl() const { 3915 return new VAArgInst(getOperand(0), getType()); 3916 } 3917 3918 ExtractElementInst *ExtractElementInst::cloneImpl() const { 3919 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 3920 } 3921 3922 InsertElementInst *InsertElementInst::cloneImpl() const { 3923 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3924 } 3925 3926 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 3927 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 3928 } 3929 3930 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 3931 3932 LandingPadInst *LandingPadInst::cloneImpl() const { 3933 return new LandingPadInst(*this); 3934 } 3935 3936 ReturnInst *ReturnInst::cloneImpl() const { 3937 return new(getNumOperands()) ReturnInst(*this); 3938 } 3939 3940 BranchInst *BranchInst::cloneImpl() const { 3941 return new(getNumOperands()) BranchInst(*this); 3942 } 3943 3944 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 3945 3946 IndirectBrInst *IndirectBrInst::cloneImpl() const { 3947 return new IndirectBrInst(*this); 3948 } 3949 
3950 InvokeInst *InvokeInst::cloneImpl() const { 3951 if (hasOperandBundles()) { 3952 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 3953 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 3954 } 3955 return new(getNumOperands()) InvokeInst(*this); 3956 } 3957 3958 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 3959 3960 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 3961 return new (getNumOperands()) CleanupReturnInst(*this); 3962 } 3963 3964 CatchReturnInst *CatchReturnInst::cloneImpl() const { 3965 return new (getNumOperands()) CatchReturnInst(*this); 3966 } 3967 3968 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 3969 return new CatchSwitchInst(*this); 3970 } 3971 3972 FuncletPadInst *FuncletPadInst::cloneImpl() const { 3973 return new (getNumOperands()) FuncletPadInst(*this); 3974 } 3975 3976 UnreachableInst *UnreachableInst::cloneImpl() const { 3977 LLVMContext &Context = getContext(); 3978 return new UnreachableInst(Context); 3979 } 3980