//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

/// getAllocationSizeInBits - Return the total size, in bits, of the memory
/// this alloca reserves: the allocated type's alloc size, scaled by the array
/// size operand when this is an array allocation.  Returns None when the
/// array size is not a ConstantInt, since the total is then not a
/// compile-time constant.
Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    // NOTE(review): unchecked 64-bit multiply; can wrap for pathologically
    // large constant array sizes.
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

/// getCallee - Return the operand slot holding the called value.  The callee
/// lives at the tail of the operand list: for a call it is the very last
/// operand; for an invoke it is followed by the two successor basic blocks,
/// hence op_end() - 3.
User::op_iterator CallSite::getCallee() const {
  Instruction *II(getInstruction());
  return isCall()
             ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
             : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select: the condition must be <n x i1> and both selected values
    // must be vectors with the same element count as the condition.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

// Copy constructor.  PHI nodes keep their operands in hung-off storage (note
// the nullptr operand list passed to Instruction), so the space for the
// incoming value/block pairs must be allocated explicitly before copying.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  // Incoming values and their source blocks are stored in two parallel
  // arrays; copy both.
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.  Clearing the Use first keeps the removed value's
  // use list consistent before the operand count shrinks.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // All incoming values are this PHI itself: degenerate self-referential
  // node, treated as undef.
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    // Self-references and undefs are ignored; only distinct non-undef
    // incoming values can disqualify the PHI.
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

// Note: landing pads use hung-off operand storage; the constructors pass a
// null operand list / zero operands to Instruction and let init() reserve
// the clause slots.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

// Copy constructor: duplicates the clause operands and the cleanup flag.
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

// init - Reserve clause slots, but start with zero actual operands; clauses
// are appended later via addClause().
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// addClause - Append one catch/filter clause operand, growing storage first
// if necessary.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands(); // Capture slot index before growing.
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

// init - Fill in the operands of a call whose storage has already been
// allocated: [args...][bundle operands...][callee], with the callee in the
// last slot (Op<-1>).
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  Op<-1>() = Func;

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  // Bundle operands are laid out immediately after the explicit arguments.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

// init - Zero-argument form: the callee is the only operand and is checked
// against a nullary function type derived from its pointer type.
void CallInst::init(Value *Func, const Twine &NameStr) {
  FTy =
      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  Op<-1>() = Func;

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1,
          InsertBefore) {
  init(Func, Name);
}

CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Func, Name);
}

// Copy constructor: clones operands, bundle metadata, calling convention and
// tail-call kind; the clone is left uninserted.
CallInst::CallInst(const CallInst &CI)
    : CallBase<CallInst>(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
                         OperandTraits<CallBase<CallInst>>::op_end(this) -
                             CI.getNumOperands(),
                         CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

// Create - Clone CI but with a (possibly different) set of operand bundles
// OpB, preserving its attributes, flags, and debug location.
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
                                 InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

// createMalloc - Shared implementation behind the CreateMalloc overloads.
// Exactly one of InsertBefore / InsertAtEnd must be set.  Returns either the
// malloc call itself or a bitcast of it to AllocTy* (see the expansion
// comment below).
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  Value *MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    // InsertAtEnd path: the call is created uninserted and is only pushed
    // into the block when a bitcast is needed; otherwise inserting the
    // result is the caller's responsibility (see CreateMalloc's note).
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc)) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

// createFree - Shared implementation behind the CreateFree overloads.
// Exactly one of InsertBefore / InsertAtEnd must be set.  Source is bitcast
// to i8* if needed before the call.
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    // InsertAtEnd path: only the cast is inserted; inserting the free call
    // itself is the caller's responsibility (see CreateFree's note).
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

// init - Fill in the operands of an invoke whose storage has already been
// allocated: [args...][bundle operands...][Fn][IfNormal][IfException], i.e.
// the callee and the two successor blocks occupy the last three slots.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
         "NumOperands not set up?");
  Op<-3>() = Fn;
  Op<-2>() = IfNormal;
  Op<-1>() = IfException;

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  // Bundle operands live between the explicit arguments and the trailing
  // callee/successor slots.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

// Copy constructor: clones operands, bundle metadata and calling convention;
// the clone is left uninserted.
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase<InvokeInst>(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
                           OperandTraits<CallBase<InvokeInst>>::op_end(this) -
                               II.getNumOperands(),
                           II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

// Create - Clone II but with a (possibly different) set of operand bundles
// OpB, preserving its attributes, flags, and debug location.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpB,
                                   II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

// getLandingPadInst - Return the landingpad instruction of this invoke's
// unwind destination (expected to be the block's first non-PHI instruction;
// the cast asserts this in +Asserts builds).
LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

// Note: `!!retVal` yields 1 when a return value is present and 0 for a void
// return, sizing the operand list to 0 or 1 accordingly.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                  CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

// init - Operand 0 is the cleanup pad; operand 1, when present, is the
// unwind destination.  Bit 0 of the subclass data records whether the
// unwind destination exists.
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                        CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
// init - Operand 0 is the catch pad; operand 1 is the successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
750 //===----------------------------------------------------------------------===// 751 752 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 753 unsigned NumReservedValues, 754 const Twine &NameStr, 755 Instruction *InsertBefore) 756 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 757 InsertBefore) { 758 if (UnwindDest) 759 ++NumReservedValues; 760 init(ParentPad, UnwindDest, NumReservedValues + 1); 761 setName(NameStr); 762 } 763 764 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 765 unsigned NumReservedValues, 766 const Twine &NameStr, BasicBlock *InsertAtEnd) 767 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 768 InsertAtEnd) { 769 if (UnwindDest) 770 ++NumReservedValues; 771 init(ParentPad, UnwindDest, NumReservedValues + 1); 772 setName(NameStr); 773 } 774 775 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI) 776 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr, 777 CSI.getNumOperands()) { 778 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands()); 779 setNumHungOffUseOperands(ReservedSpace); 780 Use *OL = getOperandList(); 781 const Use *InOL = CSI.getOperandList(); 782 for (unsigned I = 1, E = ReservedSpace; I != E; ++I) 783 OL[I] = InOL[I]; 784 } 785 786 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest, 787 unsigned NumReservedValues) { 788 assert(ParentPad && NumReservedValues); 789 790 ReservedSpace = NumReservedValues; 791 setNumHungOffUseOperands(UnwindDest ? 2 : 1); 792 allocHungoffUses(ReservedSpace); 793 794 Op<0>() = ParentPad; 795 if (UnwindDest) { 796 setInstructionSubclassData(getSubclassDataFromInstruction() | 1); 797 setUnwindDest(UnwindDest); 798 } 799 } 800 801 /// growOperands - grow operands - This grows the operand list in response to a 802 /// push_back style of operation. This grows the number of ops by 2 times. 
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Already enough reserved (hung-off) space for Size more operands.
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

/// Append a new handler successor, growing the hung-off use list if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

// Fill in the operand list: Args occupy the leading slots, the parent pad is
// set via setParentPad (the last operand).
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  std::copy(Args.begin(), Args.end(), op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

// Sanity-check a conditional branch: the condition must be i1.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

// Branch operands are addressed from the end of the use array:
// Op<-1> is the taken ("true") destination, Op<-2> the "false" destination,
// and Op<-3> the condition (the latter two only for conditional branches).
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

// Normalize the array-size operand: a null Amt means a single element
// (constant i32 1).
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}

// The result type is a pointer (in AddrSpace) to the allocated type; the
// single operand is the array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// Alignment is packed into the low 5 bits of the subclass data as
// Log2_32(Align) + 1, so a stored 0 encodes "no alignment specified".
void AllocaInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             (Log2_32(Align) + 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

// True unless the array size is the constant 1 (a non-constant size is
// conservatively treated as an array allocation).
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// Fully-general constructor: the loaded type Ty must match the pointee type
// of Ptr.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// const char * name variants avoid constructing a Twine when the name is
// empty or a plain C string.
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

// Alignment is packed into bits 1-5 of the subclass data as
// Log2_32(Align) + 1, so a stored 0 encodes "no alignment specified".
// (Bit 0 is used for other load state; the mask below leaves it untouched.)
void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

// Operand 0 is the stored value, operand 1 the pointer; the pointee type
// must match the stored value's type.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, 1181 BasicBlock *InsertAtEnd) 1182 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1183 SyncScope::System, InsertAtEnd) {} 1184 1185 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1186 unsigned Align, AtomicOrdering Order, 1187 SyncScope::ID SSID, 1188 Instruction *InsertBefore) 1189 : Instruction(Type::getVoidTy(val->getContext()), Store, 1190 OperandTraits<StoreInst>::op_begin(this), 1191 OperandTraits<StoreInst>::operands(this), 1192 InsertBefore) { 1193 Op<0>() = val; 1194 Op<1>() = addr; 1195 setVolatile(isVolatile); 1196 setAlignment(Align); 1197 setAtomic(Order, SSID); 1198 AssertOK(); 1199 } 1200 1201 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1202 unsigned Align, AtomicOrdering Order, 1203 SyncScope::ID SSID, 1204 BasicBlock *InsertAtEnd) 1205 : Instruction(Type::getVoidTy(val->getContext()), Store, 1206 OperandTraits<StoreInst>::op_begin(this), 1207 OperandTraits<StoreInst>::operands(this), 1208 InsertAtEnd) { 1209 Op<0>() = val; 1210 Op<1>() = addr; 1211 setVolatile(isVolatile); 1212 setAlignment(Align); 1213 setAtomic(Order, SSID); 1214 AssertOK(); 1215 } 1216 1217 void StoreInst::setAlignment(unsigned Align) { 1218 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); 1219 assert(Align <= MaximumAlignment && 1220 "Alignment is greater than MaximumAlignment!"); 1221 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | 1222 ((Log2_32(Align)+1) << 1)); 1223 assert(getAlignment() == Align && "Alignment representation error!"); 1224 } 1225 1226 //===----------------------------------------------------------------------===// 1227 // AtomicCmpXchgInst Implementation 1228 //===----------------------------------------------------------------------===// 1229 1230 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, 1231 AtomicOrdering SuccessOrdering, 1232 AtomicOrdering 
FailureOrdering, 1233 SyncScope::ID SSID) { 1234 Op<0>() = Ptr; 1235 Op<1>() = Cmp; 1236 Op<2>() = NewVal; 1237 setSuccessOrdering(SuccessOrdering); 1238 setFailureOrdering(FailureOrdering); 1239 setSyncScopeID(SSID); 1240 1241 assert(getOperand(0) && getOperand(1) && getOperand(2) && 1242 "All operands must be non-null!"); 1243 assert(getOperand(0)->getType()->isPointerTy() && 1244 "Ptr must have pointer type!"); 1245 assert(getOperand(1)->getType() == 1246 cast<PointerType>(getOperand(0)->getType())->getElementType() 1247 && "Ptr must be a pointer to Cmp type!"); 1248 assert(getOperand(2)->getType() == 1249 cast<PointerType>(getOperand(0)->getType())->getElementType() 1250 && "Ptr must be a pointer to NewVal type!"); 1251 assert(SuccessOrdering != AtomicOrdering::NotAtomic && 1252 "AtomicCmpXchg instructions must be atomic!"); 1253 assert(FailureOrdering != AtomicOrdering::NotAtomic && 1254 "AtomicCmpXchg instructions must be atomic!"); 1255 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) && 1256 "AtomicCmpXchg failure argument shall be no stronger than the success " 1257 "argument"); 1258 assert(FailureOrdering != AtomicOrdering::Release && 1259 FailureOrdering != AtomicOrdering::AcquireRelease && 1260 "AtomicCmpXchg failure ordering cannot include release semantics"); 1261 } 1262 1263 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1264 AtomicOrdering SuccessOrdering, 1265 AtomicOrdering FailureOrdering, 1266 SyncScope::ID SSID, 1267 Instruction *InsertBefore) 1268 : Instruction( 1269 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1270 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1271 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) { 1272 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1273 } 1274 1275 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1276 AtomicOrdering SuccessOrdering, 1277 AtomicOrdering FailureOrdering, 
1278 SyncScope::ID SSID, 1279 BasicBlock *InsertAtEnd) 1280 : Instruction( 1281 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1282 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1283 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) { 1284 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1285 } 1286 1287 //===----------------------------------------------------------------------===// 1288 // AtomicRMWInst Implementation 1289 //===----------------------------------------------------------------------===// 1290 1291 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, 1292 AtomicOrdering Ordering, 1293 SyncScope::ID SSID) { 1294 Op<0>() = Ptr; 1295 Op<1>() = Val; 1296 setOperation(Operation); 1297 setOrdering(Ordering); 1298 setSyncScopeID(SSID); 1299 1300 assert(getOperand(0) && getOperand(1) && 1301 "All operands must be non-null!"); 1302 assert(getOperand(0)->getType()->isPointerTy() && 1303 "Ptr must have pointer type!"); 1304 assert(getOperand(1)->getType() == 1305 cast<PointerType>(getOperand(0)->getType())->getElementType() 1306 && "Ptr must be a pointer to Val type!"); 1307 assert(Ordering != AtomicOrdering::NotAtomic && 1308 "AtomicRMW instructions must be atomic!"); 1309 } 1310 1311 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1312 AtomicOrdering Ordering, 1313 SyncScope::ID SSID, 1314 Instruction *InsertBefore) 1315 : Instruction(Val->getType(), AtomicRMW, 1316 OperandTraits<AtomicRMWInst>::op_begin(this), 1317 OperandTraits<AtomicRMWInst>::operands(this), 1318 InsertBefore) { 1319 Init(Operation, Ptr, Val, Ordering, SSID); 1320 } 1321 1322 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1323 AtomicOrdering Ordering, 1324 SyncScope::ID SSID, 1325 BasicBlock *InsertAtEnd) 1326 : Instruction(Val->getType(), AtomicRMW, 1327 OperandTraits<AtomicRMWInst>::op_begin(this), 1328 OperandTraits<AtomicRMWInst>::operands(this), 1329 
InsertAtEnd) { 1330 Init(Operation, Ptr, Val, Ordering, SSID); 1331 } 1332 1333 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1334 switch (Op) { 1335 case AtomicRMWInst::Xchg: 1336 return "xchg"; 1337 case AtomicRMWInst::Add: 1338 return "add"; 1339 case AtomicRMWInst::Sub: 1340 return "sub"; 1341 case AtomicRMWInst::And: 1342 return "and"; 1343 case AtomicRMWInst::Nand: 1344 return "nand"; 1345 case AtomicRMWInst::Or: 1346 return "or"; 1347 case AtomicRMWInst::Xor: 1348 return "xor"; 1349 case AtomicRMWInst::Max: 1350 return "max"; 1351 case AtomicRMWInst::Min: 1352 return "min"; 1353 case AtomicRMWInst::UMax: 1354 return "umax"; 1355 case AtomicRMWInst::UMin: 1356 return "umin"; 1357 case AtomicRMWInst::BAD_BINOP: 1358 return "<invalid operation>"; 1359 } 1360 1361 llvm_unreachable("invalid atomicrmw operation"); 1362 } 1363 1364 //===----------------------------------------------------------------------===// 1365 // FenceInst Implementation 1366 //===----------------------------------------------------------------------===// 1367 1368 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1369 SyncScope::ID SSID, 1370 Instruction *InsertBefore) 1371 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { 1372 setOrdering(Ordering); 1373 setSyncScopeID(SSID); 1374 } 1375 1376 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1377 SyncScope::ID SSID, 1378 BasicBlock *InsertAtEnd) 1379 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { 1380 setOrdering(Ordering); 1381 setSyncScopeID(SSID); 1382 } 1383 1384 //===----------------------------------------------------------------------===// 1385 // GetElementPtrInst Implementation 1386 //===----------------------------------------------------------------------===// 1387 1388 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, 1389 const Twine &Name) { 1390 assert(getNumOperands() == 1 + IdxList.size() && 1391 "NumOperands not initialized?"); 1392 
Op<0>() = Ptr; 1393 std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1); 1394 setName(Name); 1395 } 1396 1397 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) 1398 : Instruction(GEPI.getType(), GetElementPtr, 1399 OperandTraits<GetElementPtrInst>::op_end(this) - 1400 GEPI.getNumOperands(), 1401 GEPI.getNumOperands()), 1402 SourceElementType(GEPI.SourceElementType), 1403 ResultElementType(GEPI.ResultElementType) { 1404 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); 1405 SubclassOptionalData = GEPI.SubclassOptionalData; 1406 } 1407 1408 /// getIndexedType - Returns the type of the element that would be accessed with 1409 /// a gep instruction with the specified parameters. 1410 /// 1411 /// The Idxs pointer should point to a continuous piece of memory containing the 1412 /// indices, either as Value* or uint64_t. 1413 /// 1414 /// A null type is returned if the indices are invalid for the specified 1415 /// pointer type. 1416 /// 1417 template <typename IndexTy> 1418 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) { 1419 // Handle the special case of the empty set index set, which is always valid. 1420 if (IdxList.empty()) 1421 return Agg; 1422 1423 // If there is at least one index, the top level type must be sized, otherwise 1424 // it cannot be 'stepped over'. 1425 if (!Agg->isSized()) 1426 return nullptr; 1427 1428 unsigned CurIdx = 1; 1429 for (; CurIdx != IdxList.size(); ++CurIdx) { 1430 CompositeType *CT = dyn_cast<CompositeType>(Agg); 1431 if (!CT || CT->isPointerTy()) return nullptr; 1432 IndexTy Index = IdxList[CurIdx]; 1433 if (!CT->indexValid(Index)) return nullptr; 1434 Agg = CT->getTypeAtIndex(Index); 1435 } 1436 return CurIdx == IdxList.size() ? 
Agg : nullptr; 1437 } 1438 1439 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1440 return getIndexedTypeInternal(Ty, IdxList); 1441 } 1442 1443 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1444 ArrayRef<Constant *> IdxList) { 1445 return getIndexedTypeInternal(Ty, IdxList); 1446 } 1447 1448 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1449 return getIndexedTypeInternal(Ty, IdxList); 1450 } 1451 1452 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1453 /// zeros. If so, the result pointer and the first operand have the same 1454 /// value, just potentially different types. 1455 bool GetElementPtrInst::hasAllZeroIndices() const { 1456 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1457 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1458 if (!CI->isZero()) return false; 1459 } else { 1460 return false; 1461 } 1462 } 1463 return true; 1464 } 1465 1466 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1467 /// constant integers. If so, the result pointer and the first operand have 1468 /// a constant offset between them. 1469 bool GetElementPtrInst::hasAllConstantIndices() const { 1470 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1471 if (!isa<ConstantInt>(getOperand(i))) 1472 return false; 1473 } 1474 return true; 1475 } 1476 1477 void GetElementPtrInst::setIsInBounds(bool B) { 1478 cast<GEPOperator>(this)->setIsInBounds(B); 1479 } 1480 1481 bool GetElementPtrInst::isInBounds() const { 1482 return cast<GEPOperator>(this)->isInBounds(); 1483 } 1484 1485 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1486 APInt &Offset) const { 1487 // Delegate to the generic GEPOperator implementation. 
1488 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1489 } 1490 1491 //===----------------------------------------------------------------------===// 1492 // ExtractElementInst Implementation 1493 //===----------------------------------------------------------------------===// 1494 1495 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1496 const Twine &Name, 1497 Instruction *InsertBef) 1498 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1499 ExtractElement, 1500 OperandTraits<ExtractElementInst>::op_begin(this), 1501 2, InsertBef) { 1502 assert(isValidOperands(Val, Index) && 1503 "Invalid extractelement instruction operands!"); 1504 Op<0>() = Val; 1505 Op<1>() = Index; 1506 setName(Name); 1507 } 1508 1509 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1510 const Twine &Name, 1511 BasicBlock *InsertAE) 1512 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1513 ExtractElement, 1514 OperandTraits<ExtractElementInst>::op_begin(this), 1515 2, InsertAE) { 1516 assert(isValidOperands(Val, Index) && 1517 "Invalid extractelement instruction operands!"); 1518 1519 Op<0>() = Val; 1520 Op<1>() = Index; 1521 setName(Name); 1522 } 1523 1524 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1525 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1526 return false; 1527 return true; 1528 } 1529 1530 //===----------------------------------------------------------------------===// 1531 // InsertElementInst Implementation 1532 //===----------------------------------------------------------------------===// 1533 1534 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1535 const Twine &Name, 1536 Instruction *InsertBef) 1537 : Instruction(Vec->getType(), InsertElement, 1538 OperandTraits<InsertElementInst>::op_begin(this), 1539 3, InsertBef) { 1540 assert(isValidOperands(Vec, Elt, Index) && 1541 "Invalid insertelement 
instruction operands!"); 1542 Op<0>() = Vec; 1543 Op<1>() = Elt; 1544 Op<2>() = Index; 1545 setName(Name); 1546 } 1547 1548 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1549 const Twine &Name, 1550 BasicBlock *InsertAE) 1551 : Instruction(Vec->getType(), InsertElement, 1552 OperandTraits<InsertElementInst>::op_begin(this), 1553 3, InsertAE) { 1554 assert(isValidOperands(Vec, Elt, Index) && 1555 "Invalid insertelement instruction operands!"); 1556 1557 Op<0>() = Vec; 1558 Op<1>() = Elt; 1559 Op<2>() = Index; 1560 setName(Name); 1561 } 1562 1563 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1564 const Value *Index) { 1565 if (!Vec->getType()->isVectorTy()) 1566 return false; // First operand of insertelement must be vector type. 1567 1568 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1569 return false;// Second operand of insertelement must be vector element type. 1570 1571 if (!Index->getType()->isIntegerTy()) 1572 return false; // Third operand of insertelement must be i32. 
1573 return true; 1574 } 1575 1576 //===----------------------------------------------------------------------===// 1577 // ShuffleVectorInst Implementation 1578 //===----------------------------------------------------------------------===// 1579 1580 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1581 const Twine &Name, 1582 Instruction *InsertBefore) 1583 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1584 cast<VectorType>(Mask->getType())->getNumElements()), 1585 ShuffleVector, 1586 OperandTraits<ShuffleVectorInst>::op_begin(this), 1587 OperandTraits<ShuffleVectorInst>::operands(this), 1588 InsertBefore) { 1589 assert(isValidOperands(V1, V2, Mask) && 1590 "Invalid shuffle vector instruction operands!"); 1591 Op<0>() = V1; 1592 Op<1>() = V2; 1593 Op<2>() = Mask; 1594 setName(Name); 1595 } 1596 1597 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1598 const Twine &Name, 1599 BasicBlock *InsertAtEnd) 1600 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1601 cast<VectorType>(Mask->getType())->getNumElements()), 1602 ShuffleVector, 1603 OperandTraits<ShuffleVectorInst>::op_begin(this), 1604 OperandTraits<ShuffleVectorInst>::operands(this), 1605 InsertAtEnd) { 1606 assert(isValidOperands(V1, V2, Mask) && 1607 "Invalid shuffle vector instruction operands!"); 1608 1609 Op<0>() = V1; 1610 Op<1>() = V2; 1611 Op<2>() = Mask; 1612 setName(Name); 1613 } 1614 1615 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1616 const Value *Mask) { 1617 // V1 and V2 must be vectors of the same type. 1618 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1619 return false; 1620 1621 // Mask must be vector of i32. 1622 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1623 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1624 return false; 1625 1626 // Check to see if Mask is valid. 
1627 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1628 return true; 1629 1630 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1631 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1632 for (Value *Op : MV->operands()) { 1633 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1634 if (CI->uge(V1Size*2)) 1635 return false; 1636 } else if (!isa<UndefValue>(Op)) { 1637 return false; 1638 } 1639 } 1640 return true; 1641 } 1642 1643 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1644 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1645 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1646 if (CDS->getElementAsInteger(i) >= V1Size*2) 1647 return false; 1648 return true; 1649 } 1650 1651 // The bitcode reader can create a place holder for a forward reference 1652 // used as the shuffle mask. When this occurs, the shuffle mask will 1653 // fall into this case and fail. To avoid this error, do this bit of 1654 // ugliness to allow such a mask pass. 
1655 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1656 if (CE->getOpcode() == Instruction::UserOp1) 1657 return true; 1658 1659 return false; 1660 } 1661 1662 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) { 1663 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1664 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1665 return CDS->getElementAsInteger(i); 1666 Constant *C = Mask->getAggregateElement(i); 1667 if (isa<UndefValue>(C)) 1668 return -1; 1669 return cast<ConstantInt>(C)->getZExtValue(); 1670 } 1671 1672 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 1673 SmallVectorImpl<int> &Result) { 1674 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1675 1676 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1677 for (unsigned i = 0; i != NumElts; ++i) 1678 Result.push_back(CDS->getElementAsInteger(i)); 1679 return; 1680 } 1681 for (unsigned i = 0; i != NumElts; ++i) { 1682 Constant *C = Mask->getAggregateElement(i); 1683 Result.push_back(isa<UndefValue>(C) ? -1 : 1684 cast<ConstantInt>(C)->getZExtValue()); 1685 } 1686 } 1687 1688 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1689 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1690 bool UsesLHS = false; 1691 bool UsesRHS = false; 1692 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1693 if (Mask[i] == -1) 1694 continue; 1695 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1696 "Out-of-bounds shuffle mask element"); 1697 UsesLHS |= (Mask[i] < NumOpElts); 1698 UsesRHS |= (Mask[i] >= NumOpElts); 1699 if (UsesLHS && UsesRHS) 1700 return false; 1701 } 1702 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1703 return true; 1704 } 1705 1706 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1707 // We don't have vector operand size information, so assume operands are the 1708 // same size as the mask. 
1709 return isSingleSourceMaskImpl(Mask, Mask.size()); 1710 } 1711 1712 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1713 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1714 return false; 1715 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1716 if (Mask[i] == -1) 1717 continue; 1718 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1719 return false; 1720 } 1721 return true; 1722 } 1723 1724 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1725 // We don't have vector operand size information, so assume operands are the 1726 // same size as the mask. 1727 return isIdentityMaskImpl(Mask, Mask.size()); 1728 } 1729 1730 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1731 if (!isSingleSourceMask(Mask)) 1732 return false; 1733 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1734 if (Mask[i] == -1) 1735 continue; 1736 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1737 return false; 1738 } 1739 return true; 1740 } 1741 1742 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1743 if (!isSingleSourceMask(Mask)) 1744 return false; 1745 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1746 if (Mask[i] == -1) 1747 continue; 1748 if (Mask[i] != 0 && Mask[i] != NumElts) 1749 return false; 1750 } 1751 return true; 1752 } 1753 1754 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1755 // Select is differentiated from identity. It requires using both sources. 

  if (isSingleSourceMask(Mask))
    return false;
  // Every defined lane must keep its own position, choosing lane i from
  // either the first (i) or the second (NumElts + i) operand.
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.  (Undef lanes disqualify the mask here.)
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityWithPadding() const {
  // The result must be wider than the (first) source operand.
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  SmallVector<int, 16> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  /* All extending must be with undef elements. */

  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

/// True when this shuffle just takes a leading subvector of one operand.
bool ShuffleVectorInst::isIdentityWithExtract() const {
  // The result must be narrower than the (first) source operand.
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  /* There's no fundamental reason why we require at least one index
     (other than weirdness with &*IdxBegin being invalid; see
     getelementptr's init routine for example). But there's no
     present need to support it. */

  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate (array or struct) type.
1904 // 1905 Type *ExtractValueInst::getIndexedType(Type *Agg, 1906 ArrayRef<unsigned> Idxs) { 1907 for (unsigned Index : Idxs) { 1908 // We can't use CompositeType::indexValid(Index) here. 1909 // indexValid() always returns true for arrays because getelementptr allows 1910 // out-of-bounds indices. Since we don't allow those for extractvalue and 1911 // insertvalue we need to check array indexing manually. 1912 // Since the only other types we can index into are struct types it's just 1913 // as easy to check those manually as well. 1914 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 1915 if (Index >= AT->getNumElements()) 1916 return nullptr; 1917 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 1918 if (Index >= ST->getNumElements()) 1919 return nullptr; 1920 } else { 1921 // Not a valid type to index into. 1922 return nullptr; 1923 } 1924 1925 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 1926 } 1927 return const_cast<Type*>(Agg); 1928 } 1929 1930 //===----------------------------------------------------------------------===// 1931 // BinaryOperator Class 1932 //===----------------------------------------------------------------------===// 1933 1934 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1935 Type *Ty, const Twine &Name, 1936 Instruction *InsertBefore) 1937 : Instruction(Ty, iType, 1938 OperandTraits<BinaryOperator>::op_begin(this), 1939 OperandTraits<BinaryOperator>::operands(this), 1940 InsertBefore) { 1941 Op<0>() = S1; 1942 Op<1>() = S2; 1943 setName(Name); 1944 AssertOK(); 1945 } 1946 1947 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1948 Type *Ty, const Twine &Name, 1949 BasicBlock *InsertAtEnd) 1950 : Instruction(Ty, iType, 1951 OperandTraits<BinaryOperator>::op_begin(this), 1952 OperandTraits<BinaryOperator>::operands(this), 1953 InsertAtEnd) { 1954 Op<0>() = S1; 1955 Op<1>() = S2; 1956 setName(Name); 1957 AssertOK(); 1958 } 1959 1960 void BinaryOperator::AssertOK() { 1961 Value 
       *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  // One check block per opcode family: result type must match the operands,
  // and the type class (integer vs. floating point) must fit the opcode.
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

/// Create a binary operator, inserting it before \p InsertBefore (may be
/// null).  The result type is the common operand type.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Build detached, then append to the block.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Integer negation is represented as "sub 0, Op"; getZeroValueForNegation
// picks the zero constant appropriate for the type.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

// Same as CreateNeg, but with the no-signed-wrap flag set.
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator
*BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2067 BasicBlock *InsertAtEnd) { 2068 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2069 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2070 } 2071 2072 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2073 Instruction *InsertBefore) { 2074 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2075 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2076 } 2077 2078 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2079 BasicBlock *InsertAtEnd) { 2080 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2081 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2082 } 2083 2084 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2085 Instruction *InsertBefore) { 2086 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2087 return new BinaryOperator(Instruction::FSub, zero, Op, 2088 Op->getType(), Name, InsertBefore); 2089 } 2090 2091 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2092 BasicBlock *InsertAtEnd) { 2093 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2094 return new BinaryOperator(Instruction::FSub, zero, Op, 2095 Op->getType(), Name, InsertAtEnd); 2096 } 2097 2098 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2099 Instruction *InsertBefore) { 2100 Constant *C = Constant::getAllOnesValue(Op->getType()); 2101 return new BinaryOperator(Instruction::Xor, Op, C, 2102 Op->getType(), Name, InsertBefore); 2103 } 2104 2105 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2106 BasicBlock *InsertAtEnd) { 2107 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2108 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2109 Op->getType(), Name, InsertAtEnd); 2110 } 2111 2112 // Exchange the two operands to this instruction. 
This instruction is safe to 2113 // use on any binary instruction and does not modify the semantics of the 2114 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2115 // is changed. 2116 bool BinaryOperator::swapOperands() { 2117 if (!isCommutative()) 2118 return true; // Can't commute operands 2119 Op<0>().swap(Op<1>()); 2120 return false; 2121 } 2122 2123 //===----------------------------------------------------------------------===// 2124 // FPMathOperator Class 2125 //===----------------------------------------------------------------------===// 2126 2127 float FPMathOperator::getFPAccuracy() const { 2128 const MDNode *MD = 2129 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2130 if (!MD) 2131 return 0.0; 2132 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2133 return Accuracy->getValueAPF().convertToFloat(); 2134 } 2135 2136 //===----------------------------------------------------------------------===// 2137 // CastInst Class 2138 //===----------------------------------------------------------------------===// 2139 2140 // Just determine if this cast only deals with integral->integral conversion. 2141 bool CastInst::isIntegerCast() const { 2142 switch (getOpcode()) { 2143 default: return false; 2144 case Instruction::ZExt: 2145 case Instruction::SExt: 2146 case Instruction::Trunc: 2147 return true; 2148 case Instruction::BitCast: 2149 return getOperand(0)->getType()->isIntegerTy() && 2150 getType()->isIntegerTy(); 2151 } 2152 } 2153 2154 bool CastInst::isLosslessCast() const { 2155 // Only BitCast can be lossless, exit fast if we're not BitCast 2156 if (getOpcode() != Instruction::BitCast) 2157 return false; 2158 2159 // Identity cast is always lossless 2160 Type *SrcTy = getOperand(0)->getType(); 2161 Type *DstTy = getType(); 2162 if (SrcTy == DstTy) 2163 return true; 2164 2165 // Pointer to pointer is always lossless. 

  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target informations may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      // No-op only when the pointer's integer width already matches.
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // Cell values are case labels for the switch below (99 = impossible pair).
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

/// Dispatch to the concrete CastInst subclass for \p op, inserting before
/// \p InsertBefore (may be null).
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst
         *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

/// Create a BitCast when the scalar bit widths already match, otherwise a
/// ZExt.
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast when the scalar bit widths already match, otherwise an
/// SExt.
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine
                                                   &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast when the scalar bit widths already match, otherwise a
/// Trunc.
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}

/// Create a PtrToInt cast when \p Ty is integral, otherwise a pointer
/// bitcast/addrspacecast, appended to \p InsertAtEnd.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

/// Create an AddrSpaceCast when the address spaces differ, otherwise a
/// pointer BitCast, appended to \p InsertAtEnd.
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

/// Create a PtrToInt, IntToPtr, or BitCast depending on which side is a
/// pointer and which is an integer.
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore); 2595 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2596 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2597 2598 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2599 } 2600 2601 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2602 bool isSigned, const Twine &Name, 2603 Instruction *InsertBefore) { 2604 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2605 "Invalid integer cast"); 2606 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2607 unsigned DstBits = Ty->getScalarSizeInBits(); 2608 Instruction::CastOps opcode = 2609 (SrcBits == DstBits ? Instruction::BitCast : 2610 (SrcBits > DstBits ? Instruction::Trunc : 2611 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2612 return Create(opcode, C, Ty, Name, InsertBefore); 2613 } 2614 2615 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2616 bool isSigned, const Twine &Name, 2617 BasicBlock *InsertAtEnd) { 2618 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2619 "Invalid cast"); 2620 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2621 unsigned DstBits = Ty->getScalarSizeInBits(); 2622 Instruction::CastOps opcode = 2623 (SrcBits == DstBits ? Instruction::BitCast : 2624 (SrcBits > DstBits ? Instruction::Trunc : 2625 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2626 return Create(opcode, C, Ty, Name, InsertAtEnd); 2627 } 2628 2629 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2630 const Twine &Name, 2631 Instruction *InsertBefore) { 2632 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2633 "Invalid cast"); 2634 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2635 unsigned DstBits = Ty->getScalarSizeInBits(); 2636 Instruction::CastOps opcode = 2637 (SrcBits == DstBits ? Instruction::BitCast : 2638 (SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt)); 2639 return Create(opcode, C, Ty, Name, InsertBefore); 2640 } 2641 2642 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2643 const Twine &Name, 2644 BasicBlock *InsertAtEnd) { 2645 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2646 "Invalid cast"); 2647 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2648 unsigned DstBits = Ty->getScalarSizeInBits(); 2649 Instruction::CastOps opcode = 2650 (SrcBits == DstBits ? Instruction::BitCast : 2651 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2652 return Create(opcode, C, Ty, Name, InsertAtEnd); 2653 } 2654 2655 // Check whether it is valid to call getCastOpcode for these types. 2656 // This routine must be kept in sync with getCastOpcode. 2657 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2658 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2659 return false; 2660 2661 if (SrcTy == DestTy) 2662 return true; 2663 2664 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2665 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2666 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2667 // An element by element cast. Valid if casting the elements is valid. 2668 SrcTy = SrcVecTy->getElementType(); 2669 DestTy = DestVecTy->getElementType(); 2670 } 2671 2672 // Get the bit sizes, we'll need these 2673 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2674 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2675 2676 // Run through the possibilities ... 
2677 if (DestTy->isIntegerTy()) { // Casting to integral 2678 if (SrcTy->isIntegerTy()) // Casting from integral 2679 return true; 2680 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2681 return true; 2682 if (SrcTy->isVectorTy()) // Casting from vector 2683 return DestBits == SrcBits; 2684 // Casting from something else 2685 return SrcTy->isPointerTy(); 2686 } 2687 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2688 if (SrcTy->isIntegerTy()) // Casting from integral 2689 return true; 2690 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2691 return true; 2692 if (SrcTy->isVectorTy()) // Casting from vector 2693 return DestBits == SrcBits; 2694 // Casting from something else 2695 return false; 2696 } 2697 if (DestTy->isVectorTy()) // Casting to vector 2698 return DestBits == SrcBits; 2699 if (DestTy->isPointerTy()) { // Casting to pointer 2700 if (SrcTy->isPointerTy()) // Casting from pointer 2701 return true; 2702 return SrcTy->isIntegerTy(); // Casting from integral 2703 } 2704 if (DestTy->isX86_MMXTy()) { 2705 if (SrcTy->isVectorTy()) 2706 return DestBits == SrcBits; // 64-bit vector to MMX 2707 return false; 2708 } // Casting to something else 2709 return false; 2710 } 2711 2712 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2713 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2714 return false; 2715 2716 if (SrcTy == DestTy) 2717 return true; 2718 2719 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2720 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2721 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2722 // An element by element cast. Valid if casting the elements is valid. 
2723 SrcTy = SrcVecTy->getElementType(); 2724 DestTy = DestVecTy->getElementType(); 2725 } 2726 } 2727 } 2728 2729 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2730 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2731 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2732 } 2733 } 2734 2735 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2736 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2737 2738 // Could still have vectors of pointers if the number of elements doesn't 2739 // match 2740 if (SrcBits == 0 || DestBits == 0) 2741 return false; 2742 2743 if (SrcBits != DestBits) 2744 return false; 2745 2746 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2747 return false; 2748 2749 return true; 2750 } 2751 2752 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2753 const DataLayout &DL) { 2754 // ptrtoint and inttoptr are not allowed on non-integral pointers 2755 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2756 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2757 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2758 !DL.isNonIntegralPointerType(PtrTy)); 2759 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2760 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2761 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2762 !DL.isNonIntegralPointerType(PtrTy)); 2763 2764 return isBitCastable(SrcTy, DestTy); 2765 } 2766 2767 // Provide a way to get a "cast" where the cast opcode is inferred from the 2768 // types and size of the operand. This, basically, is a parallel of the 2769 // logic in the castIsValid function below. This axiom should hold: 2770 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2771 // should not assert in castIsValid. In other words, this produces a "correct" 2772 // casting opcode for the arguments passed to it. 2773 // This routine must be kept in sync with isCastable. 
2774 Instruction::CastOps 2775 CastInst::getCastOpcode( 2776 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 2777 Type *SrcTy = Src->getType(); 2778 2779 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 2780 "Only first class types are castable!"); 2781 2782 if (SrcTy == DestTy) 2783 return BitCast; 2784 2785 // FIXME: Check address space sizes here 2786 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2787 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2788 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2789 // An element by element cast. Find the appropriate opcode based on the 2790 // element types. 2791 SrcTy = SrcVecTy->getElementType(); 2792 DestTy = DestVecTy->getElementType(); 2793 } 2794 2795 // Get the bit sizes, we'll need these 2796 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2797 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2798 2799 // Run through the possibilities ... 
2800 if (DestTy->isIntegerTy()) { // Casting to integral 2801 if (SrcTy->isIntegerTy()) { // Casting from integral 2802 if (DestBits < SrcBits) 2803 return Trunc; // int -> smaller int 2804 else if (DestBits > SrcBits) { // its an extension 2805 if (SrcIsSigned) 2806 return SExt; // signed -> SEXT 2807 else 2808 return ZExt; // unsigned -> ZEXT 2809 } else { 2810 return BitCast; // Same size, No-op cast 2811 } 2812 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2813 if (DestIsSigned) 2814 return FPToSI; // FP -> sint 2815 else 2816 return FPToUI; // FP -> uint 2817 } else if (SrcTy->isVectorTy()) { 2818 assert(DestBits == SrcBits && 2819 "Casting vector to integer of different width"); 2820 return BitCast; // Same size, no-op cast 2821 } else { 2822 assert(SrcTy->isPointerTy() && 2823 "Casting from a value that is not first-class type"); 2824 return PtrToInt; // ptr -> int 2825 } 2826 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2827 if (SrcTy->isIntegerTy()) { // Casting from integral 2828 if (SrcIsSigned) 2829 return SIToFP; // sint -> FP 2830 else 2831 return UIToFP; // uint -> FP 2832 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2833 if (DestBits < SrcBits) { 2834 return FPTrunc; // FP -> smaller FP 2835 } else if (DestBits > SrcBits) { 2836 return FPExt; // FP -> larger FP 2837 } else { 2838 return BitCast; // same size, no-op cast 2839 } 2840 } else if (SrcTy->isVectorTy()) { 2841 assert(DestBits == SrcBits && 2842 "Casting vector to floating point of different width"); 2843 return BitCast; // same size, no-op cast 2844 } 2845 llvm_unreachable("Casting pointer or non-first class to float"); 2846 } else if (DestTy->isVectorTy()) { 2847 assert(DestBits == SrcBits && 2848 "Illegal cast to vector (wrong type or size)"); 2849 return BitCast; 2850 } else if (DestTy->isPointerTy()) { 2851 if (SrcTy->isPointerTy()) { 2852 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 2853 
return AddrSpaceCast; 2854 return BitCast; // ptr -> ptr 2855 } else if (SrcTy->isIntegerTy()) { 2856 return IntToPtr; // int -> ptr 2857 } 2858 llvm_unreachable("Casting pointer to other than pointer or int"); 2859 } else if (DestTy->isX86_MMXTy()) { 2860 if (SrcTy->isVectorTy()) { 2861 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 2862 return BitCast; // 64-bit vector to MMX 2863 } 2864 llvm_unreachable("Illegal cast to X86_MMX"); 2865 } 2866 llvm_unreachable("Casting to type that is not first-class"); 2867 } 2868 2869 //===----------------------------------------------------------------------===// 2870 // CastInst SubClass Constructors 2871 //===----------------------------------------------------------------------===// 2872 2873 /// Check that the construction parameters for a CastInst are correct. This 2874 /// could be broken out into the separate constructors but it is useful to have 2875 /// it in one place and to eliminate the redundant code for getting the sizes 2876 /// of the types involved. 2877 bool 2878 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 2879 // Check for type sanity on the arguments 2880 Type *SrcTy = S->getType(); 2881 2882 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 2883 SrcTy->isAggregateType() || DstTy->isAggregateType()) 2884 return false; 2885 2886 // Get the size of the types in bits, we'll need this later 2887 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2888 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 2889 2890 // If these are vector types, get the lengths of the vectors (using zero for 2891 // scalar types means that checking that vector lengths match also checks that 2892 // scalars are not being converted to vectors or vectors to scalars). 2893 unsigned SrcLength = SrcTy->isVectorTy() ? 2894 cast<VectorType>(SrcTy)->getNumElements() : 0; 2895 unsigned DstLength = DstTy->isVectorTy() ? 
2896 cast<VectorType>(DstTy)->getNumElements() : 0; 2897 2898 // Switch on the opcode provided 2899 switch (op) { 2900 default: return false; // This is an input error 2901 case Instruction::Trunc: 2902 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2903 SrcLength == DstLength && SrcBitSize > DstBitSize; 2904 case Instruction::ZExt: 2905 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2906 SrcLength == DstLength && SrcBitSize < DstBitSize; 2907 case Instruction::SExt: 2908 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2909 SrcLength == DstLength && SrcBitSize < DstBitSize; 2910 case Instruction::FPTrunc: 2911 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2912 SrcLength == DstLength && SrcBitSize > DstBitSize; 2913 case Instruction::FPExt: 2914 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2915 SrcLength == DstLength && SrcBitSize < DstBitSize; 2916 case Instruction::UIToFP: 2917 case Instruction::SIToFP: 2918 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 2919 SrcLength == DstLength; 2920 case Instruction::FPToUI: 2921 case Instruction::FPToSI: 2922 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 2923 SrcLength == DstLength; 2924 case Instruction::PtrToInt: 2925 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 2926 return false; 2927 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 2928 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 2929 return false; 2930 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 2931 case Instruction::IntToPtr: 2932 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 2933 return false; 2934 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 2935 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 2936 return false; 2937 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 2938 case Instruction::BitCast: { 2939 
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 2940 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 2941 2942 // BitCast implies a no-op cast of type only. No bits change. 2943 // However, you can't cast pointers to anything but pointers. 2944 if (!SrcPtrTy != !DstPtrTy) 2945 return false; 2946 2947 // For non-pointer cases, the cast is okay if the source and destination bit 2948 // widths are identical. 2949 if (!SrcPtrTy) 2950 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 2951 2952 // If both are pointers then the address spaces must match. 2953 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 2954 return false; 2955 2956 // A vector of pointers must have the same number of elements. 2957 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 2958 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 2959 if (SrcVecTy && DstVecTy) 2960 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 2961 if (SrcVecTy) 2962 return SrcVecTy->getNumElements() == 1; 2963 if (DstVecTy) 2964 return DstVecTy->getNumElements() == 1; 2965 2966 return true; 2967 } 2968 case Instruction::AddrSpaceCast: { 2969 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 2970 if (!SrcPtrTy) 2971 return false; 2972 2973 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 2974 if (!DstPtrTy) 2975 return false; 2976 2977 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 2978 return false; 2979 2980 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2981 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 2982 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 2983 2984 return false; 2985 } 2986 2987 return true; 2988 } 2989 } 2990 } 2991 2992 TruncInst::TruncInst( 2993 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2994 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 2995 assert(castIsValid(getOpcode(), 
S, Ty) && "Illegal Trunc"); 2996 } 2997 2998 TruncInst::TruncInst( 2999 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3000 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3001 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3002 } 3003 3004 ZExtInst::ZExtInst( 3005 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3006 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3007 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3008 } 3009 3010 ZExtInst::ZExtInst( 3011 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3012 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3013 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3014 } 3015 SExtInst::SExtInst( 3016 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3017 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3018 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3019 } 3020 3021 SExtInst::SExtInst( 3022 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3023 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3024 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3025 } 3026 3027 FPTruncInst::FPTruncInst( 3028 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3029 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3030 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3031 } 3032 3033 FPTruncInst::FPTruncInst( 3034 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3035 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3036 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3037 } 3038 3039 FPExtInst::FPExtInst( 3040 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3041 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3042 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3043 } 3044 3045 FPExtInst::FPExtInst( 3046 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3047 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3048 
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3049 } 3050 3051 UIToFPInst::UIToFPInst( 3052 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3053 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3054 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3055 } 3056 3057 UIToFPInst::UIToFPInst( 3058 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3059 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3060 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3061 } 3062 3063 SIToFPInst::SIToFPInst( 3064 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3065 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3066 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3067 } 3068 3069 SIToFPInst::SIToFPInst( 3070 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3071 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3072 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3073 } 3074 3075 FPToUIInst::FPToUIInst( 3076 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3077 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3078 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3079 } 3080 3081 FPToUIInst::FPToUIInst( 3082 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3083 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3084 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3085 } 3086 3087 FPToSIInst::FPToSIInst( 3088 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3089 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3090 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3091 } 3092 3093 FPToSIInst::FPToSIInst( 3094 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3095 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3096 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3097 } 3098 3099 PtrToIntInst::PtrToIntInst( 3100 Value *S, Type *Ty, const Twine &Name, 
Instruction *InsertBefore 3101 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3102 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3103 } 3104 3105 PtrToIntInst::PtrToIntInst( 3106 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3107 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3108 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3109 } 3110 3111 IntToPtrInst::IntToPtrInst( 3112 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3113 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3114 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3115 } 3116 3117 IntToPtrInst::IntToPtrInst( 3118 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3119 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3120 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3121 } 3122 3123 BitCastInst::BitCastInst( 3124 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3125 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3126 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3127 } 3128 3129 BitCastInst::BitCastInst( 3130 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3131 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3132 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3133 } 3134 3135 AddrSpaceCastInst::AddrSpaceCastInst( 3136 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3137 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3138 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3139 } 3140 3141 AddrSpaceCastInst::AddrSpaceCastInst( 3142 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3143 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) { 3144 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3145 } 3146 3147 //===----------------------------------------------------------------------===// 3148 // CmpInst Classes 3149 
//===----------------------------------------------------------------------===// 3150 3151 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3152 Value *RHS, const Twine &Name, Instruction *InsertBefore) 3153 : Instruction(ty, op, 3154 OperandTraits<CmpInst>::op_begin(this), 3155 OperandTraits<CmpInst>::operands(this), 3156 InsertBefore) { 3157 Op<0>() = LHS; 3158 Op<1>() = RHS; 3159 setPredicate((Predicate)predicate); 3160 setName(Name); 3161 } 3162 3163 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3164 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3165 : Instruction(ty, op, 3166 OperandTraits<CmpInst>::op_begin(this), 3167 OperandTraits<CmpInst>::operands(this), 3168 InsertAtEnd) { 3169 Op<0>() = LHS; 3170 Op<1>() = RHS; 3171 setPredicate((Predicate)predicate); 3172 setName(Name); 3173 } 3174 3175 CmpInst * 3176 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3177 const Twine &Name, Instruction *InsertBefore) { 3178 if (Op == Instruction::ICmp) { 3179 if (InsertBefore) 3180 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3181 S1, S2, Name); 3182 else 3183 return new ICmpInst(CmpInst::Predicate(predicate), 3184 S1, S2, Name); 3185 } 3186 3187 if (InsertBefore) 3188 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3189 S1, S2, Name); 3190 else 3191 return new FCmpInst(CmpInst::Predicate(predicate), 3192 S1, S2, Name); 3193 } 3194 3195 CmpInst * 3196 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3197 const Twine &Name, BasicBlock *InsertAtEnd) { 3198 if (Op == Instruction::ICmp) { 3199 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3200 S1, S2, Name); 3201 } 3202 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3203 S1, S2, Name); 3204 } 3205 3206 void CmpInst::swapOperands() { 3207 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3208 IC->swapOperands(); 3209 else 3210 
cast<FCmpInst>(this)->swapOperands(); 3211 } 3212 3213 bool CmpInst::isCommutative() const { 3214 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3215 return IC->isCommutative(); 3216 return cast<FCmpInst>(this)->isCommutative(); 3217 } 3218 3219 bool CmpInst::isEquality() const { 3220 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3221 return IC->isEquality(); 3222 return cast<FCmpInst>(this)->isEquality(); 3223 } 3224 3225 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3226 switch (pred) { 3227 default: llvm_unreachable("Unknown cmp predicate!"); 3228 case ICMP_EQ: return ICMP_NE; 3229 case ICMP_NE: return ICMP_EQ; 3230 case ICMP_UGT: return ICMP_ULE; 3231 case ICMP_ULT: return ICMP_UGE; 3232 case ICMP_UGE: return ICMP_ULT; 3233 case ICMP_ULE: return ICMP_UGT; 3234 case ICMP_SGT: return ICMP_SLE; 3235 case ICMP_SLT: return ICMP_SGE; 3236 case ICMP_SGE: return ICMP_SLT; 3237 case ICMP_SLE: return ICMP_SGT; 3238 3239 case FCMP_OEQ: return FCMP_UNE; 3240 case FCMP_ONE: return FCMP_UEQ; 3241 case FCMP_OGT: return FCMP_ULE; 3242 case FCMP_OLT: return FCMP_UGE; 3243 case FCMP_OGE: return FCMP_ULT; 3244 case FCMP_OLE: return FCMP_UGT; 3245 case FCMP_UEQ: return FCMP_ONE; 3246 case FCMP_UNE: return FCMP_OEQ; 3247 case FCMP_UGT: return FCMP_OLE; 3248 case FCMP_ULT: return FCMP_OGE; 3249 case FCMP_UGE: return FCMP_OLT; 3250 case FCMP_ULE: return FCMP_OGT; 3251 case FCMP_ORD: return FCMP_UNO; 3252 case FCMP_UNO: return FCMP_ORD; 3253 case FCMP_TRUE: return FCMP_FALSE; 3254 case FCMP_FALSE: return FCMP_TRUE; 3255 } 3256 } 3257 3258 StringRef CmpInst::getPredicateName(Predicate Pred) { 3259 switch (Pred) { 3260 default: return "unknown"; 3261 case FCmpInst::FCMP_FALSE: return "false"; 3262 case FCmpInst::FCMP_OEQ: return "oeq"; 3263 case FCmpInst::FCMP_OGT: return "ogt"; 3264 case FCmpInst::FCMP_OGE: return "oge"; 3265 case FCmpInst::FCMP_OLT: return "olt"; 3266 case FCmpInst::FCMP_OLE: return "ole"; 3267 case FCmpInst::FCMP_ONE: return "one"; 3268 
case FCmpInst::FCMP_ORD: return "ord"; 3269 case FCmpInst::FCMP_UNO: return "uno"; 3270 case FCmpInst::FCMP_UEQ: return "ueq"; 3271 case FCmpInst::FCMP_UGT: return "ugt"; 3272 case FCmpInst::FCMP_UGE: return "uge"; 3273 case FCmpInst::FCMP_ULT: return "ult"; 3274 case FCmpInst::FCMP_ULE: return "ule"; 3275 case FCmpInst::FCMP_UNE: return "une"; 3276 case FCmpInst::FCMP_TRUE: return "true"; 3277 case ICmpInst::ICMP_EQ: return "eq"; 3278 case ICmpInst::ICMP_NE: return "ne"; 3279 case ICmpInst::ICMP_SGT: return "sgt"; 3280 case ICmpInst::ICMP_SGE: return "sge"; 3281 case ICmpInst::ICMP_SLT: return "slt"; 3282 case ICmpInst::ICMP_SLE: return "sle"; 3283 case ICmpInst::ICMP_UGT: return "ugt"; 3284 case ICmpInst::ICMP_UGE: return "uge"; 3285 case ICmpInst::ICMP_ULT: return "ult"; 3286 case ICmpInst::ICMP_ULE: return "ule"; 3287 } 3288 } 3289 3290 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3291 switch (pred) { 3292 default: llvm_unreachable("Unknown icmp predicate!"); 3293 case ICMP_EQ: case ICMP_NE: 3294 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3295 return pred; 3296 case ICMP_UGT: return ICMP_SGT; 3297 case ICMP_ULT: return ICMP_SLT; 3298 case ICMP_UGE: return ICMP_SGE; 3299 case ICMP_ULE: return ICMP_SLE; 3300 } 3301 } 3302 3303 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3304 switch (pred) { 3305 default: llvm_unreachable("Unknown icmp predicate!"); 3306 case ICMP_EQ: case ICMP_NE: 3307 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3308 return pred; 3309 case ICMP_SGT: return ICMP_UGT; 3310 case ICMP_SLT: return ICMP_ULT; 3311 case ICMP_SGE: return ICMP_UGE; 3312 case ICMP_SLE: return ICMP_ULE; 3313 } 3314 } 3315 3316 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3317 switch (pred) { 3318 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3319 case ICMP_SGT: return ICMP_SGE; 3320 case ICMP_SLT: return ICMP_SLE; 3321 case ICMP_SGE: return 
ICMP_SGT; 3322 case ICMP_SLE: return ICMP_SLT; 3323 case ICMP_UGT: return ICMP_UGE; 3324 case ICMP_ULT: return ICMP_ULE; 3325 case ICMP_UGE: return ICMP_UGT; 3326 case ICMP_ULE: return ICMP_ULT; 3327 3328 case FCMP_OGT: return FCMP_OGE; 3329 case FCMP_OLT: return FCMP_OLE; 3330 case FCMP_OGE: return FCMP_OGT; 3331 case FCMP_OLE: return FCMP_OLT; 3332 case FCMP_UGT: return FCMP_UGE; 3333 case FCMP_ULT: return FCMP_ULE; 3334 case FCMP_UGE: return FCMP_UGT; 3335 case FCMP_ULE: return FCMP_ULT; 3336 } 3337 } 3338 3339 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3340 switch (pred) { 3341 default: llvm_unreachable("Unknown cmp predicate!"); 3342 case ICMP_EQ: case ICMP_NE: 3343 return pred; 3344 case ICMP_SGT: return ICMP_SLT; 3345 case ICMP_SLT: return ICMP_SGT; 3346 case ICMP_SGE: return ICMP_SLE; 3347 case ICMP_SLE: return ICMP_SGE; 3348 case ICMP_UGT: return ICMP_ULT; 3349 case ICMP_ULT: return ICMP_UGT; 3350 case ICMP_UGE: return ICMP_ULE; 3351 case ICMP_ULE: return ICMP_UGE; 3352 3353 case FCMP_FALSE: case FCMP_TRUE: 3354 case FCMP_OEQ: case FCMP_ONE: 3355 case FCMP_UEQ: case FCMP_UNE: 3356 case FCMP_ORD: case FCMP_UNO: 3357 return pred; 3358 case FCMP_OGT: return FCMP_OLT; 3359 case FCMP_OLT: return FCMP_OGT; 3360 case FCMP_OGE: return FCMP_OLE; 3361 case FCMP_OLE: return FCMP_OGE; 3362 case FCMP_UGT: return FCMP_ULT; 3363 case FCMP_ULT: return FCMP_UGT; 3364 case FCMP_UGE: return FCMP_ULE; 3365 case FCMP_ULE: return FCMP_UGE; 3366 } 3367 } 3368 3369 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) { 3370 switch (pred) { 3371 case ICMP_SGT: return ICMP_SGE; 3372 case ICMP_SLT: return ICMP_SLE; 3373 case ICMP_UGT: return ICMP_UGE; 3374 case ICMP_ULT: return ICMP_ULE; 3375 case FCMP_OGT: return FCMP_OGE; 3376 case FCMP_OLT: return FCMP_OLE; 3377 case FCMP_UGT: return FCMP_UGE; 3378 case FCMP_ULT: return FCMP_ULE; 3379 default: return pred; 3380 } 3381 } 3382 3383 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) { 
3384 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3385 3386 switch (pred) { 3387 default: 3388 llvm_unreachable("Unknown predicate!"); 3389 case CmpInst::ICMP_ULT: 3390 return CmpInst::ICMP_SLT; 3391 case CmpInst::ICMP_ULE: 3392 return CmpInst::ICMP_SLE; 3393 case CmpInst::ICMP_UGT: 3394 return CmpInst::ICMP_SGT; 3395 case CmpInst::ICMP_UGE: 3396 return CmpInst::ICMP_SGE; 3397 } 3398 } 3399 3400 bool CmpInst::isUnsigned(Predicate predicate) { 3401 switch (predicate) { 3402 default: return false; 3403 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3404 case ICmpInst::ICMP_UGE: return true; 3405 } 3406 } 3407 3408 bool CmpInst::isSigned(Predicate predicate) { 3409 switch (predicate) { 3410 default: return false; 3411 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3412 case ICmpInst::ICMP_SGE: return true; 3413 } 3414 } 3415 3416 bool CmpInst::isOrdered(Predicate predicate) { 3417 switch (predicate) { 3418 default: return false; 3419 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3420 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3421 case FCmpInst::FCMP_ORD: return true; 3422 } 3423 } 3424 3425 bool CmpInst::isUnordered(Predicate predicate) { 3426 switch (predicate) { 3427 default: return false; 3428 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3429 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3430 case FCmpInst::FCMP_UNO: return true; 3431 } 3432 } 3433 3434 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3435 switch(predicate) { 3436 default: return false; 3437 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3438 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3439 } 3440 } 3441 3442 bool CmpInst::isFalseWhenEqual(Predicate predicate) { 3443 switch(predicate) { 3444 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: 
  case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

/// Returns true when "A Pred1 B" being true implies "A Pred2 B" is also true
/// for the same operands.  Only exact implications between integer predicates
/// are recognized; anything not listed conservatively returns false.
bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

/// Returns true when "A Pred1 B" being true implies "A Pred2 B" is false.
/// Implemented by checking whether Pred1 implies the inverse of Pred2.
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}

//===----------------------------------------------------------------------===//
// SwitchInst Implementation
//===----------------------------------------------------------------------===//

/// Shared constructor helper: reserves \p NumReserved hung-off operand slots
/// and installs the two fixed operands (the condition and the default
/// destination).  Case (value, successor) pairs are appended later.
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination.
/// The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // 2 slots for condition + default destination, plus a (value, successor)
  // pair for each expected case.
  init(Value, Default, 2+NumCases*2);
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

/// Copy constructor: reserves exactly as many operands as the original holds
/// and copies the case pairs, which start after the two fixed operands that
/// init() installs.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  // Operands 0/1 were set by init(); copy each (value, successor) pair.
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  // A case consumes two operand slots: one for the value, one for the
  // destination block.
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  // Publish the two new operand slots, then fill them through a CaseHandle.
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  // Iterator at the removed position: now the case swapped in from the end,
  // or end() if the last case was removed.
  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

//===----------------------------------------------------------------------===//
// IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

/// Shared constructor helper: reserves one slot for the address operand plus
/// one per expected destination, and installs the address as operand 0.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}


/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.
/// This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

/// Copy constructor: allocates the same number of hung-off operand slots and
/// copies every operand (the address followed by the destinations) from
/// \p IBI.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  // Successors live in operands 1..N-1 (operand 0 is the target address), so
  // a valid successor index must satisfy idx < NumOperands - 1.
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
// cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  // Rebuild from type/address-space/array-size/alignment, then copy the
  // flags the constructor does not take.
  AllocaInst *Result = new AllocaInst(getAllocatedType(),
                                      getType()->getAddressSpace(),
                                      (Value *)getOperand(0), getAlignment());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  // The clone gets an empty name (Twine()); all other properties are copied.
  return new LoadInst(getOperand(0), Twine(), isVolatile(),
                      getAlignment(), getOrdering(), getSyncScopeID());
}

StoreInst
*StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       getAlignment(), getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  // The volatile and weak flags are not constructor parameters, so copy them
  // explicitly after construction.
  AtomicCmpXchgInst *Result =
    new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
                          getSuccessOrdering(), getFailureOrdering(),
                          getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  // As above: the volatile flag is copied separately.
  AtomicRMWInst *Result =
    new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                      getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

// The cast-instruction clones below all follow the same pattern: rebuild from
// the single source operand and the destination type.

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  // Calls carrying operand bundles need extra room in the allocation for the
  // bundle descriptors, hence the two-argument placement new.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

// Variadic-operand instructions size their allocation by operand count.

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  // Like CallInst::cloneImpl: operand bundles require extra descriptor bytes
  // in the allocation.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  // Unreachable has no operands; only the context is needed.
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}