//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//

// Return the total size in bits of this allocation: the allocated type's
// alloc size, multiplied by the array-size operand when this is an array
// allocation.  Returns None when the array size is not a ConstantInt (i.e.
// the allocation size cannot be determined statically).
Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
//                              CallSite Class
//===----------------------------------------------------------------------===//

// The callee use sits at the tail of the operand list: a call stores it as
// the very last operand, while an invoke stores it before its two successor
// basic-block operands.
User::op_iterator CallSite::getCallee() const {
  Instruction *II(getInstruction());
  return isCall()
             ? cast<CallInst>(II)->op_end() - 1    // Skip Callee
             : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
}

//===----------------------------------------------------------------------===//
//                             SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.  Condition elements must be i1, and both selected values
    // must be vectors with the same element count as the condition.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHI nodes store their operands out-of-line ("hung off" the instruction),
// with the incoming-value uses followed by a parallel array of incoming
// blocks; the copy constructor must therefore copy both ranges.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType()); // Self-referential PHI only: undef.
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    // Self-references and undefs are ignored; every remaining incoming value
    // must be identical.
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

// Copy construction: clause operands are hung off the instruction, so they
// must be allocated and copied element-by-element; the cleanup flag lives in
// subclass data and is copied via setCleanup().
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  // No-op if the reserved (not yet used) slots already cover the request.
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout for a call: [args...][bundle operands...][callee], i.e. the
// callee occupies Op<-1>().
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  Op<-1>() = Func;

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

// Zero-argument form: derives the FunctionType from the callee's pointer
// type.
void CallInst::init(Value *Func, const Twine &NameStr) {
  FTy =
      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  Op<-1>() = Func;

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1,
          InsertBefore) {
  init(Func, Name);
}

CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase<CallInst>(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
                         OperandTraits<CallBase<CallInst>>::op_end(this) -
                             CI.getNumOperands(),
                         CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

// Clone an existing call but with a (possibly different) set of operand
// bundles, preserving tail-call kind, calling convention, attributes, and
// debug location.
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
                                 InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  Value *MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      // NOTE: in this branch the bitcast is NOT inserted into a block; the
      // caller is responsible for that (see CreateMalloc's doc comment).
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc)) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

// Shared worker for the CreateFree overloads: bitcasts Source to i8* if
// needed and emits a tail call to "void free(void*)".  Exactly one of
// InsertBefore/InsertAtEnd must be non-null.
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    // NOTE: the call itself is not inserted into a block here; the caller is
    // responsible for that (see CreateFree's doc comment).
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

// Operand layout for an invoke: [args...][bundle operands...]
// [callee][normal dest][unwind dest], i.e. Op<-3>/Op<-2>/Op<-1> respectively.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
         "NumOperands not set up?");
  Op<-3>() = Fn;
  Op<-2>() = IfNormal;
  Op<-1>() = IfException;

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase<InvokeInst>(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
                           OperandTraits<CallBase<InvokeInst>>::op_end(this) -
                               II.getNumOperands(),
                           II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

// Clone an existing invoke but with a (possibly different) set of operand
// bundles, preserving calling convention, attributes, and debug location.
InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpB,
                                   II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

// `!!retVal` yields 0 or 1 operands: a void return carries none.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                  CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

// Bit 0 of the instruction subclass data records whether an unwind
// destination is present (and thus whether Op<1>() exists).
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                        CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
750 //===----------------------------------------------------------------------===// 751 752 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 753 unsigned NumReservedValues, 754 const Twine &NameStr, 755 Instruction *InsertBefore) 756 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 757 InsertBefore) { 758 if (UnwindDest) 759 ++NumReservedValues; 760 init(ParentPad, UnwindDest, NumReservedValues + 1); 761 setName(NameStr); 762 } 763 764 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 765 unsigned NumReservedValues, 766 const Twine &NameStr, BasicBlock *InsertAtEnd) 767 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 768 InsertAtEnd) { 769 if (UnwindDest) 770 ++NumReservedValues; 771 init(ParentPad, UnwindDest, NumReservedValues + 1); 772 setName(NameStr); 773 } 774 775 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI) 776 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr, 777 CSI.getNumOperands()) { 778 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands()); 779 setNumHungOffUseOperands(ReservedSpace); 780 Use *OL = getOperandList(); 781 const Use *InOL = CSI.getOperandList(); 782 for (unsigned I = 1, E = ReservedSpace; I != E; ++I) 783 OL[I] = InOL[I]; 784 } 785 786 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest, 787 unsigned NumReservedValues) { 788 assert(ParentPad && NumReservedValues); 789 790 ReservedSpace = NumReservedValues; 791 setNumHungOffUseOperands(UnwindDest ? 2 : 1); 792 allocHungoffUses(ReservedSpace); 793 794 Op<0>() = ParentPad; 795 if (UnwindDest) { 796 setInstructionSubclassData(getSubclassDataFromInstruction() | 1); 797 setUnwindDest(UnwindDest); 798 } 799 } 800 801 /// growOperands - grow operands - This grows the operand list in response to a 802 /// push_back style of operation. This grows the number of ops by 2 times. 
803 void CatchSwitchInst::growOperands(unsigned Size) { 804 unsigned NumOperands = getNumOperands(); 805 assert(NumOperands >= 1); 806 if (ReservedSpace >= NumOperands + Size) 807 return; 808 ReservedSpace = (NumOperands + Size / 2) * 2; 809 growHungoffUses(ReservedSpace); 810 } 811 812 void CatchSwitchInst::addHandler(BasicBlock *Handler) { 813 unsigned OpNo = getNumOperands(); 814 growOperands(1); 815 assert(OpNo < ReservedSpace && "Growing didn't work!"); 816 setNumHungOffUseOperands(getNumOperands() + 1); 817 getOperandList()[OpNo] = Handler; 818 } 819 820 void CatchSwitchInst::removeHandler(handler_iterator HI) { 821 // Move all subsequent handlers up one. 822 Use *EndDst = op_end() - 1; 823 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst) 824 *CurDst = *(CurDst + 1); 825 // Null out the last handler use. 826 *EndDst = nullptr; 827 828 setNumHungOffUseOperands(getNumOperands() - 1); 829 } 830 831 //===----------------------------------------------------------------------===// 832 // FuncletPadInst Implementation 833 //===----------------------------------------------------------------------===// 834 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args, 835 const Twine &NameStr) { 836 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?"); 837 std::copy(Args.begin(), Args.end(), op_begin()); 838 setParentPad(ParentPad); 839 setName(NameStr); 840 } 841 842 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI) 843 : Instruction(FPI.getType(), FPI.getOpcode(), 844 OperandTraits<FuncletPadInst>::op_end(this) - 845 FPI.getNumOperands(), 846 FPI.getNumOperands()) { 847 std::copy(FPI.op_begin(), FPI.op_end(), op_begin()); 848 setParentPad(FPI.getParentPad()); 849 } 850 851 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad, 852 ArrayRef<Value *> Args, unsigned Values, 853 const Twine &NameStr, Instruction *InsertBefore) 854 : Instruction(ParentPad->getType(), Op, 855 
OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                        UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

// Debug-only sanity check: a conditional branch must branch on an i1.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

// Unconditional branch. Operands are laid out back-to-front, so Op<-1> is the
// last (here the only) operand: the destination block.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch: operand order (from the end) is IfTrue, IfFalse, Cond.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Copy constructor: handles both the 1-operand (unconditional) and 3-operand
// (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

// Exchanges the true/false successors of a conditional branch (the condition
// itself is left untouched, so callers typically invert it separately).
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

// Normalizes the array-size operand: a null Amt means a single element, so
// substitute the constant i32 1.
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}

// Full constructor: the result type is a pointer (in AddrSpace) to Ty; the
// single operand is the (possibly defaulted) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       Instruction *InsertBefore)
  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                     getAISize(Ty->getContext(), ArraySize), InsertBefore),
    AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
  : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                     getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
    AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// The alignment is stored as Log2(Align)+1 in the low 5 bits of the subclass
// data (0 encodes "no alignment specified").
void AllocaInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             (Log2_32(Align) + 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

// True unless the array size is the constant 1; a non-constant size is
// conservatively treated as an array allocation.
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
const BasicBlock *Parent = getParent();
  // inalloca allocas are managed dynamically and are never "free".
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

// Debug-only sanity check shared by all constructors.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// Fully general constructor taking an explicit result type; Ty must match the
// pointee type of Ptr.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// Same as above, but the result type is derived from Ptr's pointee type.
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// const char* name variants: only set the name when it is non-empty.
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  // Alignment is stored as Log2(Align)+1 in bits 1-5 of the subclass data
  // (bit 0 holds the volatile flag).
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//

// Debug-only sanity check shared by all constructors.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
                 cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

// Fully general constructor; a store produces no value, so the result type is
// void. Operand 0 is the stored value, operand 1 the address.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

// Same Log2(Align)+1 encoding in bits 1-5 as LoadInst::setAlignment.
void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering
FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  // Structural checks: Cmp and NewVal must both match Ptr's pointee type, and
  // the orderings must satisfy the cmpxchg constraints from the LangRef.
  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

// cmpxchg yields a {ValueType, i1} pair: the loaded value plus a success flag.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
//                       AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

// Shared constructor helper: records the operation, ordering, and sync scope,
// and validates that Val matches Ptr's pointee type.
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

// atomicrmw yields the value loaded from memory, so the result type is
// Val's type.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
  : Instruction(Val->getType(), AtomicRMW,
                OperandTraits<AtomicRMWInst>::op_begin(this),
                OperandTraits<AtomicRMWInst>::operands(this),
                InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
  : Instruction(Val->getType(), AtomicRMW,
                OperandTraits<AtomicRMWInst>::op_begin(this),
                OperandTraits<AtomicRMWInst>::operands(this),
InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

// Maps an atomicrmw BinOp to its textual IR mnemonic.
StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
//                       FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
//                       GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

// Shared constructor helper: operand 0 is the base pointer, operands 1..N are
// the indices.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a continuous piece of memory containing the
/// indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of the empty set index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized, otherwise
  // it cannot be 'stepped over'.
  if (!Agg->isSized())
    return nullptr;

  // Walk the remaining indices (index 0 steps over the pointer itself and
  // does not change the type), descending into each composite level.
  unsigned CurIdx = 1;
  for (; CurIdx != IdxList.size(); ++CurIdx) {
    CompositeType *CT = dyn_cast<CompositeType>(Agg);
    if (!CT || CT->isPointerTy()) return nullptr;
    IndexTy Index = IdxList[CurIdx];
    if (!CT->indexValid(Index)) return nullptr;
    Agg = CT->getTypeAtIndex(Index);
  }
  return CurIdx == IdxList.size() ?
Agg : nullptr;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

//===----------------------------------------------------------------------===//
//                           ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type is the element type of the vector being indexed.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

// Valid iff Val is a vector and Index is an integer (of any width).
bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
//                           InsertElementInst Implementation
//===----------------------------------------------------------------------===//

// The result type is the same vector type as the input vector.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

// Checks the three insertelement operand constraints from the LangRef.
bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false;  // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false;// Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false;  // Third operand of insertelement must be i32.
return true;
}

//===----------------------------------------------------------------------===//
//                      ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

// The result vector has V1's element type and as many elements as the mask.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                cast<VectorType>(Mask->getType())->getNumElements()),
              ShuffleVector,
              OperandTraits<ShuffleVectorInst>::op_begin(this),
              OperandTraits<ShuffleVectorInst>::operands(this),
              InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                cast<VectorType>(Mask->getType())->getNumElements()),
              ShuffleVector,
              OperandTraits<ShuffleVectorInst>::op_begin(this),
              OperandTraits<ShuffleVectorInst>::operands(this),
              InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  Op<2>() = Mask;
  setName(Name);
}

// Validates shufflevector operands: V1/V2 are same-typed vectors, Mask is a
// vector of i32 whose constant entries (if any) index into the concatenation
// of V1 and V2 (i.e. are < 2 * V1's element count) or are undef.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // The bitcode reader can create a place holder for a forward reference
  // used as the shuffle mask. When this occurs, the shuffle mask will
  // fall into this case and fail. To avoid this error, do this bit of
  // ugliness to allow such a mask pass.
if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
    if (CE->getOpcode() == Instruction::UserOp1)
      return true;

  return false;
}

// Returns the mask value at index i: the selected lane number, or -1 for an
// undef lane.
int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
  assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
    return CDS->getElementAsInteger(i);
  Constant *C = Mask->getAggregateElement(i);
  if (isa<UndefValue>(C))
    return -1;
  return cast<ConstantInt>(C)->getZExtValue();
}

// Expands a constant shuffle mask into a vector of ints (-1 for undef lanes).
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  unsigned NumElts = Mask->getType()->getVectorNumElements();

  // ConstantDataSequential gives direct integer access; no undef lanes here.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

// True if every defined mask element selects from only one of the two source
// operands (each of which has NumOpElts elements).
static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    // Elements [0, NumOpElts) come from the LHS; [NumOpElts, 2*NumOpElts)
    // from the RHS.
    UsesLHS |= (Mask[i] < NumOpElts);
    UsesRHS |= (Mask[i] >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source");
  return true;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, Mask.size());
}

// True if the mask copies one source through unchanged (modulo undef lanes).
static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    // Lane i must come from lane i of the LHS or lane i of the RHS.
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, Mask.size());
}

// True if the mask reverses the lanes of a single source vector.
bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
      return false;
  }
  return true;
}

// True if the mask broadcasts element 0 of a single source vector.
bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != 0 && Mask[i] != NumElts)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
  // Select is differentiated from identity. It requires using both sources.
1756 if (isSingleSourceMask(Mask)) 1757 return false; 1758 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1759 if (Mask[i] == -1) 1760 continue; 1761 if (Mask[i] != i && Mask[i] != (NumElts + i)) 1762 return false; 1763 } 1764 return true; 1765 } 1766 1767 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 1768 // Example masks that will return true: 1769 // v1 = <a, b, c, d> 1770 // v2 = <e, f, g, h> 1771 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 1772 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 1773 1774 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 1775 int NumElts = Mask.size(); 1776 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 1777 return false; 1778 1779 // 2. The first element of the mask must be either a 0 or a 1. 1780 if (Mask[0] != 0 && Mask[0] != 1) 1781 return false; 1782 1783 // 3. The difference between the first 2 elements must be equal to the 1784 // number of elements in the mask. 1785 if ((Mask[1] - Mask[0]) != NumElts) 1786 return false; 1787 1788 // 4. The difference between consecutive even-numbered and odd-numbered 1789 // elements must be equal to 2. 1790 for (int i = 2; i < NumElts; ++i) { 1791 int MaskEltVal = Mask[i]; 1792 if (MaskEltVal == -1) 1793 return false; 1794 int MaskEltPrevVal = Mask[i - 2]; 1795 if (MaskEltVal - MaskEltPrevVal != 2) 1796 return false; 1797 } 1798 return true; 1799 } 1800 1801 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 1802 int NumSrcElts, int &Index) { 1803 // Must extract from a single source. 1804 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 1805 return false; 1806 1807 // Must be smaller (else this is an Identity shuffle). 1808 if (NumSrcElts <= (int)Mask.size()) 1809 return false; 1810 1811 // Find start of extraction, accounting that we may start with an UNDEF. 
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // Offset is the extraction start position implied by this element; every
    // defined element must imply the same start, or this isn't an extract.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  // An all-undef mask never sets SubIndex, so there is no extraction index
  // to report.
  if (0 <= SubIndex) {
    Index = SubIndex;
    return true;
  }
  return false;
}

// Return true if this shuffle lengthens exactly one source operand: the low
// mask elements form an identity of that operand and the rest are undef.
bool ShuffleVectorInst::isIdentityWithPadding() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  SmallVector<int, 16> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

// Return true if this shuffle extracts the leading lanes of one (longer)
// source operand unchanged, i.e. it is an identity mask on a shorter result.
bool ShuffleVectorInst::isIdentityWithExtract() const {
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the
  // inputs, and neither of the inputs are undef vectors.
  // If the mask picks consecutive elements from both inputs, then this is a
  // concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

// Set the aggregate/value operands and the index list of a freshly allocated
// insertvalue instruction.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

// Copy constructor: duplicates both operands, the index list, and the
// optional subclass flags of the original instruction.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

// Record the index list and name of a freshly allocated extractvalue
// instruction (the aggregate operand is set by the constructor).
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

// Copy constructor: duplicates the aggregate operand, the index list, and
// the optional subclass flags of the original instruction.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
    } else {
      // Not a valid type to index into.
      return nullptr;
    }

    // Step into the aggregate type selected by this index and continue with
    // the next index.
    Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
  }
  return const_cast<Type*>(Agg);
}

//===----------------------------------------------------------------------===//
//                             UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

// Factory: the result type of a unary operator is always the operand's type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

// Factory variant that appends the new instruction to the end of a block.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Debug-build sanity checks on the opcode/operand/result type combination.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
//                           BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Debug-build sanity checks on the opcode/operand/result type combination.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  // Each opcode group checks that the result type equals the operand type and
  // that the type class (integer vs. floating point) matches the opcode.
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Integer negation is materialized as (0 - Op).
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

// Negation carrying the no-signed-wrap flag: (0 -nsw Op).
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
}

// Negation carrying the no-unsigned-wrap flag: (0 -nuw Op).
BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
}

// Floating-point negation is materialized as (-0.0 - Op).
BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
                                           Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::FSub, zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
                                           BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::FSub, zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

// Bitwise NOT is materialized as (Op ^ all-ones).
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}

// Exchange the two operands to this instruction.
This instruction is safe to 2195 // use on any binary instruction and does not modify the semantics of the 2196 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2197 // is changed. 2198 bool BinaryOperator::swapOperands() { 2199 if (!isCommutative()) 2200 return true; // Can't commute operands 2201 Op<0>().swap(Op<1>()); 2202 return false; 2203 } 2204 2205 //===----------------------------------------------------------------------===// 2206 // FPMathOperator Class 2207 //===----------------------------------------------------------------------===// 2208 2209 float FPMathOperator::getFPAccuracy() const { 2210 const MDNode *MD = 2211 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2212 if (!MD) 2213 return 0.0; 2214 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2215 return Accuracy->getValueAPF().convertToFloat(); 2216 } 2217 2218 //===----------------------------------------------------------------------===// 2219 // CastInst Class 2220 //===----------------------------------------------------------------------===// 2221 2222 // Just determine if this cast only deals with integral->integral conversion. 2223 bool CastInst::isIntegerCast() const { 2224 switch (getOpcode()) { 2225 default: return false; 2226 case Instruction::ZExt: 2227 case Instruction::SExt: 2228 case Instruction::Trunc: 2229 return true; 2230 case Instruction::BitCast: 2231 return getOperand(0)->getType()->isIntegerTy() && 2232 getType()->isIntegerTy(); 2233 } 2234 } 2235 2236 bool CastInst::isLosslessCast() const { 2237 // Only BitCast can be lossless, exit fast if we're not BitCast 2238 if (getOpcode() != Instruction::BitCast) 2239 return false; 2240 2241 // Identity cast is always lossless 2242 Type *SrcTy = getOperand(0)->getType(); 2243 Type *DstTy = getType(); 2244 if (SrcTy == DstTy) 2245 return true; 2246 2247 // Pointer to pointer is always lossless. 
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    // No-op when the pointer's integer representation is exactly as wide as
    // the destination integer.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    // Symmetric case: the source integer must match the pointer's width.
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

// Convenience overload operating on this cast's own operand and result types.
bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination.
/// This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced
/// with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // Each entry is the ElimCase handled by the switch below; 99 marks a
  // combination that cannot occur (mismatched MidTy). Note case 10 is unused.
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

// Factory: construct the concrete CastInst subclass for the given opcode,
// inserted before an existing instruction.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst
*CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2542 const Twine &Name, BasicBlock *InsertAtEnd) { 2543 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2544 // Construct and return the appropriate CastInst subclass 2545 switch (op) { 2546 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2547 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2548 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2549 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2550 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2551 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2552 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2553 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2554 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2555 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2556 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2557 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2558 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2559 default: llvm_unreachable("Invalid opcode provided"); 2560 } 2561 } 2562 2563 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2564 const Twine &Name, 2565 Instruction *InsertBefore) { 2566 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2567 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2568 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2569 } 2570 2571 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2572 const Twine &Name, 2573 BasicBlock *InsertAtEnd) { 2574 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2575 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2576 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2577 } 2578 2579 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2580 const Twine 
&Name, 2581 Instruction *InsertBefore) { 2582 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2583 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2584 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2585 } 2586 2587 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2588 const Twine &Name, 2589 BasicBlock *InsertAtEnd) { 2590 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2591 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2592 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2593 } 2594 2595 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2596 const Twine &Name, 2597 Instruction *InsertBefore) { 2598 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2599 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2600 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2601 } 2602 2603 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2604 const Twine &Name, 2605 BasicBlock *InsertAtEnd) { 2606 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2607 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2608 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2609 } 2610 2611 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2612 const Twine &Name, 2613 BasicBlock *InsertAtEnd) { 2614 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2615 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2616 "Invalid cast"); 2617 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2618 assert((!Ty->isVectorTy() || 2619 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2620 "Invalid cast"); 2621 2622 if (Ty->isIntOrIntVectorTy()) 2623 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2624 2625 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2626 } 2627 2628 /// Create 
a BitCast or a PtrToInt cast instruction 2629 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2630 const Twine &Name, 2631 Instruction *InsertBefore) { 2632 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2633 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2634 "Invalid cast"); 2635 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2636 assert((!Ty->isVectorTy() || 2637 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2638 "Invalid cast"); 2639 2640 if (Ty->isIntOrIntVectorTy()) 2641 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2642 2643 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2644 } 2645 2646 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2647 Value *S, Type *Ty, 2648 const Twine &Name, 2649 BasicBlock *InsertAtEnd) { 2650 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2651 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2652 2653 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2654 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2655 2656 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2657 } 2658 2659 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2660 Value *S, Type *Ty, 2661 const Twine &Name, 2662 Instruction *InsertBefore) { 2663 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2664 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2665 2666 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2667 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2668 2669 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2670 } 2671 2672 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2673 const Twine &Name, 2674 Instruction *InsertBefore) { 2675 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2676 return Create(Instruction::PtrToInt, S, Ty, 
Name, InsertBefore); 2677 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2678 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2679 2680 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2681 } 2682 2683 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2684 bool isSigned, const Twine &Name, 2685 Instruction *InsertBefore) { 2686 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2687 "Invalid integer cast"); 2688 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2689 unsigned DstBits = Ty->getScalarSizeInBits(); 2690 Instruction::CastOps opcode = 2691 (SrcBits == DstBits ? Instruction::BitCast : 2692 (SrcBits > DstBits ? Instruction::Trunc : 2693 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2694 return Create(opcode, C, Ty, Name, InsertBefore); 2695 } 2696 2697 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2698 bool isSigned, const Twine &Name, 2699 BasicBlock *InsertAtEnd) { 2700 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2701 "Invalid cast"); 2702 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2703 unsigned DstBits = Ty->getScalarSizeInBits(); 2704 Instruction::CastOps opcode = 2705 (SrcBits == DstBits ? Instruction::BitCast : 2706 (SrcBits > DstBits ? Instruction::Trunc : 2707 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2708 return Create(opcode, C, Ty, Name, InsertAtEnd); 2709 } 2710 2711 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2712 const Twine &Name, 2713 Instruction *InsertBefore) { 2714 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2715 "Invalid cast"); 2716 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2717 unsigned DstBits = Ty->getScalarSizeInBits(); 2718 Instruction::CastOps opcode = 2719 (SrcBits == DstBits ? Instruction::BitCast : 2720 (SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt)); 2721 return Create(opcode, C, Ty, Name, InsertBefore); 2722 } 2723 2724 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2725 const Twine &Name, 2726 BasicBlock *InsertAtEnd) { 2727 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2728 "Invalid cast"); 2729 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2730 unsigned DstBits = Ty->getScalarSizeInBits(); 2731 Instruction::CastOps opcode = 2732 (SrcBits == DstBits ? Instruction::BitCast : 2733 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2734 return Create(opcode, C, Ty, Name, InsertAtEnd); 2735 } 2736 2737 // Check whether it is valid to call getCastOpcode for these types. 2738 // This routine must be kept in sync with getCastOpcode. 2739 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2740 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2741 return false; 2742 2743 if (SrcTy == DestTy) 2744 return true; 2745 2746 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2747 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2748 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2749 // An element by element cast. Valid if casting the elements is valid. 2750 SrcTy = SrcVecTy->getElementType(); 2751 DestTy = DestVecTy->getElementType(); 2752 } 2753 2754 // Get the bit sizes, we'll need these 2755 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2756 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2757 2758 // Run through the possibilities ... 
2759 if (DestTy->isIntegerTy()) { // Casting to integral 2760 if (SrcTy->isIntegerTy()) // Casting from integral 2761 return true; 2762 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2763 return true; 2764 if (SrcTy->isVectorTy()) // Casting from vector 2765 return DestBits == SrcBits; 2766 // Casting from something else 2767 return SrcTy->isPointerTy(); 2768 } 2769 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2770 if (SrcTy->isIntegerTy()) // Casting from integral 2771 return true; 2772 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2773 return true; 2774 if (SrcTy->isVectorTy()) // Casting from vector 2775 return DestBits == SrcBits; 2776 // Casting from something else 2777 return false; 2778 } 2779 if (DestTy->isVectorTy()) // Casting to vector 2780 return DestBits == SrcBits; 2781 if (DestTy->isPointerTy()) { // Casting to pointer 2782 if (SrcTy->isPointerTy()) // Casting from pointer 2783 return true; 2784 return SrcTy->isIntegerTy(); // Casting from integral 2785 } 2786 if (DestTy->isX86_MMXTy()) { 2787 if (SrcTy->isVectorTy()) 2788 return DestBits == SrcBits; // 64-bit vector to MMX 2789 return false; 2790 } // Casting to something else 2791 return false; 2792 } 2793 2794 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2795 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2796 return false; 2797 2798 if (SrcTy == DestTy) 2799 return true; 2800 2801 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2802 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2803 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2804 // An element by element cast. Valid if casting the elements is valid. 
2805 SrcTy = SrcVecTy->getElementType(); 2806 DestTy = DestVecTy->getElementType(); 2807 } 2808 } 2809 } 2810 2811 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2812 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2813 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2814 } 2815 } 2816 2817 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2818 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2819 2820 // Could still have vectors of pointers if the number of elements doesn't 2821 // match 2822 if (SrcBits == 0 || DestBits == 0) 2823 return false; 2824 2825 if (SrcBits != DestBits) 2826 return false; 2827 2828 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2829 return false; 2830 2831 return true; 2832 } 2833 2834 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2835 const DataLayout &DL) { 2836 // ptrtoint and inttoptr are not allowed on non-integral pointers 2837 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2838 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2839 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2840 !DL.isNonIntegralPointerType(PtrTy)); 2841 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2842 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2843 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2844 !DL.isNonIntegralPointerType(PtrTy)); 2845 2846 return isBitCastable(SrcTy, DestTy); 2847 } 2848 2849 // Provide a way to get a "cast" where the cast opcode is inferred from the 2850 // types and size of the operand. This, basically, is a parallel of the 2851 // logic in the castIsValid function below. This axiom should hold: 2852 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2853 // should not assert in castIsValid. In other words, this produces a "correct" 2854 // casting opcode for the arguments passed to it. 2855 // This routine must be kept in sync with isCastable. 
2856 Instruction::CastOps 2857 CastInst::getCastOpcode( 2858 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 2859 Type *SrcTy = Src->getType(); 2860 2861 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 2862 "Only first class types are castable!"); 2863 2864 if (SrcTy == DestTy) 2865 return BitCast; 2866 2867 // FIXME: Check address space sizes here 2868 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2869 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2870 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2871 // An element by element cast. Find the appropriate opcode based on the 2872 // element types. 2873 SrcTy = SrcVecTy->getElementType(); 2874 DestTy = DestVecTy->getElementType(); 2875 } 2876 2877 // Get the bit sizes, we'll need these 2878 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2879 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2880 2881 // Run through the possibilities ... 
2882 if (DestTy->isIntegerTy()) { // Casting to integral 2883 if (SrcTy->isIntegerTy()) { // Casting from integral 2884 if (DestBits < SrcBits) 2885 return Trunc; // int -> smaller int 2886 else if (DestBits > SrcBits) { // its an extension 2887 if (SrcIsSigned) 2888 return SExt; // signed -> SEXT 2889 else 2890 return ZExt; // unsigned -> ZEXT 2891 } else { 2892 return BitCast; // Same size, No-op cast 2893 } 2894 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2895 if (DestIsSigned) 2896 return FPToSI; // FP -> sint 2897 else 2898 return FPToUI; // FP -> uint 2899 } else if (SrcTy->isVectorTy()) { 2900 assert(DestBits == SrcBits && 2901 "Casting vector to integer of different width"); 2902 return BitCast; // Same size, no-op cast 2903 } else { 2904 assert(SrcTy->isPointerTy() && 2905 "Casting from a value that is not first-class type"); 2906 return PtrToInt; // ptr -> int 2907 } 2908 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2909 if (SrcTy->isIntegerTy()) { // Casting from integral 2910 if (SrcIsSigned) 2911 return SIToFP; // sint -> FP 2912 else 2913 return UIToFP; // uint -> FP 2914 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2915 if (DestBits < SrcBits) { 2916 return FPTrunc; // FP -> smaller FP 2917 } else if (DestBits > SrcBits) { 2918 return FPExt; // FP -> larger FP 2919 } else { 2920 return BitCast; // same size, no-op cast 2921 } 2922 } else if (SrcTy->isVectorTy()) { 2923 assert(DestBits == SrcBits && 2924 "Casting vector to floating point of different width"); 2925 return BitCast; // same size, no-op cast 2926 } 2927 llvm_unreachable("Casting pointer or non-first class to float"); 2928 } else if (DestTy->isVectorTy()) { 2929 assert(DestBits == SrcBits && 2930 "Illegal cast to vector (wrong type or size)"); 2931 return BitCast; 2932 } else if (DestTy->isPointerTy()) { 2933 if (SrcTy->isPointerTy()) { 2934 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 2935 
return AddrSpaceCast; 2936 return BitCast; // ptr -> ptr 2937 } else if (SrcTy->isIntegerTy()) { 2938 return IntToPtr; // int -> ptr 2939 } 2940 llvm_unreachable("Casting pointer to other than pointer or int"); 2941 } else if (DestTy->isX86_MMXTy()) { 2942 if (SrcTy->isVectorTy()) { 2943 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 2944 return BitCast; // 64-bit vector to MMX 2945 } 2946 llvm_unreachable("Illegal cast to X86_MMX"); 2947 } 2948 llvm_unreachable("Casting to type that is not first-class"); 2949 } 2950 2951 //===----------------------------------------------------------------------===// 2952 // CastInst SubClass Constructors 2953 //===----------------------------------------------------------------------===// 2954 2955 /// Check that the construction parameters for a CastInst are correct. This 2956 /// could be broken out into the separate constructors but it is useful to have 2957 /// it in one place and to eliminate the redundant code for getting the sizes 2958 /// of the types involved. 2959 bool 2960 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 2961 // Check for type sanity on the arguments 2962 Type *SrcTy = S->getType(); 2963 2964 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 2965 SrcTy->isAggregateType() || DstTy->isAggregateType()) 2966 return false; 2967 2968 // Get the size of the types in bits, we'll need this later 2969 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2970 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 2971 2972 // If these are vector types, get the lengths of the vectors (using zero for 2973 // scalar types means that checking that vector lengths match also checks that 2974 // scalars are not being converted to vectors or vectors to scalars). 2975 unsigned SrcLength = SrcTy->isVectorTy() ? 2976 cast<VectorType>(SrcTy)->getNumElements() : 0; 2977 unsigned DstLength = DstTy->isVectorTy() ? 
2978 cast<VectorType>(DstTy)->getNumElements() : 0; 2979 2980 // Switch on the opcode provided 2981 switch (op) { 2982 default: return false; // This is an input error 2983 case Instruction::Trunc: 2984 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2985 SrcLength == DstLength && SrcBitSize > DstBitSize; 2986 case Instruction::ZExt: 2987 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2988 SrcLength == DstLength && SrcBitSize < DstBitSize; 2989 case Instruction::SExt: 2990 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2991 SrcLength == DstLength && SrcBitSize < DstBitSize; 2992 case Instruction::FPTrunc: 2993 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2994 SrcLength == DstLength && SrcBitSize > DstBitSize; 2995 case Instruction::FPExt: 2996 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2997 SrcLength == DstLength && SrcBitSize < DstBitSize; 2998 case Instruction::UIToFP: 2999 case Instruction::SIToFP: 3000 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3001 SrcLength == DstLength; 3002 case Instruction::FPToUI: 3003 case Instruction::FPToSI: 3004 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3005 SrcLength == DstLength; 3006 case Instruction::PtrToInt: 3007 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3008 return false; 3009 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3010 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3011 return false; 3012 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 3013 case Instruction::IntToPtr: 3014 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3015 return false; 3016 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3017 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3018 return false; 3019 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 3020 case Instruction::BitCast: { 3021 
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3022 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3023 3024 // BitCast implies a no-op cast of type only. No bits change. 3025 // However, you can't cast pointers to anything but pointers. 3026 if (!SrcPtrTy != !DstPtrTy) 3027 return false; 3028 3029 // For non-pointer cases, the cast is okay if the source and destination bit 3030 // widths are identical. 3031 if (!SrcPtrTy) 3032 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3033 3034 // If both are pointers then the address spaces must match. 3035 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3036 return false; 3037 3038 // A vector of pointers must have the same number of elements. 3039 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 3040 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 3041 if (SrcVecTy && DstVecTy) 3042 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3043 if (SrcVecTy) 3044 return SrcVecTy->getNumElements() == 1; 3045 if (DstVecTy) 3046 return DstVecTy->getNumElements() == 1; 3047 3048 return true; 3049 } 3050 case Instruction::AddrSpaceCast: { 3051 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3052 if (!SrcPtrTy) 3053 return false; 3054 3055 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3056 if (!DstPtrTy) 3057 return false; 3058 3059 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3060 return false; 3061 3062 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3063 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3064 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3065 3066 return false; 3067 } 3068 3069 return true; 3070 } 3071 } 3072 } 3073 3074 TruncInst::TruncInst( 3075 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3076 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3077 assert(castIsValid(getOpcode(), 
S, Ty) && "Illegal Trunc"); 3078 } 3079 3080 TruncInst::TruncInst( 3081 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3082 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3083 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3084 } 3085 3086 ZExtInst::ZExtInst( 3087 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3088 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3089 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3090 } 3091 3092 ZExtInst::ZExtInst( 3093 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3094 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3095 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3096 } 3097 SExtInst::SExtInst( 3098 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3099 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3100 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3101 } 3102 3103 SExtInst::SExtInst( 3104 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3105 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3106 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3107 } 3108 3109 FPTruncInst::FPTruncInst( 3110 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3111 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3112 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3113 } 3114 3115 FPTruncInst::FPTruncInst( 3116 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3117 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3118 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3119 } 3120 3121 FPExtInst::FPExtInst( 3122 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3123 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3124 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3125 } 3126 3127 FPExtInst::FPExtInst( 3128 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3129 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3130 
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3131 } 3132 3133 UIToFPInst::UIToFPInst( 3134 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3135 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3136 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3137 } 3138 3139 UIToFPInst::UIToFPInst( 3140 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3141 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3142 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3143 } 3144 3145 SIToFPInst::SIToFPInst( 3146 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3147 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3148 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3149 } 3150 3151 SIToFPInst::SIToFPInst( 3152 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3153 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3154 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3155 } 3156 3157 FPToUIInst::FPToUIInst( 3158 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3159 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3160 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3161 } 3162 3163 FPToUIInst::FPToUIInst( 3164 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3165 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3166 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3167 } 3168 3169 FPToSIInst::FPToSIInst( 3170 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3171 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3172 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3173 } 3174 3175 FPToSIInst::FPToSIInst( 3176 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3177 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3178 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3179 } 3180 3181 PtrToIntInst::PtrToIntInst( 3182 Value *S, Type *Ty, const Twine &Name, 
Instruction *InsertBefore 3183 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3184 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3185 } 3186 3187 PtrToIntInst::PtrToIntInst( 3188 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3189 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3190 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3191 } 3192 3193 IntToPtrInst::IntToPtrInst( 3194 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3195 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3196 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3197 } 3198 3199 IntToPtrInst::IntToPtrInst( 3200 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3201 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3202 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3203 } 3204 3205 BitCastInst::BitCastInst( 3206 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3207 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3208 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3209 } 3210 3211 BitCastInst::BitCastInst( 3212 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3213 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3214 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3215 } 3216 3217 AddrSpaceCastInst::AddrSpaceCastInst( 3218 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3219 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3220 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3221 } 3222 3223 AddrSpaceCastInst::AddrSpaceCastInst( 3224 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3225 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) { 3226 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3227 } 3228 3229 //===----------------------------------------------------------------------===// 3230 // CmpInst Classes 3231 
//===----------------------------------------------------------------------===// 3232 3233 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3234 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3235 Instruction *FlagsSource) 3236 : Instruction(ty, op, 3237 OperandTraits<CmpInst>::op_begin(this), 3238 OperandTraits<CmpInst>::operands(this), 3239 InsertBefore) { 3240 Op<0>() = LHS; 3241 Op<1>() = RHS; 3242 setPredicate((Predicate)predicate); 3243 setName(Name); 3244 if (FlagsSource) 3245 copyIRFlags(FlagsSource); 3246 } 3247 3248 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3249 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3250 : Instruction(ty, op, 3251 OperandTraits<CmpInst>::op_begin(this), 3252 OperandTraits<CmpInst>::operands(this), 3253 InsertAtEnd) { 3254 Op<0>() = LHS; 3255 Op<1>() = RHS; 3256 setPredicate((Predicate)predicate); 3257 setName(Name); 3258 } 3259 3260 CmpInst * 3261 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3262 const Twine &Name, Instruction *InsertBefore) { 3263 if (Op == Instruction::ICmp) { 3264 if (InsertBefore) 3265 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3266 S1, S2, Name); 3267 else 3268 return new ICmpInst(CmpInst::Predicate(predicate), 3269 S1, S2, Name); 3270 } 3271 3272 if (InsertBefore) 3273 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3274 S1, S2, Name); 3275 else 3276 return new FCmpInst(CmpInst::Predicate(predicate), 3277 S1, S2, Name); 3278 } 3279 3280 CmpInst * 3281 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3282 const Twine &Name, BasicBlock *InsertAtEnd) { 3283 if (Op == Instruction::ICmp) { 3284 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3285 S1, S2, Name); 3286 } 3287 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3288 S1, S2, Name); 3289 } 3290 3291 void CmpInst::swapOperands() { 3292 if (ICmpInst *IC = 
dyn_cast<ICmpInst>(this)) 3293 IC->swapOperands(); 3294 else 3295 cast<FCmpInst>(this)->swapOperands(); 3296 } 3297 3298 bool CmpInst::isCommutative() const { 3299 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3300 return IC->isCommutative(); 3301 return cast<FCmpInst>(this)->isCommutative(); 3302 } 3303 3304 bool CmpInst::isEquality() const { 3305 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3306 return IC->isEquality(); 3307 return cast<FCmpInst>(this)->isEquality(); 3308 } 3309 3310 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3311 switch (pred) { 3312 default: llvm_unreachable("Unknown cmp predicate!"); 3313 case ICMP_EQ: return ICMP_NE; 3314 case ICMP_NE: return ICMP_EQ; 3315 case ICMP_UGT: return ICMP_ULE; 3316 case ICMP_ULT: return ICMP_UGE; 3317 case ICMP_UGE: return ICMP_ULT; 3318 case ICMP_ULE: return ICMP_UGT; 3319 case ICMP_SGT: return ICMP_SLE; 3320 case ICMP_SLT: return ICMP_SGE; 3321 case ICMP_SGE: return ICMP_SLT; 3322 case ICMP_SLE: return ICMP_SGT; 3323 3324 case FCMP_OEQ: return FCMP_UNE; 3325 case FCMP_ONE: return FCMP_UEQ; 3326 case FCMP_OGT: return FCMP_ULE; 3327 case FCMP_OLT: return FCMP_UGE; 3328 case FCMP_OGE: return FCMP_ULT; 3329 case FCMP_OLE: return FCMP_UGT; 3330 case FCMP_UEQ: return FCMP_ONE; 3331 case FCMP_UNE: return FCMP_OEQ; 3332 case FCMP_UGT: return FCMP_OLE; 3333 case FCMP_ULT: return FCMP_OGE; 3334 case FCMP_UGE: return FCMP_OLT; 3335 case FCMP_ULE: return FCMP_OGT; 3336 case FCMP_ORD: return FCMP_UNO; 3337 case FCMP_UNO: return FCMP_ORD; 3338 case FCMP_TRUE: return FCMP_FALSE; 3339 case FCMP_FALSE: return FCMP_TRUE; 3340 } 3341 } 3342 3343 StringRef CmpInst::getPredicateName(Predicate Pred) { 3344 switch (Pred) { 3345 default: return "unknown"; 3346 case FCmpInst::FCMP_FALSE: return "false"; 3347 case FCmpInst::FCMP_OEQ: return "oeq"; 3348 case FCmpInst::FCMP_OGT: return "ogt"; 3349 case FCmpInst::FCMP_OGE: return "oge"; 3350 case FCmpInst::FCMP_OLT: return "olt"; 3351 case 
FCmpInst::FCMP_OLE: return "ole"; 3352 case FCmpInst::FCMP_ONE: return "one"; 3353 case FCmpInst::FCMP_ORD: return "ord"; 3354 case FCmpInst::FCMP_UNO: return "uno"; 3355 case FCmpInst::FCMP_UEQ: return "ueq"; 3356 case FCmpInst::FCMP_UGT: return "ugt"; 3357 case FCmpInst::FCMP_UGE: return "uge"; 3358 case FCmpInst::FCMP_ULT: return "ult"; 3359 case FCmpInst::FCMP_ULE: return "ule"; 3360 case FCmpInst::FCMP_UNE: return "une"; 3361 case FCmpInst::FCMP_TRUE: return "true"; 3362 case ICmpInst::ICMP_EQ: return "eq"; 3363 case ICmpInst::ICMP_NE: return "ne"; 3364 case ICmpInst::ICMP_SGT: return "sgt"; 3365 case ICmpInst::ICMP_SGE: return "sge"; 3366 case ICmpInst::ICMP_SLT: return "slt"; 3367 case ICmpInst::ICMP_SLE: return "sle"; 3368 case ICmpInst::ICMP_UGT: return "ugt"; 3369 case ICmpInst::ICMP_UGE: return "uge"; 3370 case ICmpInst::ICMP_ULT: return "ult"; 3371 case ICmpInst::ICMP_ULE: return "ule"; 3372 } 3373 } 3374 3375 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3376 switch (pred) { 3377 default: llvm_unreachable("Unknown icmp predicate!"); 3378 case ICMP_EQ: case ICMP_NE: 3379 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3380 return pred; 3381 case ICMP_UGT: return ICMP_SGT; 3382 case ICMP_ULT: return ICMP_SLT; 3383 case ICMP_UGE: return ICMP_SGE; 3384 case ICMP_ULE: return ICMP_SLE; 3385 } 3386 } 3387 3388 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3389 switch (pred) { 3390 default: llvm_unreachable("Unknown icmp predicate!"); 3391 case ICMP_EQ: case ICMP_NE: 3392 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3393 return pred; 3394 case ICMP_SGT: return ICMP_UGT; 3395 case ICMP_SLT: return ICMP_ULT; 3396 case ICMP_SGE: return ICMP_UGE; 3397 case ICMP_SLE: return ICMP_ULE; 3398 } 3399 } 3400 3401 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3402 switch (pred) { 3403 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3404 case ICMP_SGT: 
return ICMP_SGE; 3405 case ICMP_SLT: return ICMP_SLE; 3406 case ICMP_SGE: return ICMP_SGT; 3407 case ICMP_SLE: return ICMP_SLT; 3408 case ICMP_UGT: return ICMP_UGE; 3409 case ICMP_ULT: return ICMP_ULE; 3410 case ICMP_UGE: return ICMP_UGT; 3411 case ICMP_ULE: return ICMP_ULT; 3412 3413 case FCMP_OGT: return FCMP_OGE; 3414 case FCMP_OLT: return FCMP_OLE; 3415 case FCMP_OGE: return FCMP_OGT; 3416 case FCMP_OLE: return FCMP_OLT; 3417 case FCMP_UGT: return FCMP_UGE; 3418 case FCMP_ULT: return FCMP_ULE; 3419 case FCMP_UGE: return FCMP_UGT; 3420 case FCMP_ULE: return FCMP_ULT; 3421 } 3422 } 3423 3424 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3425 switch (pred) { 3426 default: llvm_unreachable("Unknown cmp predicate!"); 3427 case ICMP_EQ: case ICMP_NE: 3428 return pred; 3429 case ICMP_SGT: return ICMP_SLT; 3430 case ICMP_SLT: return ICMP_SGT; 3431 case ICMP_SGE: return ICMP_SLE; 3432 case ICMP_SLE: return ICMP_SGE; 3433 case ICMP_UGT: return ICMP_ULT; 3434 case ICMP_ULT: return ICMP_UGT; 3435 case ICMP_UGE: return ICMP_ULE; 3436 case ICMP_ULE: return ICMP_UGE; 3437 3438 case FCMP_FALSE: case FCMP_TRUE: 3439 case FCMP_OEQ: case FCMP_ONE: 3440 case FCMP_UEQ: case FCMP_UNE: 3441 case FCMP_ORD: case FCMP_UNO: 3442 return pred; 3443 case FCMP_OGT: return FCMP_OLT; 3444 case FCMP_OLT: return FCMP_OGT; 3445 case FCMP_OGE: return FCMP_OLE; 3446 case FCMP_OLE: return FCMP_OGE; 3447 case FCMP_UGT: return FCMP_ULT; 3448 case FCMP_ULT: return FCMP_UGT; 3449 case FCMP_UGE: return FCMP_ULE; 3450 case FCMP_ULE: return FCMP_UGE; 3451 } 3452 } 3453 3454 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) { 3455 switch (pred) { 3456 case ICMP_SGT: return ICMP_SGE; 3457 case ICMP_SLT: return ICMP_SLE; 3458 case ICMP_UGT: return ICMP_UGE; 3459 case ICMP_ULT: return ICMP_ULE; 3460 case FCMP_OGT: return FCMP_OGE; 3461 case FCMP_OLT: return FCMP_OLE; 3462 case FCMP_UGT: return FCMP_UGE; 3463 case FCMP_ULT: return FCMP_ULE; 3464 default: return pred; 3465 } 
3466 } 3467 3468 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) { 3469 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3470 3471 switch (pred) { 3472 default: 3473 llvm_unreachable("Unknown predicate!"); 3474 case CmpInst::ICMP_ULT: 3475 return CmpInst::ICMP_SLT; 3476 case CmpInst::ICMP_ULE: 3477 return CmpInst::ICMP_SLE; 3478 case CmpInst::ICMP_UGT: 3479 return CmpInst::ICMP_SGT; 3480 case CmpInst::ICMP_UGE: 3481 return CmpInst::ICMP_SGE; 3482 } 3483 } 3484 3485 bool CmpInst::isUnsigned(Predicate predicate) { 3486 switch (predicate) { 3487 default: return false; 3488 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3489 case ICmpInst::ICMP_UGE: return true; 3490 } 3491 } 3492 3493 bool CmpInst::isSigned(Predicate predicate) { 3494 switch (predicate) { 3495 default: return false; 3496 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3497 case ICmpInst::ICMP_SGE: return true; 3498 } 3499 } 3500 3501 bool CmpInst::isOrdered(Predicate predicate) { 3502 switch (predicate) { 3503 default: return false; 3504 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3505 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3506 case FCmpInst::FCMP_ORD: return true; 3507 } 3508 } 3509 3510 bool CmpInst::isUnordered(Predicate predicate) { 3511 switch (predicate) { 3512 default: return false; 3513 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3514 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3515 case FCmpInst::FCMP_UNO: return true; 3516 } 3517 } 3518 3519 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3520 switch(predicate) { 3521 default: return false; 3522 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3523 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3524 } 3525 } 3526 3527 bool CmpInst::isFalseWhenEqual(Predicate 
predicate) { 3528 switch(predicate) { 3529 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT: 3530 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; 3531 default: return false; 3532 } 3533 } 3534 3535 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3536 // If the predicates match, then we know the first condition implies the 3537 // second is true. 3538 if (Pred1 == Pred2) 3539 return true; 3540 3541 switch (Pred1) { 3542 default: 3543 break; 3544 case ICMP_EQ: 3545 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3546 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3547 Pred2 == ICMP_SLE; 3548 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3549 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3550 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3551 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3552 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3553 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3554 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 
3555 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3556 } 3557 return false; 3558 } 3559 3560 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3561 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3562 } 3563 3564 //===----------------------------------------------------------------------===// 3565 // SwitchInst Implementation 3566 //===----------------------------------------------------------------------===// 3567 3568 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3569 assert(Value && Default && NumReserved); 3570 ReservedSpace = NumReserved; 3571 setNumHungOffUseOperands(2); 3572 allocHungoffUses(ReservedSpace); 3573 3574 Op<0>() = Value; 3575 Op<1>() = Default; 3576 } 3577 3578 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3579 /// switch on and a default destination. The number of additional cases can 3580 /// be specified here to make memory allocation more efficient. This 3581 /// constructor can also autoinsert before another instruction. 3582 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3583 Instruction *InsertBefore) 3584 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3585 nullptr, 0, InsertBefore) { 3586 init(Value, Default, 2+NumCases*2); 3587 } 3588 3589 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3590 /// switch on and a default destination. The number of additional cases can 3591 /// be specified here to make memory allocation more efficient. This 3592 /// constructor also autoinserts at the end of the specified BasicBlock. 
3593 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3594 BasicBlock *InsertAtEnd) 3595 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3596 nullptr, 0, InsertAtEnd) { 3597 init(Value, Default, 2+NumCases*2); 3598 } 3599 3600 SwitchInst::SwitchInst(const SwitchInst &SI) 3601 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3602 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3603 setNumHungOffUseOperands(SI.getNumOperands()); 3604 Use *OL = getOperandList(); 3605 const Use *InOL = SI.getOperandList(); 3606 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3607 OL[i] = InOL[i]; 3608 OL[i+1] = InOL[i+1]; 3609 } 3610 SubclassOptionalData = SI.SubclassOptionalData; 3611 } 3612 3613 /// addCase - Add an entry to the switch instruction... 3614 /// 3615 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3616 unsigned NewCaseIdx = getNumCases(); 3617 unsigned OpNo = getNumOperands(); 3618 if (OpNo+2 > ReservedSpace) 3619 growOperands(); // Get more space! 3620 // Initialize some new operands. 3621 assert(OpNo+1 < ReservedSpace && "Growing didn't work!"); 3622 setNumHungOffUseOperands(OpNo+2); 3623 CaseHandle Case(this, NewCaseIdx); 3624 Case.setValue(OnVal); 3625 Case.setSuccessor(Dest); 3626 } 3627 3628 /// removeCase - This method removes the specified case and its successor 3629 /// from the switch instruction. 3630 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) { 3631 unsigned idx = I->getCaseIndex(); 3632 3633 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!"); 3634 3635 unsigned NumOps = getNumOperands(); 3636 Use *OL = getOperandList(); 3637 3638 // Overwrite this case with the end of the list. 3639 if (2 + (idx + 1) * 2 != NumOps) { 3640 OL[2 + idx * 2] = OL[NumOps - 2]; 3641 OL[2 + idx * 2 + 1] = OL[NumOps - 1]; 3642 } 3643 3644 // Nuke the last value. 
3645 OL[NumOps-2].set(nullptr); 3646 OL[NumOps-2+1].set(nullptr); 3647 setNumHungOffUseOperands(NumOps-2); 3648 3649 return CaseIt(this, idx); 3650 } 3651 3652 /// growOperands - grow operands - This grows the operand list in response 3653 /// to a push_back style of operation. This grows the number of ops by 3 times. 3654 /// 3655 void SwitchInst::growOperands() { 3656 unsigned e = getNumOperands(); 3657 unsigned NumOps = e*3; 3658 3659 ReservedSpace = NumOps; 3660 growHungoffUses(ReservedSpace); 3661 } 3662 3663 //===----------------------------------------------------------------------===// 3664 // IndirectBrInst Implementation 3665 //===----------------------------------------------------------------------===// 3666 3667 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 3668 assert(Address && Address->getType()->isPointerTy() && 3669 "Address of indirectbr must be a pointer"); 3670 ReservedSpace = 1+NumDests; 3671 setNumHungOffUseOperands(1); 3672 allocHungoffUses(ReservedSpace); 3673 3674 Op<0>() = Address; 3675 } 3676 3677 3678 /// growOperands - grow operands - This grows the operand list in response 3679 /// to a push_back style of operation. This grows the number of ops by 2 times. 
3680 /// 3681 void IndirectBrInst::growOperands() { 3682 unsigned e = getNumOperands(); 3683 unsigned NumOps = e*2; 3684 3685 ReservedSpace = NumOps; 3686 growHungoffUses(ReservedSpace); 3687 } 3688 3689 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3690 Instruction *InsertBefore) 3691 : Instruction(Type::getVoidTy(Address->getContext()), 3692 Instruction::IndirectBr, nullptr, 0, InsertBefore) { 3693 init(Address, NumCases); 3694 } 3695 3696 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3697 BasicBlock *InsertAtEnd) 3698 : Instruction(Type::getVoidTy(Address->getContext()), 3699 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) { 3700 init(Address, NumCases); 3701 } 3702 3703 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 3704 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 3705 nullptr, IBI.getNumOperands()) { 3706 allocHungoffUses(IBI.getNumOperands()); 3707 Use *OL = getOperandList(); 3708 const Use *InOL = IBI.getOperandList(); 3709 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 3710 OL[i] = InOL[i]; 3711 SubclassOptionalData = IBI.SubclassOptionalData; 3712 } 3713 3714 /// addDestination - Add a destination. 3715 /// 3716 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 3717 unsigned OpNo = getNumOperands(); 3718 if (OpNo+1 > ReservedSpace) 3719 growOperands(); // Get more space! 3720 // Initialize some new operands. 3721 assert(OpNo < ReservedSpace && "Growing didn't work!"); 3722 setNumHungOffUseOperands(OpNo+1); 3723 getOperandList()[OpNo] = DestBB; 3724 } 3725 3726 /// removeDestination - This method removes the specified successor from the 3727 /// indirectbr instruction. 3728 void IndirectBrInst::removeDestination(unsigned idx) { 3729 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 3730 3731 unsigned NumOps = getNumOperands(); 3732 Use *OL = getOperandList(); 3733 3734 // Replace this value with the last one. 
3735 OL[idx+1] = OL[NumOps-1]; 3736 3737 // Nuke the last value. 3738 OL[NumOps-1].set(nullptr); 3739 setNumHungOffUseOperands(NumOps-1); 3740 } 3741 3742 //===----------------------------------------------------------------------===// 3743 // cloneImpl() implementations 3744 //===----------------------------------------------------------------------===// 3745 3746 // Define these methods here so vtables don't get emitted into every translation 3747 // unit that uses these classes. 3748 3749 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 3750 return new (getNumOperands()) GetElementPtrInst(*this); 3751 } 3752 3753 UnaryOperator *UnaryOperator::cloneImpl() const { 3754 return Create(getOpcode(), Op<0>()); 3755 } 3756 3757 BinaryOperator *BinaryOperator::cloneImpl() const { 3758 return Create(getOpcode(), Op<0>(), Op<1>()); 3759 } 3760 3761 FCmpInst *FCmpInst::cloneImpl() const { 3762 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 3763 } 3764 3765 ICmpInst *ICmpInst::cloneImpl() const { 3766 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 3767 } 3768 3769 ExtractValueInst *ExtractValueInst::cloneImpl() const { 3770 return new ExtractValueInst(*this); 3771 } 3772 3773 InsertValueInst *InsertValueInst::cloneImpl() const { 3774 return new InsertValueInst(*this); 3775 } 3776 3777 AllocaInst *AllocaInst::cloneImpl() const { 3778 AllocaInst *Result = new AllocaInst(getAllocatedType(), 3779 getType()->getAddressSpace(), 3780 (Value *)getOperand(0), getAlignment()); 3781 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 3782 Result->setSwiftError(isSwiftError()); 3783 return Result; 3784 } 3785 3786 LoadInst *LoadInst::cloneImpl() const { 3787 return new LoadInst(getOperand(0), Twine(), isVolatile(), 3788 getAlignment(), getOrdering(), getSyncScopeID()); 3789 } 3790 3791 StoreInst *StoreInst::cloneImpl() const { 3792 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), 3793 getAlignment(), getOrdering(), getSyncScopeID()); 3794 3795 } 
3796 3797 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 3798 AtomicCmpXchgInst *Result = 3799 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 3800 getSuccessOrdering(), getFailureOrdering(), 3801 getSyncScopeID()); 3802 Result->setVolatile(isVolatile()); 3803 Result->setWeak(isWeak()); 3804 return Result; 3805 } 3806 3807 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 3808 AtomicRMWInst *Result = 3809 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1), 3810 getOrdering(), getSyncScopeID()); 3811 Result->setVolatile(isVolatile()); 3812 return Result; 3813 } 3814 3815 FenceInst *FenceInst::cloneImpl() const { 3816 return new FenceInst(getContext(), getOrdering(), getSyncScopeID()); 3817 } 3818 3819 TruncInst *TruncInst::cloneImpl() const { 3820 return new TruncInst(getOperand(0), getType()); 3821 } 3822 3823 ZExtInst *ZExtInst::cloneImpl() const { 3824 return new ZExtInst(getOperand(0), getType()); 3825 } 3826 3827 SExtInst *SExtInst::cloneImpl() const { 3828 return new SExtInst(getOperand(0), getType()); 3829 } 3830 3831 FPTruncInst *FPTruncInst::cloneImpl() const { 3832 return new FPTruncInst(getOperand(0), getType()); 3833 } 3834 3835 FPExtInst *FPExtInst::cloneImpl() const { 3836 return new FPExtInst(getOperand(0), getType()); 3837 } 3838 3839 UIToFPInst *UIToFPInst::cloneImpl() const { 3840 return new UIToFPInst(getOperand(0), getType()); 3841 } 3842 3843 SIToFPInst *SIToFPInst::cloneImpl() const { 3844 return new SIToFPInst(getOperand(0), getType()); 3845 } 3846 3847 FPToUIInst *FPToUIInst::cloneImpl() const { 3848 return new FPToUIInst(getOperand(0), getType()); 3849 } 3850 3851 FPToSIInst *FPToSIInst::cloneImpl() const { 3852 return new FPToSIInst(getOperand(0), getType()); 3853 } 3854 3855 PtrToIntInst *PtrToIntInst::cloneImpl() const { 3856 return new PtrToIntInst(getOperand(0), getType()); 3857 } 3858 3859 IntToPtrInst *IntToPtrInst::cloneImpl() const { 3860 return new IntToPtrInst(getOperand(0), getType()); 
3861 } 3862 3863 BitCastInst *BitCastInst::cloneImpl() const { 3864 return new BitCastInst(getOperand(0), getType()); 3865 } 3866 3867 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 3868 return new AddrSpaceCastInst(getOperand(0), getType()); 3869 } 3870 3871 CallInst *CallInst::cloneImpl() const { 3872 if (hasOperandBundles()) { 3873 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 3874 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 3875 } 3876 return new(getNumOperands()) CallInst(*this); 3877 } 3878 3879 SelectInst *SelectInst::cloneImpl() const { 3880 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3881 } 3882 3883 VAArgInst *VAArgInst::cloneImpl() const { 3884 return new VAArgInst(getOperand(0), getType()); 3885 } 3886 3887 ExtractElementInst *ExtractElementInst::cloneImpl() const { 3888 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 3889 } 3890 3891 InsertElementInst *InsertElementInst::cloneImpl() const { 3892 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3893 } 3894 3895 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 3896 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 3897 } 3898 3899 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 3900 3901 LandingPadInst *LandingPadInst::cloneImpl() const { 3902 return new LandingPadInst(*this); 3903 } 3904 3905 ReturnInst *ReturnInst::cloneImpl() const { 3906 return new(getNumOperands()) ReturnInst(*this); 3907 } 3908 3909 BranchInst *BranchInst::cloneImpl() const { 3910 return new(getNumOperands()) BranchInst(*this); 3911 } 3912 3913 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 3914 3915 IndirectBrInst *IndirectBrInst::cloneImpl() const { 3916 return new IndirectBrInst(*this); 3917 } 3918 3919 InvokeInst *InvokeInst::cloneImpl() const { 3920 if (hasOperandBundles()) { 3921 unsigned DescriptorBytes = 
getNumOperandBundles() * sizeof(BundleOpInfo); 3922 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 3923 } 3924 return new(getNumOperands()) InvokeInst(*this); 3925 } 3926 3927 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 3928 3929 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 3930 return new (getNumOperands()) CleanupReturnInst(*this); 3931 } 3932 3933 CatchReturnInst *CatchReturnInst::cloneImpl() const { 3934 return new (getNumOperands()) CatchReturnInst(*this); 3935 } 3936 3937 CatchSwitchInst *CatchSwitchInst::cloneImpl() const { 3938 return new CatchSwitchInst(*this); 3939 } 3940 3941 FuncletPadInst *FuncletPadInst::cloneImpl() const { 3942 return new (getNumOperands()) FuncletPadInst(*this); 3943 } 3944 3945 UnreachableInst *UnreachableInst::cloneImpl() const { 3946 LLVMContext &Context = getContext(); 3947 return new UnreachableInst(Context); 3948 } 3949