1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements all of the non-inline methods for the LLVM instruction 11 // classes. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/IR/Instructions.h" 16 #include "LLVMContextImpl.h" 17 #include "llvm/ADT/None.h" 18 #include "llvm/ADT/SmallVector.h" 19 #include "llvm/ADT/Twine.h" 20 #include "llvm/IR/Attributes.h" 21 #include "llvm/IR/BasicBlock.h" 22 #include "llvm/IR/CallSite.h" 23 #include "llvm/IR/Constant.h" 24 #include "llvm/IR/Constants.h" 25 #include "llvm/IR/DataLayout.h" 26 #include "llvm/IR/DerivedTypes.h" 27 #include "llvm/IR/Function.h" 28 #include "llvm/IR/InstrTypes.h" 29 #include "llvm/IR/Instruction.h" 30 #include "llvm/IR/LLVMContext.h" 31 #include "llvm/IR/Metadata.h" 32 #include "llvm/IR/Module.h" 33 #include "llvm/IR/Operator.h" 34 #include "llvm/IR/Type.h" 35 #include "llvm/IR/Value.h" 36 #include "llvm/Support/AtomicOrdering.h" 37 #include "llvm/Support/Casting.h" 38 #include "llvm/Support/ErrorHandling.h" 39 #include "llvm/Support/MathExtras.h" 40 #include <algorithm> 41 #include <cassert> 42 #include <cstdint> 43 #include <vector> 44 45 using namespace llvm; 46 47 //===----------------------------------------------------------------------===// 48 // AllocaInst Class 49 //===----------------------------------------------------------------------===// 50 51 Optional<uint64_t> 52 AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const { 53 uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType()); 54 if (isArrayAllocation()) { 55 auto C = dyn_cast<ConstantInt>(getArraySize()); 56 if (!C) 57 return None; 58 Size *= 
C->getZExtValue(); 59 } 60 return Size; 61 } 62 63 //===----------------------------------------------------------------------===// 64 // CallSite Class 65 //===----------------------------------------------------------------------===// 66 67 User::op_iterator CallSite::getCallee() const { 68 Instruction *II(getInstruction()); 69 return isCall() 70 ? cast<CallInst>(II)->op_end() - 1 // Skip Callee 71 : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee 72 } 73 74 //===----------------------------------------------------------------------===// 75 // SelectInst Class 76 //===----------------------------------------------------------------------===// 77 78 /// areInvalidOperands - Return a string if the specified operands are invalid 79 /// for a select operation, otherwise return null. 80 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) { 81 if (Op1->getType() != Op2->getType()) 82 return "both values to select must have same type"; 83 84 if (Op1->getType()->isTokenTy()) 85 return "select values cannot have token type"; 86 87 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) { 88 // Vector select. 
89 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext())) 90 return "vector select condition element type must be i1"; 91 VectorType *ET = dyn_cast<VectorType>(Op1->getType()); 92 if (!ET) 93 return "selected values for vector select must be vectors"; 94 if (ET->getNumElements() != VT->getNumElements()) 95 return "vector select requires selected vectors to have " 96 "the same vector length as select condition"; 97 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) { 98 return "select condition must be i1 or <n x i1>"; 99 } 100 return nullptr; 101 } 102 103 //===----------------------------------------------------------------------===// 104 // PHINode Class 105 //===----------------------------------------------------------------------===// 106 107 PHINode::PHINode(const PHINode &PN) 108 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()), 109 ReservedSpace(PN.getNumOperands()) { 110 allocHungoffUses(PN.getNumOperands()); 111 std::copy(PN.op_begin(), PN.op_end(), op_begin()); 112 std::copy(PN.block_begin(), PN.block_end(), block_begin()); 113 SubclassOptionalData = PN.SubclassOptionalData; 114 } 115 116 // removeIncomingValue - Remove an incoming value. This is useful if a 117 // predecessor basic block is deleted. 118 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) { 119 Value *Removed = getIncomingValue(Idx); 120 121 // Move everything after this operand down. 122 // 123 // FIXME: we could just swap with the end of the list, then erase. However, 124 // clients might not expect this to happen. The code as it is thrashes the 125 // use/def lists, which is kinda lame. 126 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx); 127 std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx); 128 129 // Nuke the last value. 130 Op<-1>().set(nullptr); 131 setNumHungOffUseOperands(getNumOperands() - 1); 132 133 // If the PHI node is dead, because it has zero entries, nuke it now. 
134 if (getNumOperands() == 0 && DeletePHIIfEmpty) { 135 // If anyone is using this PHI, make them use a dummy value instead... 136 replaceAllUsesWith(UndefValue::get(getType())); 137 eraseFromParent(); 138 } 139 return Removed; 140 } 141 142 /// growOperands - grow operands - This grows the operand list in response 143 /// to a push_back style of operation. This grows the number of ops by 1.5 144 /// times. 145 /// 146 void PHINode::growOperands() { 147 unsigned e = getNumOperands(); 148 unsigned NumOps = e + e / 2; 149 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common. 150 151 ReservedSpace = NumOps; 152 growHungoffUses(ReservedSpace, /* IsPhi */ true); 153 } 154 155 /// hasConstantValue - If the specified PHI node always merges together the same 156 /// value, return the value, otherwise return null. 157 Value *PHINode::hasConstantValue() const { 158 // Exploit the fact that phi nodes always have at least one entry. 159 Value *ConstantValue = getIncomingValue(0); 160 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i) 161 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) { 162 if (ConstantValue != this) 163 return nullptr; // Incoming values not all the same. 164 // The case where the first value is this PHI. 165 ConstantValue = getIncomingValue(i); 166 } 167 if (ConstantValue == this) 168 return UndefValue::get(getType()); 169 return ConstantValue; 170 } 171 172 /// hasConstantOrUndefValue - Whether the specified PHI node always merges 173 /// together the same value, assuming that undefs result in the same value as 174 /// non-undefs. 175 /// Unlike \ref hasConstantValue, this does not return a value because the 176 /// unique non-undef incoming value need not dominate the PHI node. 
177 bool PHINode::hasConstantOrUndefValue() const { 178 Value *ConstantValue = nullptr; 179 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) { 180 Value *Incoming = getIncomingValue(i); 181 if (Incoming != this && !isa<UndefValue>(Incoming)) { 182 if (ConstantValue && ConstantValue != Incoming) 183 return false; 184 ConstantValue = Incoming; 185 } 186 } 187 return true; 188 } 189 190 //===----------------------------------------------------------------------===// 191 // LandingPadInst Implementation 192 //===----------------------------------------------------------------------===// 193 194 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues, 195 const Twine &NameStr, Instruction *InsertBefore) 196 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) { 197 init(NumReservedValues, NameStr); 198 } 199 200 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues, 201 const Twine &NameStr, BasicBlock *InsertAtEnd) 202 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) { 203 init(NumReservedValues, NameStr); 204 } 205 206 LandingPadInst::LandingPadInst(const LandingPadInst &LP) 207 : Instruction(LP.getType(), Instruction::LandingPad, nullptr, 208 LP.getNumOperands()), 209 ReservedSpace(LP.getNumOperands()) { 210 allocHungoffUses(LP.getNumOperands()); 211 Use *OL = getOperandList(); 212 const Use *InOL = LP.getOperandList(); 213 for (unsigned I = 0, E = ReservedSpace; I != E; ++I) 214 OL[I] = InOL[I]; 215 216 setCleanup(LP.isCleanup()); 217 } 218 219 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses, 220 const Twine &NameStr, 221 Instruction *InsertBefore) { 222 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore); 223 } 224 225 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses, 226 const Twine &NameStr, 227 BasicBlock *InsertAtEnd) { 228 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, 
InsertAtEnd); 229 } 230 231 void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) { 232 ReservedSpace = NumReservedValues; 233 setNumHungOffUseOperands(0); 234 allocHungoffUses(ReservedSpace); 235 setName(NameStr); 236 setCleanup(false); 237 } 238 239 /// growOperands - grow operands - This grows the operand list in response to a 240 /// push_back style of operation. This grows the number of ops by 2 times. 241 void LandingPadInst::growOperands(unsigned Size) { 242 unsigned e = getNumOperands(); 243 if (ReservedSpace >= e + Size) return; 244 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2; 245 growHungoffUses(ReservedSpace); 246 } 247 248 void LandingPadInst::addClause(Constant *Val) { 249 unsigned OpNo = getNumOperands(); 250 growOperands(1); 251 assert(OpNo < ReservedSpace && "Growing didn't work!"); 252 setNumHungOffUseOperands(getNumOperands() + 1); 253 getOperandList()[OpNo] = Val; 254 } 255 256 //===----------------------------------------------------------------------===// 257 // CallInst Implementation 258 //===----------------------------------------------------------------------===// 259 260 void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, 261 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) { 262 this->FTy = FTy; 263 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 && 264 "NumOperands not set up?"); 265 Op<-1>() = Func; 266 267 #ifndef NDEBUG 268 assert((Args.size() == FTy->getNumParams() || 269 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && 270 "Calling a function with bad signature!"); 271 272 for (unsigned i = 0; i != Args.size(); ++i) 273 assert((i >= FTy->getNumParams() || 274 FTy->getParamType(i) == Args[i]->getType()) && 275 "Calling a function with a bad signature!"); 276 #endif 277 278 std::copy(Args.begin(), Args.end(), op_begin()); 279 280 auto It = populateBundleOperandInfos(Bundles, Args.size()); 281 (void)It; 282 assert(It + 1 == op_end() && 
"Should add up!"); 283 284 setName(NameStr); 285 } 286 287 void CallInst::init(Value *Func, const Twine &NameStr) { 288 FTy = 289 cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType()); 290 assert(getNumOperands() == 1 && "NumOperands not set up?"); 291 Op<-1>() = Func; 292 293 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature"); 294 295 setName(NameStr); 296 } 297 298 CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore) 299 : CallBase<CallInst>( 300 cast<FunctionType>( 301 cast<PointerType>(Func->getType())->getElementType()) 302 ->getReturnType(), 303 Instruction::Call, 304 OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, 305 InsertBefore) { 306 init(Func, Name); 307 } 308 309 CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd) 310 : CallBase<CallInst>( 311 cast<FunctionType>( 312 cast<PointerType>(Func->getType())->getElementType()) 313 ->getReturnType(), 314 Instruction::Call, 315 OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, InsertAtEnd) { 316 init(Func, Name); 317 } 318 319 CallInst::CallInst(const CallInst &CI) 320 : CallBase<CallInst>(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, 321 OperandTraits<CallBase<CallInst>>::op_end(this) - 322 CI.getNumOperands(), 323 CI.getNumOperands()) { 324 setTailCallKind(CI.getTailCallKind()); 325 setCallingConv(CI.getCallingConv()); 326 327 std::copy(CI.op_begin(), CI.op_end(), op_begin()); 328 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(), 329 bundle_op_info_begin()); 330 SubclassOptionalData = CI.SubclassOptionalData; 331 } 332 333 CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB, 334 Instruction *InsertPt) { 335 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end()); 336 337 auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(), 338 InsertPt); 339 NewCI->setTailCallKind(CI->getTailCallKind()); 340 NewCI->setCallingConv(CI->getCallingConv()); 341 
NewCI->SubclassOptionalData = CI->SubclassOptionalData; 342 NewCI->setAttributes(CI->getAttributes()); 343 NewCI->setDebugLoc(CI->getDebugLoc()); 344 return NewCI; 345 } 346 347 348 349 350 351 352 353 354 355 356 /// IsConstantOne - Return true only if val is constant int 1 357 static bool IsConstantOne(Value *val) { 358 assert(val && "IsConstantOne does not work with nullptr val"); 359 const ConstantInt *CVal = dyn_cast<ConstantInt>(val); 360 return CVal && CVal->isOne(); 361 } 362 363 static Instruction *createMalloc(Instruction *InsertBefore, 364 BasicBlock *InsertAtEnd, Type *IntPtrTy, 365 Type *AllocTy, Value *AllocSize, 366 Value *ArraySize, 367 ArrayRef<OperandBundleDef> OpB, 368 Function *MallocF, const Twine &Name) { 369 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && 370 "createMalloc needs either InsertBefore or InsertAtEnd"); 371 372 // malloc(type) becomes: 373 // bitcast (i8* malloc(typeSize)) to type* 374 // malloc(type, arraySize) becomes: 375 // bitcast (i8* malloc(typeSize*arraySize)) to type* 376 if (!ArraySize) 377 ArraySize = ConstantInt::get(IntPtrTy, 1); 378 else if (ArraySize->getType() != IntPtrTy) { 379 if (InsertBefore) 380 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false, 381 "", InsertBefore); 382 else 383 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false, 384 "", InsertAtEnd); 385 } 386 387 if (!IsConstantOne(ArraySize)) { 388 if (IsConstantOne(AllocSize)) { 389 AllocSize = ArraySize; // Operand * 1 = Operand 390 } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) { 391 Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy, 392 false /*ZExt*/); 393 // Malloc arg is constant product of type size and array size 394 AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize)); 395 } else { 396 // Multiply type size by the array size... 
397 if (InsertBefore) 398 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize, 399 "mallocsize", InsertBefore); 400 else 401 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize, 402 "mallocsize", InsertAtEnd); 403 } 404 } 405 406 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size"); 407 // Create the call to Malloc. 408 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd; 409 Module *M = BB->getParent()->getParent(); 410 Type *BPTy = Type::getInt8PtrTy(BB->getContext()); 411 Value *MallocFunc = MallocF; 412 if (!MallocFunc) 413 // prototype malloc as "void *malloc(size_t)" 414 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy); 415 PointerType *AllocPtrType = PointerType::getUnqual(AllocTy); 416 CallInst *MCall = nullptr; 417 Instruction *Result = nullptr; 418 if (InsertBefore) { 419 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall", 420 InsertBefore); 421 Result = MCall; 422 if (Result->getType() != AllocPtrType) 423 // Create a cast instruction to convert to the right type... 424 Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore); 425 } else { 426 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall"); 427 Result = MCall; 428 if (Result->getType() != AllocPtrType) { 429 InsertAtEnd->getInstList().push_back(MCall); 430 // Create a cast instruction to convert to the right type... 431 Result = new BitCastInst(MCall, AllocPtrType, Name); 432 } 433 } 434 MCall->setTailCall(); 435 if (Function *F = dyn_cast<Function>(MallocFunc)) { 436 MCall->setCallingConv(F->getCallingConv()); 437 if (!F->returnDoesNotAlias()) 438 F->setReturnDoesNotAlias(); 439 } 440 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type"); 441 442 return Result; 443 } 444 445 /// CreateMalloc - Generate the IR for a call to malloc: 446 /// 1. 
Compute the malloc call's argument as the specified type's size, 447 /// possibly multiplied by the array size if the array size is not 448 /// constant 1. 449 /// 2. Call malloc with that argument. 450 /// 3. Bitcast the result of the malloc call to the specified type. 451 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore, 452 Type *IntPtrTy, Type *AllocTy, 453 Value *AllocSize, Value *ArraySize, 454 Function *MallocF, 455 const Twine &Name) { 456 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize, 457 ArraySize, None, MallocF, Name); 458 } 459 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore, 460 Type *IntPtrTy, Type *AllocTy, 461 Value *AllocSize, Value *ArraySize, 462 ArrayRef<OperandBundleDef> OpB, 463 Function *MallocF, 464 const Twine &Name) { 465 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize, 466 ArraySize, OpB, MallocF, Name); 467 } 468 469 /// CreateMalloc - Generate the IR for a call to malloc: 470 /// 1. Compute the malloc call's argument as the specified type's size, 471 /// possibly multiplied by the array size if the array size is not 472 /// constant 1. 473 /// 2. Call malloc with that argument. 474 /// 3. Bitcast the result of the malloc call to the specified type. 475 /// Note: This function does not add the bitcast to the basic block, that is the 476 /// responsibility of the caller. 
477 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, 478 Type *IntPtrTy, Type *AllocTy, 479 Value *AllocSize, Value *ArraySize, 480 Function *MallocF, const Twine &Name) { 481 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, 482 ArraySize, None, MallocF, Name); 483 } 484 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd, 485 Type *IntPtrTy, Type *AllocTy, 486 Value *AllocSize, Value *ArraySize, 487 ArrayRef<OperandBundleDef> OpB, 488 Function *MallocF, const Twine &Name) { 489 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize, 490 ArraySize, OpB, MallocF, Name); 491 } 492 493 static Instruction *createFree(Value *Source, 494 ArrayRef<OperandBundleDef> Bundles, 495 Instruction *InsertBefore, 496 BasicBlock *InsertAtEnd) { 497 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) && 498 "createFree needs either InsertBefore or InsertAtEnd"); 499 assert(Source->getType()->isPointerTy() && 500 "Can not free something of nonpointer type!"); 501 502 BasicBlock *BB = InsertBefore ? 
InsertBefore->getParent() : InsertAtEnd; 503 Module *M = BB->getParent()->getParent(); 504 505 Type *VoidTy = Type::getVoidTy(M->getContext()); 506 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext()); 507 // prototype free as "void free(void*)" 508 Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy); 509 CallInst *Result = nullptr; 510 Value *PtrCast = Source; 511 if (InsertBefore) { 512 if (Source->getType() != IntPtrTy) 513 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore); 514 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore); 515 } else { 516 if (Source->getType() != IntPtrTy) 517 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd); 518 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, ""); 519 } 520 Result->setTailCall(); 521 if (Function *F = dyn_cast<Function>(FreeFunc)) 522 Result->setCallingConv(F->getCallingConv()); 523 524 return Result; 525 } 526 527 /// CreateFree - Generate the IR for a call to the builtin free function. 528 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) { 529 return createFree(Source, None, InsertBefore, nullptr); 530 } 531 Instruction *CallInst::CreateFree(Value *Source, 532 ArrayRef<OperandBundleDef> Bundles, 533 Instruction *InsertBefore) { 534 return createFree(Source, Bundles, InsertBefore, nullptr); 535 } 536 537 /// CreateFree - Generate the IR for a call to the builtin free function. 538 /// Note: This function does not add the call to the basic block, that is the 539 /// responsibility of the caller. 
540 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) { 541 Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd); 542 assert(FreeCall && "CreateFree did not create a CallInst"); 543 return FreeCall; 544 } 545 Instruction *CallInst::CreateFree(Value *Source, 546 ArrayRef<OperandBundleDef> Bundles, 547 BasicBlock *InsertAtEnd) { 548 Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd); 549 assert(FreeCall && "CreateFree did not create a CallInst"); 550 return FreeCall; 551 } 552 553 //===----------------------------------------------------------------------===// 554 // InvokeInst Implementation 555 //===----------------------------------------------------------------------===// 556 557 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal, 558 BasicBlock *IfException, ArrayRef<Value *> Args, 559 ArrayRef<OperandBundleDef> Bundles, 560 const Twine &NameStr) { 561 this->FTy = FTy; 562 563 assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) && 564 "NumOperands not set up?"); 565 Op<-3>() = Fn; 566 Op<-2>() = IfNormal; 567 Op<-1>() = IfException; 568 569 #ifndef NDEBUG 570 assert(((Args.size() == FTy->getNumParams()) || 571 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) && 572 "Invoking a function with bad signature"); 573 574 for (unsigned i = 0, e = Args.size(); i != e; i++) 575 assert((i >= FTy->getNumParams() || 576 FTy->getParamType(i) == Args[i]->getType()) && 577 "Invoking a function with a bad signature!"); 578 #endif 579 580 std::copy(Args.begin(), Args.end(), op_begin()); 581 582 auto It = populateBundleOperandInfos(Bundles, Args.size()); 583 (void)It; 584 assert(It + 3 == op_end() && "Should add up!"); 585 586 setName(NameStr); 587 } 588 589 InvokeInst::InvokeInst(const InvokeInst &II) 590 : CallBase<InvokeInst>(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, 591 OperandTraits<CallBase<InvokeInst>>::op_end(this) - 592 II.getNumOperands(), 593 
II.getNumOperands()) { 594 setCallingConv(II.getCallingConv()); 595 std::copy(II.op_begin(), II.op_end(), op_begin()); 596 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(), 597 bundle_op_info_begin()); 598 SubclassOptionalData = II.SubclassOptionalData; 599 } 600 601 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB, 602 Instruction *InsertPt) { 603 std::vector<Value *> Args(II->arg_begin(), II->arg_end()); 604 605 auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(), 606 II->getUnwindDest(), Args, OpB, 607 II->getName(), InsertPt); 608 NewII->setCallingConv(II->getCallingConv()); 609 NewII->SubclassOptionalData = II->SubclassOptionalData; 610 NewII->setAttributes(II->getAttributes()); 611 NewII->setDebugLoc(II->getDebugLoc()); 612 return NewII; 613 } 614 615 616 LandingPadInst *InvokeInst::getLandingPadInst() const { 617 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI()); 618 } 619 620 //===----------------------------------------------------------------------===// 621 // ReturnInst Implementation 622 //===----------------------------------------------------------------------===// 623 624 ReturnInst::ReturnInst(const ReturnInst &RI) 625 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret, 626 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(), 627 RI.getNumOperands()) { 628 if (RI.getNumOperands()) 629 Op<0>() = RI.Op<0>(); 630 SubclassOptionalData = RI.SubclassOptionalData; 631 } 632 633 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore) 634 : Instruction(Type::getVoidTy(C), Instruction::Ret, 635 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal, 636 InsertBefore) { 637 if (retVal) 638 Op<0>() = retVal; 639 } 640 641 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd) 642 : Instruction(Type::getVoidTy(C), Instruction::Ret, 643 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal, 644 
InsertAtEnd) { 645 if (retVal) 646 Op<0>() = retVal; 647 } 648 649 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd) 650 : Instruction(Type::getVoidTy(Context), Instruction::Ret, 651 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {} 652 653 //===----------------------------------------------------------------------===// 654 // ResumeInst Implementation 655 //===----------------------------------------------------------------------===// 656 657 ResumeInst::ResumeInst(const ResumeInst &RI) 658 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume, 659 OperandTraits<ResumeInst>::op_begin(this), 1) { 660 Op<0>() = RI.Op<0>(); 661 } 662 663 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore) 664 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume, 665 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) { 666 Op<0>() = Exn; 667 } 668 669 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd) 670 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume, 671 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) { 672 Op<0>() = Exn; 673 } 674 675 //===----------------------------------------------------------------------===// 676 // CleanupReturnInst Implementation 677 //===----------------------------------------------------------------------===// 678 679 CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI) 680 : Instruction(CRI.getType(), Instruction::CleanupRet, 681 OperandTraits<CleanupReturnInst>::op_end(this) - 682 CRI.getNumOperands(), 683 CRI.getNumOperands()) { 684 setInstructionSubclassData(CRI.getSubclassDataFromInstruction()); 685 Op<0>() = CRI.Op<0>(); 686 if (CRI.hasUnwindDest()) 687 Op<1>() = CRI.Op<1>(); 688 } 689 690 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) { 691 if (UnwindBB) 692 setInstructionSubclassData(getSubclassDataFromInstruction() | 1); 693 694 Op<0>() = CleanupPad; 695 if (UnwindBB) 696 Op<1>() = 
UnwindBB; 697 } 698 699 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, 700 unsigned Values, Instruction *InsertBefore) 701 : Instruction(Type::getVoidTy(CleanupPad->getContext()), 702 Instruction::CleanupRet, 703 OperandTraits<CleanupReturnInst>::op_end(this) - Values, 704 Values, InsertBefore) { 705 init(CleanupPad, UnwindBB); 706 } 707 708 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, 709 unsigned Values, BasicBlock *InsertAtEnd) 710 : Instruction(Type::getVoidTy(CleanupPad->getContext()), 711 Instruction::CleanupRet, 712 OperandTraits<CleanupReturnInst>::op_end(this) - Values, 713 Values, InsertAtEnd) { 714 init(CleanupPad, UnwindBB); 715 } 716 717 //===----------------------------------------------------------------------===// 718 // CatchReturnInst Implementation 719 //===----------------------------------------------------------------------===// 720 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) { 721 Op<0>() = CatchPad; 722 Op<1>() = BB; 723 } 724 725 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI) 726 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet, 727 OperandTraits<CatchReturnInst>::op_begin(this), 2) { 728 Op<0>() = CRI.Op<0>(); 729 Op<1>() = CRI.Op<1>(); 730 } 731 732 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB, 733 Instruction *InsertBefore) 734 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet, 735 OperandTraits<CatchReturnInst>::op_begin(this), 2, 736 InsertBefore) { 737 init(CatchPad, BB); 738 } 739 740 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB, 741 BasicBlock *InsertAtEnd) 742 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet, 743 OperandTraits<CatchReturnInst>::op_begin(this), 2, 744 InsertAtEnd) { 745 init(CatchPad, BB); 746 } 747 748 //===----------------------------------------------------------------------===// 749 // CatchSwitchInst Implementation 
750 //===----------------------------------------------------------------------===// 751 752 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 753 unsigned NumReservedValues, 754 const Twine &NameStr, 755 Instruction *InsertBefore) 756 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 757 InsertBefore) { 758 if (UnwindDest) 759 ++NumReservedValues; 760 init(ParentPad, UnwindDest, NumReservedValues + 1); 761 setName(NameStr); 762 } 763 764 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, 765 unsigned NumReservedValues, 766 const Twine &NameStr, BasicBlock *InsertAtEnd) 767 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0, 768 InsertAtEnd) { 769 if (UnwindDest) 770 ++NumReservedValues; 771 init(ParentPad, UnwindDest, NumReservedValues + 1); 772 setName(NameStr); 773 } 774 775 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI) 776 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr, 777 CSI.getNumOperands()) { 778 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands()); 779 setNumHungOffUseOperands(ReservedSpace); 780 Use *OL = getOperandList(); 781 const Use *InOL = CSI.getOperandList(); 782 for (unsigned I = 1, E = ReservedSpace; I != E; ++I) 783 OL[I] = InOL[I]; 784 } 785 786 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest, 787 unsigned NumReservedValues) { 788 assert(ParentPad && NumReservedValues); 789 790 ReservedSpace = NumReservedValues; 791 setNumHungOffUseOperands(UnwindDest ? 2 : 1); 792 allocHungoffUses(ReservedSpace); 793 794 Op<0>() = ParentPad; 795 if (UnwindDest) { 796 setInstructionSubclassData(getSubclassDataFromInstruction() | 1); 797 setUnwindDest(UnwindDest); 798 } 799 } 800 801 /// growOperands - grow operands - This grows the operand list in response to a 802 /// push_back style of operation. This grows the number of ops by 2 times. 
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  // There is always at least the parent-pad operand (Op<0>).
  assert(NumOperands >= 1);
  // Fast path: the reserved hung-off use list already has room.
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

// Append a new handler block to the end of the operand list, growing the
// hung-off use list if needed.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

// Remove the handler that HI points at, compacting the operand list.
void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                        FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
// Shared constructor helper: fills in the funclet-pad operands.  The Args are
// copied to the leading operand slots; the parent pad is stored through
// setParentPad().
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  std::copy(Args.begin(), Args.end(), op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

// Copy constructor: co-allocates the operand list immediately before the
// instruction (standard OperandTraits layout) and copies FPI's operands.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

// Op selects catchpad vs. cleanuppad; Values is the total operand count
// (Args plus the parent pad).
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

// 'unreachable' has no operands and produces no value (void type).
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

// Sanity-check a conditional branch: the condition must be an i1.
void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

// Unconditional branch.  Operand layout (indexed from the end):
//   Op<-1> = destination.
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

// Conditional branch.  Operand layout (indexed from the end):
//   Op<-1> = true destination, Op<-2> = false destination, Op<-3> = condition.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

// Copy constructor: handles both the 1-operand (unconditional) and 3-operand
// (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

// Exchange the true and false destinations of a conditional branch.  Note
// that this does NOT invert the condition; callers who want equivalent
// semantics must invert the condition themselves.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

// Normalize an alloca's array-size operand: a null Amt means a single
// element, represented as the i32 constant 1.
static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

// Convenience constructors: all delegate to the fully-general form below,
// defaulting ArraySize to null (one element) and Align to 0 (unspecified).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}

// Fully-general constructor.  The alloca's result type is a pointer to Ty in
// address space AddrSpace; the allocated element type is remembered
// separately in AllocatedType.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       unsigned Align, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

// Store the alignment in the low 5 bits of the subclass data, encoded as
// Log2(Align) + 1 so that 0 can mean "no alignment specified".
void AllocaInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
                             (Log2_32(Align) + 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

// An alloca is an "array allocation" unless its size operand is the constant
// 1; a non-constant size is conservatively treated as an array.
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

// Sanity checks shared by every LoadInst constructor: the pointer operand
// must be a pointer, and atomic loads must carry an explicit alignment.
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

// Convenience constructors: each delegates toward the fully-general form,
// defaulting isVolatile to false, Align to 0 (unspecified), and the ordering
// to non-atomic in the system sync scope.
LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

// Fully-general constructor taking an explicit result type, which must match
// the pointee type of Ptr.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// Same as above, but derives the result type from Ptr's pointee type.
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

// const char* name variants: only set a name when one is actually provided
// (non-null and non-empty).
LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

// Store the alignment in bits 1-5 of the subclass data, encoded as
// Log2(Align) + 1 so that 0 can mean "no alignment specified".  Bit 0 is
// masked out and preserved for other per-instruction state.
void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//

// Sanity checks shared by every StoreInst constructor: operand 0 is the
// stored value, operand 1 is the pointer; the pointee type must match the
// value type, and atomic stores must carry an explicit alignment.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

// Convenience constructors: each delegates toward the fully-general form,
// defaulting isVolatile to false, Align to 0 (unspecified), and the ordering
// to non-atomic in the system sync scope.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, 1181 BasicBlock *InsertAtEnd) 1182 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, 1183 SyncScope::System, InsertAtEnd) {} 1184 1185 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1186 unsigned Align, AtomicOrdering Order, 1187 SyncScope::ID SSID, 1188 Instruction *InsertBefore) 1189 : Instruction(Type::getVoidTy(val->getContext()), Store, 1190 OperandTraits<StoreInst>::op_begin(this), 1191 OperandTraits<StoreInst>::operands(this), 1192 InsertBefore) { 1193 Op<0>() = val; 1194 Op<1>() = addr; 1195 setVolatile(isVolatile); 1196 setAlignment(Align); 1197 setAtomic(Order, SSID); 1198 AssertOK(); 1199 } 1200 1201 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, 1202 unsigned Align, AtomicOrdering Order, 1203 SyncScope::ID SSID, 1204 BasicBlock *InsertAtEnd) 1205 : Instruction(Type::getVoidTy(val->getContext()), Store, 1206 OperandTraits<StoreInst>::op_begin(this), 1207 OperandTraits<StoreInst>::operands(this), 1208 InsertAtEnd) { 1209 Op<0>() = val; 1210 Op<1>() = addr; 1211 setVolatile(isVolatile); 1212 setAlignment(Align); 1213 setAtomic(Order, SSID); 1214 AssertOK(); 1215 } 1216 1217 void StoreInst::setAlignment(unsigned Align) { 1218 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); 1219 assert(Align <= MaximumAlignment && 1220 "Alignment is greater than MaximumAlignment!"); 1221 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) | 1222 ((Log2_32(Align)+1) << 1)); 1223 assert(getAlignment() == Align && "Alignment representation error!"); 1224 } 1225 1226 //===----------------------------------------------------------------------===// 1227 // AtomicCmpXchgInst Implementation 1228 //===----------------------------------------------------------------------===// 1229 1230 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, 1231 AtomicOrdering SuccessOrdering, 1232 AtomicOrdering 
FailureOrdering, 1233 SyncScope::ID SSID) { 1234 Op<0>() = Ptr; 1235 Op<1>() = Cmp; 1236 Op<2>() = NewVal; 1237 setSuccessOrdering(SuccessOrdering); 1238 setFailureOrdering(FailureOrdering); 1239 setSyncScopeID(SSID); 1240 1241 assert(getOperand(0) && getOperand(1) && getOperand(2) && 1242 "All operands must be non-null!"); 1243 assert(getOperand(0)->getType()->isPointerTy() && 1244 "Ptr must have pointer type!"); 1245 assert(getOperand(1)->getType() == 1246 cast<PointerType>(getOperand(0)->getType())->getElementType() 1247 && "Ptr must be a pointer to Cmp type!"); 1248 assert(getOperand(2)->getType() == 1249 cast<PointerType>(getOperand(0)->getType())->getElementType() 1250 && "Ptr must be a pointer to NewVal type!"); 1251 assert(SuccessOrdering != AtomicOrdering::NotAtomic && 1252 "AtomicCmpXchg instructions must be atomic!"); 1253 assert(FailureOrdering != AtomicOrdering::NotAtomic && 1254 "AtomicCmpXchg instructions must be atomic!"); 1255 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) && 1256 "AtomicCmpXchg failure argument shall be no stronger than the success " 1257 "argument"); 1258 assert(FailureOrdering != AtomicOrdering::Release && 1259 FailureOrdering != AtomicOrdering::AcquireRelease && 1260 "AtomicCmpXchg failure ordering cannot include release semantics"); 1261 } 1262 1263 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1264 AtomicOrdering SuccessOrdering, 1265 AtomicOrdering FailureOrdering, 1266 SyncScope::ID SSID, 1267 Instruction *InsertBefore) 1268 : Instruction( 1269 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1270 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1271 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) { 1272 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1273 } 1274 1275 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, 1276 AtomicOrdering SuccessOrdering, 1277 AtomicOrdering FailureOrdering, 
1278 SyncScope::ID SSID, 1279 BasicBlock *InsertAtEnd) 1280 : Instruction( 1281 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())), 1282 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this), 1283 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) { 1284 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); 1285 } 1286 1287 //===----------------------------------------------------------------------===// 1288 // AtomicRMWInst Implementation 1289 //===----------------------------------------------------------------------===// 1290 1291 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, 1292 AtomicOrdering Ordering, 1293 SyncScope::ID SSID) { 1294 Op<0>() = Ptr; 1295 Op<1>() = Val; 1296 setOperation(Operation); 1297 setOrdering(Ordering); 1298 setSyncScopeID(SSID); 1299 1300 assert(getOperand(0) && getOperand(1) && 1301 "All operands must be non-null!"); 1302 assert(getOperand(0)->getType()->isPointerTy() && 1303 "Ptr must have pointer type!"); 1304 assert(getOperand(1)->getType() == 1305 cast<PointerType>(getOperand(0)->getType())->getElementType() 1306 && "Ptr must be a pointer to Val type!"); 1307 assert(Ordering != AtomicOrdering::NotAtomic && 1308 "AtomicRMW instructions must be atomic!"); 1309 } 1310 1311 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1312 AtomicOrdering Ordering, 1313 SyncScope::ID SSID, 1314 Instruction *InsertBefore) 1315 : Instruction(Val->getType(), AtomicRMW, 1316 OperandTraits<AtomicRMWInst>::op_begin(this), 1317 OperandTraits<AtomicRMWInst>::operands(this), 1318 InsertBefore) { 1319 Init(Operation, Ptr, Val, Ordering, SSID); 1320 } 1321 1322 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, 1323 AtomicOrdering Ordering, 1324 SyncScope::ID SSID, 1325 BasicBlock *InsertAtEnd) 1326 : Instruction(Val->getType(), AtomicRMW, 1327 OperandTraits<AtomicRMWInst>::op_begin(this), 1328 OperandTraits<AtomicRMWInst>::operands(this), 1329 
InsertAtEnd) { 1330 Init(Operation, Ptr, Val, Ordering, SSID); 1331 } 1332 1333 StringRef AtomicRMWInst::getOperationName(BinOp Op) { 1334 switch (Op) { 1335 case AtomicRMWInst::Xchg: 1336 return "xchg"; 1337 case AtomicRMWInst::Add: 1338 return "add"; 1339 case AtomicRMWInst::Sub: 1340 return "sub"; 1341 case AtomicRMWInst::And: 1342 return "and"; 1343 case AtomicRMWInst::Nand: 1344 return "nand"; 1345 case AtomicRMWInst::Or: 1346 return "or"; 1347 case AtomicRMWInst::Xor: 1348 return "xor"; 1349 case AtomicRMWInst::Max: 1350 return "max"; 1351 case AtomicRMWInst::Min: 1352 return "min"; 1353 case AtomicRMWInst::UMax: 1354 return "umax"; 1355 case AtomicRMWInst::UMin: 1356 return "umin"; 1357 case AtomicRMWInst::BAD_BINOP: 1358 return "<invalid operation>"; 1359 } 1360 1361 llvm_unreachable("invalid atomicrmw operation"); 1362 } 1363 1364 //===----------------------------------------------------------------------===// 1365 // FenceInst Implementation 1366 //===----------------------------------------------------------------------===// 1367 1368 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1369 SyncScope::ID SSID, 1370 Instruction *InsertBefore) 1371 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { 1372 setOrdering(Ordering); 1373 setSyncScopeID(SSID); 1374 } 1375 1376 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, 1377 SyncScope::ID SSID, 1378 BasicBlock *InsertAtEnd) 1379 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { 1380 setOrdering(Ordering); 1381 setSyncScopeID(SSID); 1382 } 1383 1384 //===----------------------------------------------------------------------===// 1385 // GetElementPtrInst Implementation 1386 //===----------------------------------------------------------------------===// 1387 1388 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList, 1389 const Twine &Name) { 1390 assert(getNumOperands() == 1 + IdxList.size() && 1391 "NumOperands not initialized?"); 1392 
Op<0>() = Ptr; 1393 std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1); 1394 setName(Name); 1395 } 1396 1397 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI) 1398 : Instruction(GEPI.getType(), GetElementPtr, 1399 OperandTraits<GetElementPtrInst>::op_end(this) - 1400 GEPI.getNumOperands(), 1401 GEPI.getNumOperands()), 1402 SourceElementType(GEPI.SourceElementType), 1403 ResultElementType(GEPI.ResultElementType) { 1404 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin()); 1405 SubclassOptionalData = GEPI.SubclassOptionalData; 1406 } 1407 1408 /// getIndexedType - Returns the type of the element that would be accessed with 1409 /// a gep instruction with the specified parameters. 1410 /// 1411 /// The Idxs pointer should point to a continuous piece of memory containing the 1412 /// indices, either as Value* or uint64_t. 1413 /// 1414 /// A null type is returned if the indices are invalid for the specified 1415 /// pointer type. 1416 /// 1417 template <typename IndexTy> 1418 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) { 1419 // Handle the special case of the empty set index set, which is always valid. 1420 if (IdxList.empty()) 1421 return Agg; 1422 1423 // If there is at least one index, the top level type must be sized, otherwise 1424 // it cannot be 'stepped over'. 1425 if (!Agg->isSized()) 1426 return nullptr; 1427 1428 unsigned CurIdx = 1; 1429 for (; CurIdx != IdxList.size(); ++CurIdx) { 1430 CompositeType *CT = dyn_cast<CompositeType>(Agg); 1431 if (!CT || CT->isPointerTy()) return nullptr; 1432 IndexTy Index = IdxList[CurIdx]; 1433 if (!CT->indexValid(Index)) return nullptr; 1434 Agg = CT->getTypeAtIndex(Index); 1435 } 1436 return CurIdx == IdxList.size() ? 
Agg : nullptr; 1437 } 1438 1439 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) { 1440 return getIndexedTypeInternal(Ty, IdxList); 1441 } 1442 1443 Type *GetElementPtrInst::getIndexedType(Type *Ty, 1444 ArrayRef<Constant *> IdxList) { 1445 return getIndexedTypeInternal(Ty, IdxList); 1446 } 1447 1448 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) { 1449 return getIndexedTypeInternal(Ty, IdxList); 1450 } 1451 1452 /// hasAllZeroIndices - Return true if all of the indices of this GEP are 1453 /// zeros. If so, the result pointer and the first operand have the same 1454 /// value, just potentially different types. 1455 bool GetElementPtrInst::hasAllZeroIndices() const { 1456 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1457 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { 1458 if (!CI->isZero()) return false; 1459 } else { 1460 return false; 1461 } 1462 } 1463 return true; 1464 } 1465 1466 /// hasAllConstantIndices - Return true if all of the indices of this GEP are 1467 /// constant integers. If so, the result pointer and the first operand have 1468 /// a constant offset between them. 1469 bool GetElementPtrInst::hasAllConstantIndices() const { 1470 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1471 if (!isa<ConstantInt>(getOperand(i))) 1472 return false; 1473 } 1474 return true; 1475 } 1476 1477 void GetElementPtrInst::setIsInBounds(bool B) { 1478 cast<GEPOperator>(this)->setIsInBounds(B); 1479 } 1480 1481 bool GetElementPtrInst::isInBounds() const { 1482 return cast<GEPOperator>(this)->isInBounds(); 1483 } 1484 1485 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL, 1486 APInt &Offset) const { 1487 // Delegate to the generic GEPOperator implementation. 
1488 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1489 } 1490 1491 //===----------------------------------------------------------------------===// 1492 // ExtractElementInst Implementation 1493 //===----------------------------------------------------------------------===// 1494 1495 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1496 const Twine &Name, 1497 Instruction *InsertBef) 1498 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1499 ExtractElement, 1500 OperandTraits<ExtractElementInst>::op_begin(this), 1501 2, InsertBef) { 1502 assert(isValidOperands(Val, Index) && 1503 "Invalid extractelement instruction operands!"); 1504 Op<0>() = Val; 1505 Op<1>() = Index; 1506 setName(Name); 1507 } 1508 1509 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1510 const Twine &Name, 1511 BasicBlock *InsertAE) 1512 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1513 ExtractElement, 1514 OperandTraits<ExtractElementInst>::op_begin(this), 1515 2, InsertAE) { 1516 assert(isValidOperands(Val, Index) && 1517 "Invalid extractelement instruction operands!"); 1518 1519 Op<0>() = Val; 1520 Op<1>() = Index; 1521 setName(Name); 1522 } 1523 1524 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1525 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1526 return false; 1527 return true; 1528 } 1529 1530 //===----------------------------------------------------------------------===// 1531 // InsertElementInst Implementation 1532 //===----------------------------------------------------------------------===// 1533 1534 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1535 const Twine &Name, 1536 Instruction *InsertBef) 1537 : Instruction(Vec->getType(), InsertElement, 1538 OperandTraits<InsertElementInst>::op_begin(this), 1539 3, InsertBef) { 1540 assert(isValidOperands(Vec, Elt, Index) && 1541 "Invalid insertelement 
instruction operands!"); 1542 Op<0>() = Vec; 1543 Op<1>() = Elt; 1544 Op<2>() = Index; 1545 setName(Name); 1546 } 1547 1548 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1549 const Twine &Name, 1550 BasicBlock *InsertAE) 1551 : Instruction(Vec->getType(), InsertElement, 1552 OperandTraits<InsertElementInst>::op_begin(this), 1553 3, InsertAE) { 1554 assert(isValidOperands(Vec, Elt, Index) && 1555 "Invalid insertelement instruction operands!"); 1556 1557 Op<0>() = Vec; 1558 Op<1>() = Elt; 1559 Op<2>() = Index; 1560 setName(Name); 1561 } 1562 1563 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1564 const Value *Index) { 1565 if (!Vec->getType()->isVectorTy()) 1566 return false; // First operand of insertelement must be vector type. 1567 1568 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1569 return false;// Second operand of insertelement must be vector element type. 1570 1571 if (!Index->getType()->isIntegerTy()) 1572 return false; // Third operand of insertelement must be i32. 
1573 return true; 1574 } 1575 1576 //===----------------------------------------------------------------------===// 1577 // ShuffleVectorInst Implementation 1578 //===----------------------------------------------------------------------===// 1579 1580 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1581 const Twine &Name, 1582 Instruction *InsertBefore) 1583 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1584 cast<VectorType>(Mask->getType())->getNumElements()), 1585 ShuffleVector, 1586 OperandTraits<ShuffleVectorInst>::op_begin(this), 1587 OperandTraits<ShuffleVectorInst>::operands(this), 1588 InsertBefore) { 1589 assert(isValidOperands(V1, V2, Mask) && 1590 "Invalid shuffle vector instruction operands!"); 1591 Op<0>() = V1; 1592 Op<1>() = V2; 1593 Op<2>() = Mask; 1594 setName(Name); 1595 } 1596 1597 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1598 const Twine &Name, 1599 BasicBlock *InsertAtEnd) 1600 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1601 cast<VectorType>(Mask->getType())->getNumElements()), 1602 ShuffleVector, 1603 OperandTraits<ShuffleVectorInst>::op_begin(this), 1604 OperandTraits<ShuffleVectorInst>::operands(this), 1605 InsertAtEnd) { 1606 assert(isValidOperands(V1, V2, Mask) && 1607 "Invalid shuffle vector instruction operands!"); 1608 1609 Op<0>() = V1; 1610 Op<1>() = V2; 1611 Op<2>() = Mask; 1612 setName(Name); 1613 } 1614 1615 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1616 const Value *Mask) { 1617 // V1 and V2 must be vectors of the same type. 1618 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1619 return false; 1620 1621 // Mask must be vector of i32. 1622 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1623 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1624 return false; 1625 1626 // Check to see if Mask is valid. 
1627 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1628 return true; 1629 1630 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1631 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1632 for (Value *Op : MV->operands()) { 1633 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1634 if (CI->uge(V1Size*2)) 1635 return false; 1636 } else if (!isa<UndefValue>(Op)) { 1637 return false; 1638 } 1639 } 1640 return true; 1641 } 1642 1643 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1644 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1645 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1646 if (CDS->getElementAsInteger(i) >= V1Size*2) 1647 return false; 1648 return true; 1649 } 1650 1651 // The bitcode reader can create a place holder for a forward reference 1652 // used as the shuffle mask. When this occurs, the shuffle mask will 1653 // fall into this case and fail. To avoid this error, do this bit of 1654 // ugliness to allow such a mask pass. 
1655 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1656 if (CE->getOpcode() == Instruction::UserOp1) 1657 return true; 1658 1659 return false; 1660 } 1661 1662 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) { 1663 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1664 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1665 return CDS->getElementAsInteger(i); 1666 Constant *C = Mask->getAggregateElement(i); 1667 if (isa<UndefValue>(C)) 1668 return -1; 1669 return cast<ConstantInt>(C)->getZExtValue(); 1670 } 1671 1672 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 1673 SmallVectorImpl<int> &Result) { 1674 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1675 1676 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1677 for (unsigned i = 0; i != NumElts; ++i) 1678 Result.push_back(CDS->getElementAsInteger(i)); 1679 return; 1680 } 1681 for (unsigned i = 0; i != NumElts; ++i) { 1682 Constant *C = Mask->getAggregateElement(i); 1683 Result.push_back(isa<UndefValue>(C) ? -1 : 1684 cast<ConstantInt>(C)->getZExtValue()); 1685 } 1686 } 1687 1688 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1689 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1690 bool UsesLHS = false; 1691 bool UsesRHS = false; 1692 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1693 if (Mask[i] == -1) 1694 continue; 1695 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1696 "Out-of-bounds shuffle mask element"); 1697 UsesLHS |= (Mask[i] < NumOpElts); 1698 UsesRHS |= (Mask[i] >= NumOpElts); 1699 if (UsesLHS && UsesRHS) 1700 return false; 1701 } 1702 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1703 return true; 1704 } 1705 1706 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1707 // We don't have vector operand size information, so assume operands are the 1708 // same size as the mask. 
1709 return isSingleSourceMaskImpl(Mask, Mask.size()); 1710 } 1711 1712 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1713 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1714 return false; 1715 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1716 if (Mask[i] == -1) 1717 continue; 1718 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1719 return false; 1720 } 1721 return true; 1722 } 1723 1724 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1725 // We don't have vector operand size information, so assume operands are the 1726 // same size as the mask. 1727 return isIdentityMaskImpl(Mask, Mask.size()); 1728 } 1729 1730 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1731 if (!isSingleSourceMask(Mask)) 1732 return false; 1733 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1734 if (Mask[i] == -1) 1735 continue; 1736 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1737 return false; 1738 } 1739 return true; 1740 } 1741 1742 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1743 if (!isSingleSourceMask(Mask)) 1744 return false; 1745 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1746 if (Mask[i] == -1) 1747 continue; 1748 if (Mask[i] != 0 && Mask[i] != NumElts) 1749 return false; 1750 } 1751 return true; 1752 } 1753 1754 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1755 // Select is differentiated from identity. It requires using both sources. 
  if (isSingleSourceMask(Mask))
    return false;
  // Each defined element must be an in-place copy of lane i from one of the
  // two sources.
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    // Undef elements disqualify a transpose mask.
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityWithPadding() const {
  // The result must be wider than the (first) source operand.
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  SmallVector<int, 16> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  // The result must be narrower than the (first) source operand.
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // The result must be exactly twice as wide as each input.
  int NumOpElts = Op<0>()->getType()->getVectorNumElements();
  int NumMaskElts = getType()->getVectorNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
    } else {
      // Not a valid type to index into.
      return nullptr;
    }

    // Descend one level into the aggregate.
    Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
  }
  return const_cast<Type*>(Agg);
}

//===----------------------------------------------------------------------===//
//                             BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Sanity-check the operand and result types for this opcode. The per-opcode
// checks below are compiled out in release (NDEBUG) builds.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Build the instruction unattached, then append it to the block.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

// Negation is materialized as "sub <zero>, Op".
// NOTE(review): ConstantFP::getZeroValueForNegation is presumably
// type-agnostic (it is also used here for integer negation) — confirm in
// Constants.h.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator
*BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2067 BasicBlock *InsertAtEnd) { 2068 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2069 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2070 } 2071 2072 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2073 Instruction *InsertBefore) { 2074 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2075 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2076 } 2077 2078 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2079 BasicBlock *InsertAtEnd) { 2080 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2081 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2082 } 2083 2084 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2085 Instruction *InsertBefore) { 2086 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2087 return new BinaryOperator(Instruction::FSub, zero, Op, 2088 Op->getType(), Name, InsertBefore); 2089 } 2090 2091 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2092 BasicBlock *InsertAtEnd) { 2093 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2094 return new BinaryOperator(Instruction::FSub, zero, Op, 2095 Op->getType(), Name, InsertAtEnd); 2096 } 2097 2098 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2099 Instruction *InsertBefore) { 2100 Constant *C = Constant::getAllOnesValue(Op->getType()); 2101 return new BinaryOperator(Instruction::Xor, Op, C, 2102 Op->getType(), Name, InsertBefore); 2103 } 2104 2105 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2106 BasicBlock *InsertAtEnd) { 2107 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2108 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2109 Op->getType(), Name, InsertAtEnd); 2110 } 2111 2112 // isConstantAllOnes - Helper function for several 
functions below 2113 static inline bool isConstantAllOnes(const Value *V) { 2114 if (const Constant *C = dyn_cast<Constant>(V)) 2115 return C->isAllOnesValue(); 2116 return false; 2117 } 2118 2119 bool BinaryOperator::isNeg(const Value *V) { 2120 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2121 if (Bop->getOpcode() == Instruction::Sub) 2122 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) 2123 return C->isNegativeZeroValue(); 2124 return false; 2125 } 2126 2127 bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) { 2128 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2129 if (Bop->getOpcode() == Instruction::FSub) 2130 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) { 2131 if (!IgnoreZeroSign) 2132 IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros(); 2133 return !IgnoreZeroSign ? C->isNegativeZeroValue() : C->isZeroValue(); 2134 } 2135 return false; 2136 } 2137 2138 bool BinaryOperator::isNot(const Value *V) { 2139 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 2140 return (Bop->getOpcode() == Instruction::Xor && 2141 (isConstantAllOnes(Bop->getOperand(1)) || 2142 isConstantAllOnes(Bop->getOperand(0)))); 2143 return false; 2144 } 2145 2146 Value *BinaryOperator::getNegArgument(Value *BinOp) { 2147 return cast<BinaryOperator>(BinOp)->getOperand(1); 2148 } 2149 2150 const Value *BinaryOperator::getNegArgument(const Value *BinOp) { 2151 return getNegArgument(const_cast<Value*>(BinOp)); 2152 } 2153 2154 Value *BinaryOperator::getFNegArgument(Value *BinOp) { 2155 return cast<BinaryOperator>(BinOp)->getOperand(1); 2156 } 2157 2158 const Value *BinaryOperator::getFNegArgument(const Value *BinOp) { 2159 return getFNegArgument(const_cast<Value*>(BinOp)); 2160 } 2161 2162 Value *BinaryOperator::getNotArgument(Value *BinOp) { 2163 assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!"); 2164 BinaryOperator *BO = cast<BinaryOperator>(BinOp); 2165 Value *Op0 = BO->getOperand(0); 2166 
Value *Op1 = BO->getOperand(1); 2167 if (isConstantAllOnes(Op0)) return Op1; 2168 2169 assert(isConstantAllOnes(Op1)); 2170 return Op0; 2171 } 2172 2173 const Value *BinaryOperator::getNotArgument(const Value *BinOp) { 2174 return getNotArgument(const_cast<Value*>(BinOp)); 2175 } 2176 2177 // Exchange the two operands to this instruction. This instruction is safe to 2178 // use on any binary instruction and does not modify the semantics of the 2179 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2180 // is changed. 2181 bool BinaryOperator::swapOperands() { 2182 if (!isCommutative()) 2183 return true; // Can't commute operands 2184 Op<0>().swap(Op<1>()); 2185 return false; 2186 } 2187 2188 //===----------------------------------------------------------------------===// 2189 // FPMathOperator Class 2190 //===----------------------------------------------------------------------===// 2191 2192 float FPMathOperator::getFPAccuracy() const { 2193 const MDNode *MD = 2194 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2195 if (!MD) 2196 return 0.0; 2197 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2198 return Accuracy->getValueAPF().convertToFloat(); 2199 } 2200 2201 //===----------------------------------------------------------------------===// 2202 // CastInst Class 2203 //===----------------------------------------------------------------------===// 2204 2205 // Just determine if this cast only deals with integral->integral conversion. 
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast only counts when both source and destination are integers
    // (excludes pointer and vector bitcasts).
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target informations may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      // A no-op only when the integer type has exactly pointer width.
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare     Source            Destination
  //  Operator   Src ? Size   Type    Sign      Type     Sign
  // --------  -----------  -------- --------  -------- --------
  //  TRUNC        >        Integer    Any     Integral   Any
  //  ZEXT         <        Integral Unsigned  Integer    Any
  //  SEXT         <        Integral  Signed   Integer    Any
  //  FPTOUI      n/a       FloatPt    n/a     Integral Unsigned
  //  FPTOSI      n/a       FloatPt    n/a     Integral  Signed
  //  UITOFP      n/a       Integral Unsigned  FloatPt    n/a
  //  SITOFP      n/a       Integral  Signed   FloatPt    n/a
  //  FPTRUNC      >        FloatPt    n/a     FloatPt    n/a
  //  FPEXT        <        FloatPt    n/a     FloatPt    n/a
  //  PTRTOINT    n/a       Pointer    n/a     Integral Unsigned
  //  INTTOPTR    n/a       Integral Unsigned  Pointer    n/a
  //  BITCAST      =        FirstClass n/a     FirstClass n/a
  //  ADDRSPCST   n/a       Pointer    n/a     Pointer    n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2535 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2536 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2537 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2538 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2539 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2540 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2541 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2542 default: llvm_unreachable("Invalid opcode provided"); 2543 } 2544 } 2545 2546 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2547 const Twine &Name, 2548 Instruction *InsertBefore) { 2549 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2550 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2551 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2552 } 2553 2554 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2555 const Twine &Name, 2556 BasicBlock *InsertAtEnd) { 2557 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2558 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2559 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2560 } 2561 2562 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2563 const Twine &Name, 2564 Instruction *InsertBefore) { 2565 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2566 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2567 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2568 } 2569 2570 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2571 const Twine &Name, 2572 BasicBlock *InsertAtEnd) { 2573 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2574 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2575 return Create(Instruction::SExt, S, Ty, Name, 
InsertAtEnd); 2576 } 2577 2578 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2579 const Twine &Name, 2580 Instruction *InsertBefore) { 2581 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2582 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2583 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2584 } 2585 2586 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2587 const Twine &Name, 2588 BasicBlock *InsertAtEnd) { 2589 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2590 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2591 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2592 } 2593 2594 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2595 const Twine &Name, 2596 BasicBlock *InsertAtEnd) { 2597 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2598 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2599 "Invalid cast"); 2600 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2601 assert((!Ty->isVectorTy() || 2602 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2603 "Invalid cast"); 2604 2605 if (Ty->isIntOrIntVectorTy()) 2606 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2607 2608 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2609 } 2610 2611 /// Create a BitCast or a PtrToInt cast instruction 2612 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2613 const Twine &Name, 2614 Instruction *InsertBefore) { 2615 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2616 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2617 "Invalid cast"); 2618 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2619 assert((!Ty->isVectorTy() || 2620 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2621 "Invalid cast"); 2622 2623 if (Ty->isIntOrIntVectorTy()) 2624 
return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2625 2626 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2627 } 2628 2629 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2630 Value *S, Type *Ty, 2631 const Twine &Name, 2632 BasicBlock *InsertAtEnd) { 2633 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2634 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2635 2636 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2637 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2638 2639 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2640 } 2641 2642 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2643 Value *S, Type *Ty, 2644 const Twine &Name, 2645 Instruction *InsertBefore) { 2646 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2647 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2648 2649 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2650 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2651 2652 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2653 } 2654 2655 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2656 const Twine &Name, 2657 Instruction *InsertBefore) { 2658 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2659 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2660 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2661 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2662 2663 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2664 } 2665 2666 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2667 bool isSigned, const Twine &Name, 2668 Instruction *InsertBefore) { 2669 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2670 "Invalid integer cast"); 2671 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2672 unsigned DstBits = 
Ty->getScalarSizeInBits(); 2673 Instruction::CastOps opcode = 2674 (SrcBits == DstBits ? Instruction::BitCast : 2675 (SrcBits > DstBits ? Instruction::Trunc : 2676 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2677 return Create(opcode, C, Ty, Name, InsertBefore); 2678 } 2679 2680 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2681 bool isSigned, const Twine &Name, 2682 BasicBlock *InsertAtEnd) { 2683 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2684 "Invalid cast"); 2685 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2686 unsigned DstBits = Ty->getScalarSizeInBits(); 2687 Instruction::CastOps opcode = 2688 (SrcBits == DstBits ? Instruction::BitCast : 2689 (SrcBits > DstBits ? Instruction::Trunc : 2690 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2691 return Create(opcode, C, Ty, Name, InsertAtEnd); 2692 } 2693 2694 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2695 const Twine &Name, 2696 Instruction *InsertBefore) { 2697 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2698 "Invalid cast"); 2699 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2700 unsigned DstBits = Ty->getScalarSizeInBits(); 2701 Instruction::CastOps opcode = 2702 (SrcBits == DstBits ? Instruction::BitCast : 2703 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2704 return Create(opcode, C, Ty, Name, InsertBefore); 2705 } 2706 2707 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2708 const Twine &Name, 2709 BasicBlock *InsertAtEnd) { 2710 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2711 "Invalid cast"); 2712 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2713 unsigned DstBits = Ty->getScalarSizeInBits(); 2714 Instruction::CastOps opcode = 2715 (SrcBits == DstBits ? Instruction::BitCast : 2716 (SrcBits > DstBits ? 
Instruction::FPTrunc : Instruction::FPExt)); 2717 return Create(opcode, C, Ty, Name, InsertAtEnd); 2718 } 2719 2720 // Check whether it is valid to call getCastOpcode for these types. 2721 // This routine must be kept in sync with getCastOpcode. 2722 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2723 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2724 return false; 2725 2726 if (SrcTy == DestTy) 2727 return true; 2728 2729 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2730 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2731 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2732 // An element by element cast. Valid if casting the elements is valid. 2733 SrcTy = SrcVecTy->getElementType(); 2734 DestTy = DestVecTy->getElementType(); 2735 } 2736 2737 // Get the bit sizes, we'll need these 2738 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2739 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2740 2741 // Run through the possibilities ... 
2742 if (DestTy->isIntegerTy()) { // Casting to integral 2743 if (SrcTy->isIntegerTy()) // Casting from integral 2744 return true; 2745 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2746 return true; 2747 if (SrcTy->isVectorTy()) // Casting from vector 2748 return DestBits == SrcBits; 2749 // Casting from something else 2750 return SrcTy->isPointerTy(); 2751 } 2752 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2753 if (SrcTy->isIntegerTy()) // Casting from integral 2754 return true; 2755 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2756 return true; 2757 if (SrcTy->isVectorTy()) // Casting from vector 2758 return DestBits == SrcBits; 2759 // Casting from something else 2760 return false; 2761 } 2762 if (DestTy->isVectorTy()) // Casting to vector 2763 return DestBits == SrcBits; 2764 if (DestTy->isPointerTy()) { // Casting to pointer 2765 if (SrcTy->isPointerTy()) // Casting from pointer 2766 return true; 2767 return SrcTy->isIntegerTy(); // Casting from integral 2768 } 2769 if (DestTy->isX86_MMXTy()) { 2770 if (SrcTy->isVectorTy()) 2771 return DestBits == SrcBits; // 64-bit vector to MMX 2772 return false; 2773 } // Casting to something else 2774 return false; 2775 } 2776 2777 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2778 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2779 return false; 2780 2781 if (SrcTy == DestTy) 2782 return true; 2783 2784 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2785 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2786 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2787 // An element by element cast. Valid if casting the elements is valid. 
2788 SrcTy = SrcVecTy->getElementType(); 2789 DestTy = DestVecTy->getElementType(); 2790 } 2791 } 2792 } 2793 2794 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2795 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2796 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2797 } 2798 } 2799 2800 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2801 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2802 2803 // Could still have vectors of pointers if the number of elements doesn't 2804 // match 2805 if (SrcBits == 0 || DestBits == 0) 2806 return false; 2807 2808 if (SrcBits != DestBits) 2809 return false; 2810 2811 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2812 return false; 2813 2814 return true; 2815 } 2816 2817 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2818 const DataLayout &DL) { 2819 // ptrtoint and inttoptr are not allowed on non-integral pointers 2820 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2821 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2822 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2823 !DL.isNonIntegralPointerType(PtrTy)); 2824 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2825 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2826 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2827 !DL.isNonIntegralPointerType(PtrTy)); 2828 2829 return isBitCastable(SrcTy, DestTy); 2830 } 2831 2832 // Provide a way to get a "cast" where the cast opcode is inferred from the 2833 // types and size of the operand. This, basically, is a parallel of the 2834 // logic in the castIsValid function below. This axiom should hold: 2835 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2836 // should not assert in castIsValid. In other words, this produces a "correct" 2837 // casting opcode for the arguments passed to it. 2838 // This routine must be kept in sync with isCastable. 
/// Infer the cast opcode for converting \p Src to \p DestTy, honoring the
/// signedness hints for int<->int and int<->FP conversions.  Must be kept in
/// sync with isCastable (asserts on type pairs isCastable rejects).
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // its an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                               // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                               // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}

//===----------------------------------------------------------------------===//
//                    CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
  // Check for type sanity on the arguments
  Type *SrcTy = S->getType();

  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  unsigned SrcLength = SrcTy->isVectorTy() ?
    cast<VectorType>(SrcTy)->getNumElements() : 0;
  unsigned DstLength = DstTy->isVectorTy() ?
    cast<VectorType>(DstTy)->getNumElements() : 0;

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize > DstBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength && SrcBitSize > DstBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength && SrcBitSize < DstBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcLength == DstLength;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcLength == DstLength;
  case Instruction::PtrToInt:
    if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
      return false;
    if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
      if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
        return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
      return false;
    if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
      if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
        return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
    VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
    if (SrcVecTy && DstVecTy)
      return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
    if (SrcVecTy)
      return SrcVecTy->getNumElements() == 1;
    if (DstVecTy)
      return DstVecTy->getNumElements() == 1;

    return true;
  }
  case Instruction::AddrSpaceCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    // An addrspacecast must actually change the address space.
    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
      if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
        return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());

      return false;
    }

    return true;
  }
  }
}

// Each subclass constructor below simply forwards to the CastInst base and
// re-checks validity with castIsValid (assert-only; no runtime cost in
// release builds).

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

TruncInst::TruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}

ZExtInst::ZExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

SExtInst::SExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPTruncInst::FPTruncInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

FPExtInst::FPExtInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

/// CmpInst constructor (insert-before form).  Wires up the two operands,
/// records the predicate in the instruction's subclass data, and names the
/// result value.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

/// CmpInst constructor (append-to-block form).
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

/// Factory: build an ICmpInst when Op is Instruction::ICmp, otherwise an
/// FCmpInst (note: any non-ICmp opcode falls through to the FCmp path).
/// InsertBefore may be null, in which case the instruction is not inserted.
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

/// Factory (append-to-block form); InsertAtEnd must be non-null since it is
/// dereferenced unconditionally.
CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}

/// Exchange the two operands, dispatching to the subclass so the predicate
/// is adjusted consistently with the swap.
void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

/// True if swapping the operands would leave the result unchanged
/// (subclass-defined).
bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

/// True if this is an equality-style comparison (subclass-defined).
bool CmpInst::isEquality() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isEquality();
  return cast<FCmpInst>(this)->isEquality();
}

/// Return the logical negation of the given predicate: !(x pred y) is
/// equivalent to (x inverse-pred y).  For FCmp the inverse flips both the
/// comparison and the ordered/unordered bit.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}

/// Return the textual (IR assembly) name of a predicate, or "unknown" for an
/// out-of-range value.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

/// Map an unsigned icmp predicate to its signed counterpart; equality and
/// already-signed predicates are returned unchanged.
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
       return pred;
    case ICMP_UGT: return ICMP_SGT;
    case ICMP_ULT: return ICMP_SLT;
    case ICMP_UGE: return ICMP_SGE;
    case ICMP_ULE: return ICMP_SLE;
  }
}

/// Map a signed icmp predicate to its unsigned counterpart; equality and
/// already-unsigned predicates are returned unchanged.
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
       return pred;
    case ICMP_SGT: return ICMP_UGT;
    case ICMP_SLT: return ICMP_ULT;
    case ICMP_SGE: return ICMP_UGE;
    case ICMP_SLE: return ICMP_ULE;
  }
}

/// Toggle strict (<, >) vs. non-strict (<=, >=) for ordering predicates.
/// Equality-style and unordered-only predicates are unsupported and hit the
/// llvm_unreachable default.
CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown or unsupported cmp predicate!");
    case ICMP_SGT: return ICMP_SGE;
    case ICMP_SLT: return ICMP_SLE;
    case ICMP_SGE: return ICMP_SGT;
    case ICMP_SLE: return ICMP_SLT;
    case ICMP_UGT: return ICMP_UGE;
    case ICMP_ULT: return ICMP_ULE;
    case ICMP_UGE: return ICMP_UGT;
    case ICMP_ULE: return ICMP_ULT;

    case FCMP_OGT: return FCMP_OGE;
    case FCMP_OLT: return FCMP_OLE;
    case FCMP_OGE: return FCMP_OGT;
    case FCMP_OLE: return FCMP_OLT;
    case FCMP_UGT: return FCMP_UGE;
    case FCMP_ULT: return FCMP_ULE;
    case FCMP_UGE: return FCMP_UGT;
    case FCMP_ULE: return FCMP_ULT;
  }
}

/// Return the predicate as if the operands were swapped: (x pred y) is
/// equivalent to (y swapped-pred x).  Symmetric predicates map to themselves.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: case ICMP_NE:
      return pred;
    case ICMP_SGT: return ICMP_SLT;
    case ICMP_SLT: return ICMP_SGT;
    case ICMP_SGE: return ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}

/// Weaken a strict ordering predicate to its non-strict form (e.g. SGT ->
/// SGE); every other predicate is returned unchanged.
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

/// Convert an unsigned icmp predicate to the equivalent signed one.  Unlike
/// ICmpInst::getSignedPredicate above, this asserts that the input really is
/// one of the four unsigned predicates.
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3449 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!"); 3450 3451 switch (pred) { 3452 default: 3453 llvm_unreachable("Unknown predicate!"); 3454 case CmpInst::ICMP_ULT: 3455 return CmpInst::ICMP_SLT; 3456 case CmpInst::ICMP_ULE: 3457 return CmpInst::ICMP_SLE; 3458 case CmpInst::ICMP_UGT: 3459 return CmpInst::ICMP_SGT; 3460 case CmpInst::ICMP_UGE: 3461 return CmpInst::ICMP_SGE; 3462 } 3463 } 3464 3465 bool CmpInst::isUnsigned(Predicate predicate) { 3466 switch (predicate) { 3467 default: return false; 3468 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT: 3469 case ICmpInst::ICMP_UGE: return true; 3470 } 3471 } 3472 3473 bool CmpInst::isSigned(Predicate predicate) { 3474 switch (predicate) { 3475 default: return false; 3476 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT: 3477 case ICmpInst::ICMP_SGE: return true; 3478 } 3479 } 3480 3481 bool CmpInst::isOrdered(Predicate predicate) { 3482 switch (predicate) { 3483 default: return false; 3484 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT: 3485 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE: 3486 case FCmpInst::FCMP_ORD: return true; 3487 } 3488 } 3489 3490 bool CmpInst::isUnordered(Predicate predicate) { 3491 switch (predicate) { 3492 default: return false; 3493 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT: 3494 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE: 3495 case FCmpInst::FCMP_UNO: return true; 3496 } 3497 } 3498 3499 bool CmpInst::isTrueWhenEqual(Predicate predicate) { 3500 switch(predicate) { 3501 default: return false; 3502 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: 3503 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; 3504 } 3505 } 3506 3507 bool CmpInst::isFalseWhenEqual(Predicate predicate) { 3508 switch(predicate) { 3509 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: 
case ICMP_SGT: case ICMP_SLT: 3510 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true; 3511 default: return false; 3512 } 3513 } 3514 3515 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3516 // If the predicates match, then we know the first condition implies the 3517 // second is true. 3518 if (Pred1 == Pred2) 3519 return true; 3520 3521 switch (Pred1) { 3522 default: 3523 break; 3524 case ICMP_EQ: 3525 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3526 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3527 Pred2 == ICMP_SLE; 3528 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3529 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3530 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3531 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3532 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3533 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3534 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3535 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3536 } 3537 return false; 3538 } 3539 3540 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3541 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3542 } 3543 3544 //===----------------------------------------------------------------------===// 3545 // SwitchInst Implementation 3546 //===----------------------------------------------------------------------===// 3547 3548 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3549 assert(Value && Default && NumReserved); 3550 ReservedSpace = NumReserved; 3551 setNumHungOffUseOperands(2); 3552 allocHungoffUses(ReservedSpace); 3553 3554 Op<0>() = Value; 3555 Op<1>() = Default; 3556 } 3557 3558 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3559 /// switch on and a default destination. 
The number of additional cases can 3560 /// be specified here to make memory allocation more efficient. This 3561 /// constructor can also autoinsert before another instruction. 3562 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3563 Instruction *InsertBefore) 3564 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3565 nullptr, 0, InsertBefore) { 3566 init(Value, Default, 2+NumCases*2); 3567 } 3568 3569 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3570 /// switch on and a default destination. The number of additional cases can 3571 /// be specified here to make memory allocation more efficient. This 3572 /// constructor also autoinserts at the end of the specified BasicBlock. 3573 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3574 BasicBlock *InsertAtEnd) 3575 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3576 nullptr, 0, InsertAtEnd) { 3577 init(Value, Default, 2+NumCases*2); 3578 } 3579 3580 SwitchInst::SwitchInst(const SwitchInst &SI) 3581 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3582 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3583 setNumHungOffUseOperands(SI.getNumOperands()); 3584 Use *OL = getOperandList(); 3585 const Use *InOL = SI.getOperandList(); 3586 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3587 OL[i] = InOL[i]; 3588 OL[i+1] = InOL[i+1]; 3589 } 3590 SubclassOptionalData = SI.SubclassOptionalData; 3591 } 3592 3593 /// addCase - Add an entry to the switch instruction... 3594 /// 3595 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3596 unsigned NewCaseIdx = getNumCases(); 3597 unsigned OpNo = getNumOperands(); 3598 if (OpNo+2 > ReservedSpace) 3599 growOperands(); // Get more space! 3600 // Initialize some new operands. 
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  // Write the new (value, successor) pair through a CaseHandle so the
  // operand slots are addressed consistently with the iterator interface.
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  // Note: the returned iterator points at the slot just vacated, which now
  // holds the case that was previously last (if any).
  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

/// Shared constructor body: reserve hung-off operand space and install the
/// destination address as operand 0.  Destination blocks occupy operand
/// slots 1 onward.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}


/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

/// Copy constructor: clones the address and every destination operand; the
/// new instruction is not inserted anywhere.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.  (Operand 0 is the address, so
  // destination idx lives at operand idx+1.)
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
//                           cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  // Placement-new with the operand count so the copy ctor has room for all
  // hung-off operands.
  return new (getNumOperands()) GetElementPtrInst(*this);
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  // The flag-style properties are not constructor parameters, so copy them
  // explicitly after construction.
  AllocaInst *Result = new AllocaInst(getAllocatedType(),
                                      getType()->getAddressSpace(),
                                      (Value *)getOperand(0), getAlignment());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getOperand(0), Twine(), isVolatile(),
                      getAlignment(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       getAlignment(), getOrdering(), getSyncScopeID());

}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result =
    new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
                          getSuccessOrdering(), getFailureOrdering(),
                          getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
    new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                      getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

// The cast-instruction clones below all rebuild from the single operand and
// the destination type.

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  // Operand-bundle metadata lives in extra descriptor bytes before the
  // object, so the placement-new must reserve them when bundles are present.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  // Same operand-bundle descriptor handling as CallInst::cloneImpl above.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}