//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
//                            CallSite Class
//===----------------------------------------------------------------------===//

User::op_iterator CallSite::getCallee() const {
  Instruction *II(getInstruction());
  return isCall()
           ? cast<CallInst>(II)->op_end() - 1   // Skip Callee
           : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
}

//===----------------------------------------------------------------------===//
//                           TerminatorInst Class
//===----------------------------------------------------------------------===//

unsigned TerminatorInst::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *TerminatorInst::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void TerminatorInst::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
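// For illustration, the three dispatchers above rely on Instruction.def to
// enumerate every terminator: each HANDLE_TERM_INST(N, OPC, CLASS) entry
// expands to a case that forwards to the concrete subclass, roughly
//
//   case Instruction::Br:
//     return static_cast<const BranchInst *>(this)->getNumSuccessors();
//   case Instruction::Switch:
//     return static_cast<const SwitchInst *>(this)->getNumSuccessors();
//   ...
//
// so adding a new terminator to Instruction.def extends these switches
// without further changes in this file.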
//===----------------------------------------------------------------------===//
//                             SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
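// For illustration, in IR such as the following (names are hypothetical):
//
//   loop:
//     %p = phi i32 [ %x, %entry ], [ %p, %latch ], [ %x, %other ]
//
// every incoming value is either %x or the phi itself, so hasConstantValue()
// would return %x; if every incoming value were the phi itself it would
// return undef instead.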
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
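// For illustration, a front end might build a cleanup landing pad roughly as
// follows (the exception struct type, type-info global, and insertion point
// are hypothetical):
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnStructTy, /*NumReservedClauses=*/1,
//                              "lpad", InsertBefore);
//   LP->setCleanup(true);
//   LP->addClause(TypeInfoGlobal);   // appends one clause operand
//
// The reserved-clause count is only a capacity hint; addClause() below grows
// the hung-off operand list on demand.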
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  Op<-1>() = Func;

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(Value *Func, const Twine &NameStr) {
  FTy =
      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  Op<-1>() = Func;

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1,
          InsertBefore) {
  init(Func, Name);
}

CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
    : CallBase<CallInst>(
          cast<FunctionType>(
              cast<PointerType>(Func->getType())->getElementType())
              ->getReturnType(),
          Instruction::Call,
          OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase<CallInst>(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
                         OperandTraits<CallBase<CallInst>>::op_end(this) -
                             CI.getNumOperands(),
                         CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
                                 InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
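// For illustration, this overload is what a pass would typically use to attach
// an operand bundle to an existing call without disturbing its other state,
// roughly as follows (the bundle contents are hypothetical):
//
//   SmallVector<OperandBundleDef, 1> Bundles;
//   Bundles.emplace_back("deopt", DeoptState);
//   CallInst *NewCI = CallInst::Create(OldCI, Bundles, /*InsertPt=*/OldCI);
//   OldCI->replaceAllUsesWith(NewCI);
//   OldCI->eraseFromParent();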
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  Value *MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc)) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
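// For illustration, given a hypothetical element type %T, a runtime element
// count %n, a DataLayout DL, and a context Ctx, a caller might write roughly:
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Value *ElemSz  = ConstantExpr::getTruncOrBitCast(
//       ConstantExpr::getSizeOf(T), IntPtrTy);
//   Instruction *Buf = CallInst::CreateMalloc(InsertBefore, IntPtrTy, T,
//                                             ElemSz, N, nullptr, "buf");
//
// which produces IR along the lines of:
//
//   %mallocsize = mul i64 %n, ptrtoint (%T* getelementptr (%T, %T* null, i32 1) to i64)
//   %malloccall = tail call i8* @malloc(i64 %mallocsize)
//   %buf        = bitcast i8* %malloccall to %T*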
static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
         "NumOperands not set up?");
  Op<-3>() = Fn;
  Op<-2>() = IfNormal;
  Op<-1>() = IfException;

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase<InvokeInst>(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
                           OperandTraits<CallBase<InvokeInst>>::op_end(this) -
                               II.getNumOperands(),
                           II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpB,
                                   II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) -
                         RI.getNumOperands(),
                     RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) - !!retVal,
                     !!retVal, InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this) - !!retVal,
                     !!retVal, InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
                     OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1,
                     InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                     OperandTraits<ResumeInst>::op_begin(this), 1,
                     InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                      CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : TerminatorInst(CRI.getType(), Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) -
                         CRI.getNumOperands(),
                     CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
                     Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                     Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
                     Instruction::CleanupRet,
                     OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                     Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                       CatchReturnInst Implementation
//===----------------------------------------------------------------------===//

void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : TerminatorInst(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2,
                     InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                     OperandTraits<CatchReturnInst>::op_begin(this), 2,
                     InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                     InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                     InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : TerminatorInst(CSI.getType(), Instruction::CatchSwitch, nullptr,
                     CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                       FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  std::copy(Args.begin(), Args.end(), op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
                     nullptr, 0, InsertBefore) {
}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
                     nullptr, 0, InsertAtEnd) {
}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                     OperandTraits<BranchInst>::op_end(this) - 1,
                     1, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                     OperandTraits<BranchInst>::op_end(this) - 3,
                     3, InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                     OperandTraits<BranchInst>::op_end(this) - 1,
                     1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                     OperandTraits<BranchInst>::op_end(this) - 3,
                     3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
                     OperandTraits<BranchInst>::op_end(this) -
                         BI.getNumOperands(),
                     BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
Use other ctor"); 994 assert(Amt->getType()->isIntegerTy() && 995 "Allocation array size is not an integer!"); 996 } 997 return Amt; 998 } 999 1000 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1001 Instruction *InsertBefore) 1002 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {} 1003 1004 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1005 BasicBlock *InsertAtEnd) 1006 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {} 1007 1008 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1009 const Twine &Name, Instruction *InsertBefore) 1010 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {} 1011 1012 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1013 const Twine &Name, BasicBlock *InsertAtEnd) 1014 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {} 1015 1016 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1017 unsigned Align, const Twine &Name, 1018 Instruction *InsertBefore) 1019 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1020 getAISize(Ty->getContext(), ArraySize), InsertBefore), 1021 AllocatedType(Ty) { 1022 setAlignment(Align); 1023 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1024 setName(Name); 1025 } 1026 1027 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1028 unsigned Align, const Twine &Name, 1029 BasicBlock *InsertAtEnd) 1030 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1031 getAISize(Ty->getContext(), ArraySize), InsertAtEnd), 1032 AllocatedType(Ty) { 1033 setAlignment(Align); 1034 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1035 setName(Name); 1036 } 1037 1038 void AllocaInst::setAlignment(unsigned Align) { 1039 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); 1040 assert(Align <= MaximumAlignment && 1041 "Alignment is greater than MaximumAlignment!"); 1042 setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) | 1043 (Log2_32(Align) + 1)); 1044 assert(getAlignment() == Align && "Alignment representation error!"); 1045 } 1046 1047 bool AllocaInst::isArrayAllocation() const { 1048 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0))) 1049 return !CI->isOne(); 1050 return true; 1051 } 1052 1053 /// isStaticAlloca - Return true if this alloca is in the entry block of the 1054 /// function and is a constant size. If so, the code generator will fold it 1055 /// into the prolog/epilog code, so it is basically free. 1056 bool AllocaInst::isStaticAlloca() const { 1057 // Must be constant size. 1058 if (!isa<ConstantInt>(getArraySize())) return false; 1059 1060 // Must be in the entry block. 
bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size.  If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
//                           LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
                 cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
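// For illustration, the ordering asserts in Init() accept pairs such as
// (success = SequentiallyConsistent, failure = Acquire) or
// (success = AcquireRelease, failure = Monotonic), where the failure ordering
// is no stronger than the success ordering, but reject
// (success = Acquire, failure = SequentiallyConsistent) as well as any
// failure ordering carrying release semantics, e.g. Release or AcquireRelease.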
//===----------------------------------------------------------------------===//
//                        AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

//===----------------------------------------------------------------------===//
//                         FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
//                       GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}
/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a continuous piece of memory containing the
/// indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of the empty set index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized, otherwise
  // it cannot be 'stepped over'.
  if (!Agg->isSized())
    return nullptr;

  unsigned CurIdx = 1;
  for (; CurIdx != IdxList.size(); ++CurIdx) {
    CompositeType *CT = dyn_cast<CompositeType>(Agg);
    if (!CT || CT->isPointerTy()) return nullptr;
    IndexTy Index = IdxList[CurIdx];
    if (!CT->indexValid(Index)) return nullptr;
    Agg = CT->getTypeAtIndex(Index);
  }
  return CurIdx == IdxList.size() ? Agg : nullptr;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
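// For illustration, the first index only steps over the source element type
// and is never inspected by the loop above (CurIdx starts at 1), so for a
// hypothetical type %pair = { i32, [4 x float] }:
//
//   getIndexedType(%pair, {0})       == %pair
//   getIndexedType(%pair, {0, 1})    == [4 x float]
//   getIndexedType(%pair, {0, 1, 2}) == float
//   getIndexedType(%pair, {0, 2})    == nullptr   (no field 2 in %pair)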
1487 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1488 } 1489 1490 //===----------------------------------------------------------------------===// 1491 // ExtractElementInst Implementation 1492 //===----------------------------------------------------------------------===// 1493 1494 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1495 const Twine &Name, 1496 Instruction *InsertBef) 1497 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1498 ExtractElement, 1499 OperandTraits<ExtractElementInst>::op_begin(this), 1500 2, InsertBef) { 1501 assert(isValidOperands(Val, Index) && 1502 "Invalid extractelement instruction operands!"); 1503 Op<0>() = Val; 1504 Op<1>() = Index; 1505 setName(Name); 1506 } 1507 1508 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1509 const Twine &Name, 1510 BasicBlock *InsertAE) 1511 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1512 ExtractElement, 1513 OperandTraits<ExtractElementInst>::op_begin(this), 1514 2, InsertAE) { 1515 assert(isValidOperands(Val, Index) && 1516 "Invalid extractelement instruction operands!"); 1517 1518 Op<0>() = Val; 1519 Op<1>() = Index; 1520 setName(Name); 1521 } 1522 1523 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1524 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1525 return false; 1526 return true; 1527 } 1528 1529 //===----------------------------------------------------------------------===// 1530 // InsertElementInst Implementation 1531 //===----------------------------------------------------------------------===// 1532 1533 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1534 const Twine &Name, 1535 Instruction *InsertBef) 1536 : Instruction(Vec->getType(), InsertElement, 1537 OperandTraits<InsertElementInst>::op_begin(this), 1538 3, InsertBef) { 1539 assert(isValidOperands(Vec, Elt, Index) && 1540 "Invalid insertelement instruction operands!"); 1541 Op<0>() = Vec; 1542 Op<1>() = Elt; 1543 Op<2>() = Index; 1544 setName(Name); 1545 } 1546 1547 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1548 const Twine &Name, 1549 BasicBlock *InsertAE) 1550 : Instruction(Vec->getType(), InsertElement, 1551 OperandTraits<InsertElementInst>::op_begin(this), 1552 3, InsertAE) { 1553 assert(isValidOperands(Vec, Elt, Index) && 1554 "Invalid insertelement instruction operands!"); 1555 1556 Op<0>() = Vec; 1557 Op<1>() = Elt; 1558 Op<2>() = Index; 1559 setName(Name); 1560 } 1561 1562 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1563 const Value *Index) { 1564 if (!Vec->getType()->isVectorTy()) 1565 return false; // First operand of insertelement must be vector type. 1566 1567 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1568 return false;// Second operand of insertelement must be vector element type. 1569 1570 if (!Index->getType()->isIntegerTy()) 1571 return false; // Third operand of insertelement must be i32. 
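// e.g. 'insertelement <4 x float> %v, float %x, i32 0' satisfies all three
// checks above: a vector operand, a matching element type and an integer index.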
1572 return true; 1573 } 1574 1575 //===----------------------------------------------------------------------===// 1576 // ShuffleVectorInst Implementation 1577 //===----------------------------------------------------------------------===// 1578 1579 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1580 const Twine &Name, 1581 Instruction *InsertBefore) 1582 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1583 cast<VectorType>(Mask->getType())->getNumElements()), 1584 ShuffleVector, 1585 OperandTraits<ShuffleVectorInst>::op_begin(this), 1586 OperandTraits<ShuffleVectorInst>::operands(this), 1587 InsertBefore) { 1588 assert(isValidOperands(V1, V2, Mask) && 1589 "Invalid shuffle vector instruction operands!"); 1590 Op<0>() = V1; 1591 Op<1>() = V2; 1592 Op<2>() = Mask; 1593 setName(Name); 1594 } 1595 1596 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1597 const Twine &Name, 1598 BasicBlock *InsertAtEnd) 1599 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1600 cast<VectorType>(Mask->getType())->getNumElements()), 1601 ShuffleVector, 1602 OperandTraits<ShuffleVectorInst>::op_begin(this), 1603 OperandTraits<ShuffleVectorInst>::operands(this), 1604 InsertAtEnd) { 1605 assert(isValidOperands(V1, V2, Mask) && 1606 "Invalid shuffle vector instruction operands!"); 1607 1608 Op<0>() = V1; 1609 Op<1>() = V2; 1610 Op<2>() = Mask; 1611 setName(Name); 1612 } 1613 1614 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1615 const Value *Mask) { 1616 // V1 and V2 must be vectors of the same type. 1617 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1618 return false; 1619 1620 // Mask must be vector of i32. 1621 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1622 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1623 return false; 1624 1625 // Check to see if Mask is valid. 1626 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1627 return true; 1628 1629 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1630 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1631 for (Value *Op : MV->operands()) { 1632 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1633 if (CI->uge(V1Size*2)) 1634 return false; 1635 } else if (!isa<UndefValue>(Op)) { 1636 return false; 1637 } 1638 } 1639 return true; 1640 } 1641 1642 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1643 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1644 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1645 if (CDS->getElementAsInteger(i) >= V1Size*2) 1646 return false; 1647 return true; 1648 } 1649 1650 // The bitcode reader can create a place holder for a forward reference 1651 // used as the shuffle mask. When this occurs, the shuffle mask will 1652 // fall into this case and fail. To avoid this error, do this bit of 1653 // ugliness to allow such a mask pass. 
1654 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1655 if (CE->getOpcode() == Instruction::UserOp1) 1656 return true; 1657 1658 return false; 1659 } 1660 1661 int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) { 1662 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1663 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1664 return CDS->getElementAsInteger(i); 1665 Constant *C = Mask->getAggregateElement(i); 1666 if (isa<UndefValue>(C)) 1667 return -1; 1668 return cast<ConstantInt>(C)->getZExtValue(); 1669 } 1670 1671 void ShuffleVectorInst::getShuffleMask(Constant *Mask, 1672 SmallVectorImpl<int> &Result) { 1673 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1674 1675 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1676 for (unsigned i = 0; i != NumElts; ++i) 1677 Result.push_back(CDS->getElementAsInteger(i)); 1678 return; 1679 } 1680 for (unsigned i = 0; i != NumElts; ++i) { 1681 Constant *C = Mask->getAggregateElement(i); 1682 Result.push_back(isa<UndefValue>(C) ? -1 : 1683 cast<ConstantInt>(C)->getZExtValue()); 1684 } 1685 } 1686 1687 //===----------------------------------------------------------------------===// 1688 // InsertValueInst Class 1689 //===----------------------------------------------------------------------===// 1690 1691 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 1692 const Twine &Name) { 1693 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 1694 1695 // There's no fundamental reason why we require at least one index 1696 // (other than weirdness with &*IdxBegin being invalid; see 1697 // getelementptr's init routine for example). But there's no 1698 // present need to support it. 1699 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 1700 1701 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 1702 Val->getType() && "Inserted value must match indexed type!"); 1703 Op<0>() = Agg; 1704 Op<1>() = Val; 1705 1706 Indices.append(Idxs.begin(), Idxs.end()); 1707 setName(Name); 1708 } 1709 1710 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 1711 : Instruction(IVI.getType(), InsertValue, 1712 OperandTraits<InsertValueInst>::op_begin(this), 2), 1713 Indices(IVI.Indices) { 1714 Op<0>() = IVI.getOperand(0); 1715 Op<1>() = IVI.getOperand(1); 1716 SubclassOptionalData = IVI.SubclassOptionalData; 1717 } 1718 1719 //===----------------------------------------------------------------------===// 1720 // ExtractValueInst Class 1721 //===----------------------------------------------------------------------===// 1722 1723 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 1724 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 1725 1726 // There's no fundamental reason why we require at least one index. 1727 // But there's no present need to support it. 1728 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 1729 1730 Indices.append(Idxs.begin(), Idxs.end()); 1731 setName(Name); 1732 } 1733 1734 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 1735 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 1736 Indices(EVI.Indices) { 1737 SubclassOptionalData = EVI.SubclassOptionalData; 1738 } 1739 1740 // getIndexedType - Returns the type of the element that would be extracted 1741 // with an extractvalue instruction with the specified parameters. 
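// For example, the indices {1, 0} applied to the aggregate type
// { i32, { float, double } } select the nested struct and then its float
// member, so float is returned.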
1742 // 1743 // A null type is returned if the indices are invalid for the specified 1744 // pointer type. 1745 // 1746 Type *ExtractValueInst::getIndexedType(Type *Agg, 1747 ArrayRef<unsigned> Idxs) { 1748 for (unsigned Index : Idxs) { 1749 // We can't use CompositeType::indexValid(Index) here. 1750 // indexValid() always returns true for arrays because getelementptr allows 1751 // out-of-bounds indices. Since we don't allow those for extractvalue and 1752 // insertvalue we need to check array indexing manually. 1753 // Since the only other types we can index into are struct types it's just 1754 // as easy to check those manually as well. 1755 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 1756 if (Index >= AT->getNumElements()) 1757 return nullptr; 1758 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 1759 if (Index >= ST->getNumElements()) 1760 return nullptr; 1761 } else { 1762 // Not a valid type to index into. 1763 return nullptr; 1764 } 1765 1766 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 1767 } 1768 return const_cast<Type*>(Agg); 1769 } 1770 1771 //===----------------------------------------------------------------------===// 1772 // BinaryOperator Class 1773 //===----------------------------------------------------------------------===// 1774 1775 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1776 Type *Ty, const Twine &Name, 1777 Instruction *InsertBefore) 1778 : Instruction(Ty, iType, 1779 OperandTraits<BinaryOperator>::op_begin(this), 1780 OperandTraits<BinaryOperator>::operands(this), 1781 InsertBefore) { 1782 Op<0>() = S1; 1783 Op<1>() = S2; 1784 setName(Name); 1785 AssertOK(); 1786 } 1787 1788 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 1789 Type *Ty, const Twine &Name, 1790 BasicBlock *InsertAtEnd) 1791 : Instruction(Ty, iType, 1792 OperandTraits<BinaryOperator>::op_begin(this), 1793 OperandTraits<BinaryOperator>::operands(this), 1794 InsertAtEnd) { 1795 Op<0>() = S1; 1796 Op<1>() = S2; 1797 setName(Name); 1798 AssertOK(); 1799 } 1800 1801 void BinaryOperator::AssertOK() { 1802 Value *LHS = getOperand(0), *RHS = getOperand(1); 1803 (void)LHS; (void)RHS; // Silence warnings. 
1804 assert(LHS->getType() == RHS->getType() && 1805 "Binary operator operand types must match!"); 1806 #ifndef NDEBUG 1807 switch (getOpcode()) { 1808 case Add: case Sub: 1809 case Mul: 1810 assert(getType() == LHS->getType() && 1811 "Arithmetic operation should return same type as operands!"); 1812 assert(getType()->isIntOrIntVectorTy() && 1813 "Tried to create an integer operation on a non-integer type!"); 1814 break; 1815 case FAdd: case FSub: 1816 case FMul: 1817 assert(getType() == LHS->getType() && 1818 "Arithmetic operation should return same type as operands!"); 1819 assert(getType()->isFPOrFPVectorTy() && 1820 "Tried to create a floating-point operation on a " 1821 "non-floating-point type!"); 1822 break; 1823 case UDiv: 1824 case SDiv: 1825 assert(getType() == LHS->getType() && 1826 "Arithmetic operation should return same type as operands!"); 1827 assert(getType()->isIntOrIntVectorTy() && 1828 "Incorrect operand type (not integer) for S/UDIV"); 1829 break; 1830 case FDiv: 1831 assert(getType() == LHS->getType() && 1832 "Arithmetic operation should return same type as operands!"); 1833 assert(getType()->isFPOrFPVectorTy() && 1834 "Incorrect operand type (not floating point) for FDIV"); 1835 break; 1836 case URem: 1837 case SRem: 1838 assert(getType() == LHS->getType() && 1839 "Arithmetic operation should return same type as operands!"); 1840 assert(getType()->isIntOrIntVectorTy() && 1841 "Incorrect operand type (not integer) for S/UREM"); 1842 break; 1843 case FRem: 1844 assert(getType() == LHS->getType() && 1845 "Arithmetic operation should return same type as operands!"); 1846 assert(getType()->isFPOrFPVectorTy() && 1847 "Incorrect operand type (not floating point) for FREM"); 1848 break; 1849 case Shl: 1850 case LShr: 1851 case AShr: 1852 assert(getType() == LHS->getType() && 1853 "Shift operation should return same type as operands!"); 1854 assert(getType()->isIntOrIntVectorTy() && 1855 "Tried to create a shift operation on a non-integral type!"); 1856 break; 1857 case And: case Or: 1858 case Xor: 1859 assert(getType() == LHS->getType() && 1860 "Logical operation should return same type as operands!"); 1861 assert(getType()->isIntOrIntVectorTy() && 1862 "Tried to create a logical operation on a non-integral type!"); 1863 break; 1864 default: llvm_unreachable("Invalid opcode provided"); 1865 } 1866 #endif 1867 } 1868 1869 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 1870 const Twine &Name, 1871 Instruction *InsertBefore) { 1872 assert(S1->getType() == S2->getType() && 1873 "Cannot create binary operator with two operands of differing type!"); 1874 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 1875 } 1876 1877 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 1878 const Twine &Name, 1879 BasicBlock *InsertAtEnd) { 1880 BinaryOperator *Res = Create(Op, S1, S2, Name); 1881 InsertAtEnd->getInstList().push_back(Res); 1882 return Res; 1883 } 1884 1885 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 1886 Instruction *InsertBefore) { 1887 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1888 return new BinaryOperator(Instruction::Sub, 1889 zero, Op, 1890 Op->getType(), Name, InsertBefore); 1891 } 1892 1893 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 1894 BasicBlock *InsertAtEnd) { 1895 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1896 return new BinaryOperator(Instruction::Sub, 1897 zero, Op, 1898 Op->getType(), 
Name, InsertAtEnd); 1899 } 1900 1901 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 1902 Instruction *InsertBefore) { 1903 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1904 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 1905 } 1906 1907 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 1908 BasicBlock *InsertAtEnd) { 1909 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1910 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 1911 } 1912 1913 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 1914 Instruction *InsertBefore) { 1915 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1916 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 1917 } 1918 1919 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 1920 BasicBlock *InsertAtEnd) { 1921 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1922 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 1923 } 1924 1925 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 1926 Instruction *InsertBefore) { 1927 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1928 return new BinaryOperator(Instruction::FSub, zero, Op, 1929 Op->getType(), Name, InsertBefore); 1930 } 1931 1932 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 1933 BasicBlock *InsertAtEnd) { 1934 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 1935 return new BinaryOperator(Instruction::FSub, zero, Op, 1936 Op->getType(), Name, InsertAtEnd); 1937 } 1938 1939 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 1940 Instruction *InsertBefore) { 1941 Constant *C = Constant::getAllOnesValue(Op->getType()); 1942 return new BinaryOperator(Instruction::Xor, Op, C, 1943 Op->getType(), Name, InsertBefore); 1944 } 1945 1946 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 1947 BasicBlock *InsertAtEnd) { 1948 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 1949 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 1950 Op->getType(), Name, InsertAtEnd); 1951 } 1952 1953 // isConstantAllOnes - Helper function for several functions below 1954 static inline bool isConstantAllOnes(const Value *V) { 1955 if (const Constant *C = dyn_cast<Constant>(V)) 1956 return C->isAllOnesValue(); 1957 return false; 1958 } 1959 1960 bool BinaryOperator::isNeg(const Value *V) { 1961 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 1962 if (Bop->getOpcode() == Instruction::Sub) 1963 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) 1964 return C->isNegativeZeroValue(); 1965 return false; 1966 } 1967 1968 bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) { 1969 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 1970 if (Bop->getOpcode() == Instruction::FSub) 1971 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) { 1972 if (!IgnoreZeroSign) 1973 IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros(); 1974 return !IgnoreZeroSign ? 
C->isNegativeZeroValue() : C->isZeroValue(); 1975 } 1976 return false; 1977 } 1978 1979 bool BinaryOperator::isNot(const Value *V) { 1980 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) 1981 return (Bop->getOpcode() == Instruction::Xor && 1982 (isConstantAllOnes(Bop->getOperand(1)) || 1983 isConstantAllOnes(Bop->getOperand(0)))); 1984 return false; 1985 } 1986 1987 Value *BinaryOperator::getNegArgument(Value *BinOp) { 1988 return cast<BinaryOperator>(BinOp)->getOperand(1); 1989 } 1990 1991 const Value *BinaryOperator::getNegArgument(const Value *BinOp) { 1992 return getNegArgument(const_cast<Value*>(BinOp)); 1993 } 1994 1995 Value *BinaryOperator::getFNegArgument(Value *BinOp) { 1996 return cast<BinaryOperator>(BinOp)->getOperand(1); 1997 } 1998 1999 const Value *BinaryOperator::getFNegArgument(const Value *BinOp) { 2000 return getFNegArgument(const_cast<Value*>(BinOp)); 2001 } 2002 2003 Value *BinaryOperator::getNotArgument(Value *BinOp) { 2004 assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!"); 2005 BinaryOperator *BO = cast<BinaryOperator>(BinOp); 2006 Value *Op0 = BO->getOperand(0); 2007 Value *Op1 = BO->getOperand(1); 2008 if (isConstantAllOnes(Op0)) return Op1; 2009 2010 assert(isConstantAllOnes(Op1)); 2011 return Op0; 2012 } 2013 2014 const Value *BinaryOperator::getNotArgument(const Value *BinOp) { 2015 return getNotArgument(const_cast<Value*>(BinOp)); 2016 } 2017 2018 // Exchange the two operands to this instruction. This instruction is safe to 2019 // use on any binary instruction and does not modify the semantics of the 2020 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2021 // is changed. 2022 bool BinaryOperator::swapOperands() { 2023 if (!isCommutative()) 2024 return true; // Can't commute operands 2025 Op<0>().swap(Op<1>()); 2026 return false; 2027 } 2028 2029 //===----------------------------------------------------------------------===// 2030 // FPMathOperator Class 2031 //===----------------------------------------------------------------------===// 2032 2033 float FPMathOperator::getFPAccuracy() const { 2034 const MDNode *MD = 2035 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2036 if (!MD) 2037 return 0.0; 2038 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2039 return Accuracy->getValueAPF().convertToFloat(); 2040 } 2041 2042 //===----------------------------------------------------------------------===// 2043 // CastInst Class 2044 //===----------------------------------------------------------------------===// 2045 2046 // Just determine if this cast only deals with integral->integral conversion. 2047 bool CastInst::isIntegerCast() const { 2048 switch (getOpcode()) { 2049 default: return false; 2050 case Instruction::ZExt: 2051 case Instruction::SExt: 2052 case Instruction::Trunc: 2053 return true; 2054 case Instruction::BitCast: 2055 return getOperand(0)->getType()->isIntegerTy() && 2056 getType()->isIntegerTy(); 2057 } 2058 } 2059 2060 bool CastInst::isLosslessCast() const { 2061 // Only BitCast can be lossless, exit fast if we're not BitCast 2062 if (getOpcode() != Instruction::BitCast) 2063 return false; 2064 2065 // Identity cast is always lossless 2066 Type *SrcTy = getOperand(0)->getType(); 2067 Type *DstTy = getType(); 2068 if (SrcTy == DstTy) 2069 return true; 2070 2071 // Pointer to pointer is always lossless. 
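// For example, 'bitcast i8* %p to i32*' leaves the bits of %p untouched and
// only reinterprets the pointee type.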
2072 if (SrcTy->isPointerTy())
2073 return DstTy->isPointerTy();
2074 return false; // Other types have no identity values
2075 }
2076
2077 /// This function determines if the CastInst does not require any bits to be
2078 /// changed in order to effect the cast. Essentially, it identifies cases where
2079 /// no code gen is necessary for the cast, hence the name no-op cast. For
2080 /// example, the following are all no-op casts:
2081 /// # bitcast i32* %x to i8*
2082 /// # bitcast <2 x i32> %x to <4 x i16>
2083 /// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
2084 /// Determine if the described cast is a no-op.
2085 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2086 Type *SrcTy,
2087 Type *DestTy,
2088 const DataLayout &DL) {
2089 switch (Opcode) {
2090 default: llvm_unreachable("Invalid CastOp");
2091 case Instruction::Trunc:
2092 case Instruction::ZExt:
2093 case Instruction::SExt:
2094 case Instruction::FPTrunc:
2095 case Instruction::FPExt:
2096 case Instruction::UIToFP:
2097 case Instruction::SIToFP:
2098 case Instruction::FPToUI:
2099 case Instruction::FPToSI:
2100 case Instruction::AddrSpaceCast:
2101 // TODO: Target information may give a more accurate answer here.
2102 return false;
2103 case Instruction::BitCast:
2104 return true; // BitCast never modifies bits.
2105 case Instruction::PtrToInt:
2106 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2107 DestTy->getScalarSizeInBits();
2108 case Instruction::IntToPtr:
2109 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2110 SrcTy->getScalarSizeInBits();
2111 }
2112 }
2113
2114 bool CastInst::isNoopCast(const DataLayout &DL) const {
2115 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2116 }
2117
2118 /// This function determines if a pair of casts can be eliminated and what
2119 /// opcode should be used in the elimination. This assumes that there are two
2120 /// instructions like this:
2121 /// * %F = firstOpcode SrcTy %x to MidTy
2122 /// * %S = secondOpcode MidTy %F to DstTy
2123 /// The function returns a resultOpcode so these two casts can be replaced with:
2124 /// * %Replacement = resultOpcode SrcTy %x to DstTy
2125 /// If no such cast is permitted, the function returns 0.
2126 unsigned CastInst::isEliminableCastPair(
2127 Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2128 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2129 Type *DstIntPtrTy) {
2130 // Define the 169 possibilities for these two cast instructions. The values
2131 // in this matrix determine what to do in a given situation and select the
2132 // case in the switch below. The rows correspond to firstOp, the columns
2133 // correspond to secondOp. In looking at the table below, keep in mind
2134 // the following cast properties:
2135 //
2136 // Size Compare Source Destination
2137 // Operator Src ?
Size Type Sign Type Sign 2138 // -------- ------------ ------------------- --------------------- 2139 // TRUNC > Integer Any Integral Any 2140 // ZEXT < Integral Unsigned Integer Any 2141 // SEXT < Integral Signed Integer Any 2142 // FPTOUI n/a FloatPt n/a Integral Unsigned 2143 // FPTOSI n/a FloatPt n/a Integral Signed 2144 // UITOFP n/a Integral Unsigned FloatPt n/a 2145 // SITOFP n/a Integral Signed FloatPt n/a 2146 // FPTRUNC > FloatPt n/a FloatPt n/a 2147 // FPEXT < FloatPt n/a FloatPt n/a 2148 // PTRTOINT n/a Pointer n/a Integral Unsigned 2149 // INTTOPTR n/a Integral Unsigned Pointer n/a 2150 // BITCAST = FirstClass n/a FirstClass n/a 2151 // ADDRSPCST n/a Pointer n/a Pointer n/a 2152 // 2153 // NOTE: some transforms are safe, but we consider them to be non-profitable. 2154 // For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2155 // into "fptoui double to i64", but this loses information about the range 2156 // of the produced value (we no longer know the top-part is all zeros). 2157 // Further this conversion is often much more expensive for typical hardware, 2158 // and causes issues when building libgcc. We disallow fptosi+sext for the 2159 // same reason. 2160 const unsigned numCastOps = 2161 Instruction::CastOpsEnd - Instruction::CastOpsBegin; 2162 static const uint8_t CastResults[numCastOps][numCastOps] = { 2163 // T F F U S F F P I B A -+ 2164 // R Z S P P I I T P 2 N T S | 2165 // U E E 2 2 2 2 R E I T C C +- secondOp 2166 // N X X U S F F N X N 2 V V | 2167 // C T T I I P P C T T P T T -+ 2168 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+ 2169 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt | 2170 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt | 2171 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI | 2172 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI | 2173 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp 2174 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP | 2175 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc | 2176 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt | 2177 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt | 2178 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr | 2179 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast | 2180 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ 2181 }; 2182 2183 // TODO: This logic could be encoded into the table above and handled in the 2184 // switch below. 2185 // If either of the casts are a bitcast from scalar to vector, disallow the 2186 // merging. However, any pair of bitcasts are allowed. 2187 bool IsFirstBitcast = (firstOp == Instruction::BitCast); 2188 bool IsSecondBitcast = (secondOp == Instruction::BitCast); 2189 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast; 2190 2191 // Check if any of the casts convert scalars <-> vectors. 2192 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || 2193 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) 2194 if (!AreBothBitcasts) 2195 return 0; 2196 2197 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] 2198 [secondOp-Instruction::CastOpsBegin]; 2199 switch (ElimCase) { 2200 case 0: 2201 // Categorically disallowed. 2202 return 0; 2203 case 1: 2204 // Allowed, use first cast's opcode. 2205 return firstOp; 2206 case 2: 2207 // Allowed, use second cast's opcode. 
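// For instance, a zext followed by a uitofp maps here: the zero bits added by
// the zext do not affect the conversion, so
// 'uitofp (zext i8 X to i32) to float' can become 'uitofp i8 X to float'.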
2208 return secondOp;
2209 case 3:
2210 // No-op cast in second op implies firstOp as long as the DestTy
2211 // is integer and we are not converting between a vector and a
2212 // non-vector type.
2213 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2214 return firstOp;
2215 return 0;
2216 case 4:
2217 // No-op cast in second op implies firstOp as long as the DestTy
2218 // is floating point.
2219 if (DstTy->isFloatingPointTy())
2220 return firstOp;
2221 return 0;
2222 case 5:
2223 // No-op cast in first op implies secondOp as long as the SrcTy
2224 // is an integer.
2225 if (SrcTy->isIntegerTy())
2226 return secondOp;
2227 return 0;
2228 case 6:
2229 // No-op cast in first op implies secondOp as long as the SrcTy
2230 // is a floating point.
2231 if (SrcTy->isFloatingPointTy())
2232 return secondOp;
2233 return 0;
2234 case 7: {
2235 // Cannot simplify if address spaces are different!
2236 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2237 return 0;
2238
2239 unsigned MidSize = MidTy->getScalarSizeInBits();
2240 // We can still fold this without knowing the actual sizes as long as we
2241 // know that the intermediate pointer is the largest possible
2242 // pointer size.
2243 // FIXME: Is this always true?
2244 if (MidSize == 64)
2245 return Instruction::BitCast;
2246
2247 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2248 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2249 return 0;
2250 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2251 if (MidSize >= PtrSize)
2252 return Instruction::BitCast;
2253 return 0;
2254 }
2255 case 8: {
2256 // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
2257 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2258 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2259 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2260 unsigned DstSize = DstTy->getScalarSizeInBits();
2261 if (SrcSize == DstSize)
2262 return Instruction::BitCast;
2263 else if (SrcSize < DstSize)
2264 return firstOp;
2265 return secondOp;
2266 }
2267 case 9:
2268 // zext, sext -> zext, because sext can't sign extend after zext
2269 return Instruction::ZExt;
2270 case 11: {
2271 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2272 if (!MidIntPtrTy)
2273 return 0;
2274 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2275 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2276 unsigned DstSize = DstTy->getScalarSizeInBits();
2277 if (SrcSize <= PtrSize && SrcSize == DstSize)
2278 return Instruction::BitCast;
2279 return 0;
2280 }
2281 case 12:
2282 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2283 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2284 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2285 return Instruction::AddrSpaceCast;
2286 return Instruction::BitCast;
2287 case 13:
2288 // FIXME: this state can be merged with (1), but the following assert
2289 // is useful to check the correctness of the sequence due to semantic
2290 // change of bitcast.
2291 assert(
2292 SrcTy->isPtrOrPtrVectorTy() &&
2293 MidTy->isPtrOrPtrVectorTy() &&
2294 DstTy->isPtrOrPtrVectorTy() &&
2295 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2296 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2297 "Illegal addrspacecast, bitcast sequence!");
2298 // Allowed, use first cast's opcode
2299 return firstOp;
2300 case 14:
2301 // bitcast, addrspacecast -> addrspacecast if the element type of
2302 // bitcast's source is the same as that of addrspacecast's destination.
2303 if (SrcTy->getScalarType()->getPointerElementType() ==
2304 DstTy->getScalarType()->getPointerElementType())
2305 return Instruction::AddrSpaceCast;
2306 return 0;
2307 case 15:
2308 // FIXME: this state can be merged with (1), but the following assert
2309 // is useful to check the correctness of the sequence due to semantic
2310 // change of bitcast.
2311 assert(
2312 SrcTy->isIntOrIntVectorTy() &&
2313 MidTy->isPtrOrPtrVectorTy() &&
2314 DstTy->isPtrOrPtrVectorTy() &&
2315 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2316 "Illegal inttoptr, bitcast sequence!");
2317 // Allowed, use first cast's opcode
2318 return firstOp;
2319 case 16:
2320 // FIXME: this state can be merged with (2), but the following assert
2321 // is useful to check the correctness of the sequence due to semantic
2322 // change of bitcast.
2323 assert(
2324 SrcTy->isPtrOrPtrVectorTy() &&
2325 MidTy->isPtrOrPtrVectorTy() &&
2326 DstTy->isIntOrIntVectorTy() &&
2327 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2328 "Illegal bitcast, ptrtoint sequence!");
2329 // Allowed, use second cast's opcode
2330 return secondOp;
2331 case 17:
2332 // (sitofp (zext x)) -> (uitofp x)
2333 return Instruction::UIToFP;
2334 case 99:
2335 // Cast combination can't happen (error in input). This is for all cases
2336 // where the MidTy is not the same for the two cast instructions.
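// For example, a trunc (which produces an integer) can never feed an fptoui
// (which consumes a floating-point value), since no MidTy could satisfy both.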
2337 llvm_unreachable("Invalid Cast Combination"); 2338 default: 2339 llvm_unreachable("Error in CastResults table!!!"); 2340 } 2341 } 2342 2343 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2344 const Twine &Name, Instruction *InsertBefore) { 2345 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2346 // Construct and return the appropriate CastInst subclass 2347 switch (op) { 2348 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); 2349 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); 2350 case SExt: return new SExtInst (S, Ty, Name, InsertBefore); 2351 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); 2352 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); 2353 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); 2354 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); 2355 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); 2356 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); 2357 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); 2358 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); 2359 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); 2360 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore); 2361 default: llvm_unreachable("Invalid opcode provided"); 2362 } 2363 } 2364 2365 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2366 const Twine &Name, BasicBlock *InsertAtEnd) { 2367 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2368 // Construct and return the appropriate CastInst subclass 2369 switch (op) { 2370 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2371 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2372 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2373 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2374 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2375 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2376 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2377 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2378 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2379 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2380 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2381 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2382 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2383 default: llvm_unreachable("Invalid opcode provided"); 2384 } 2385 } 2386 2387 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2388 const Twine &Name, 2389 Instruction *InsertBefore) { 2390 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2391 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2392 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2393 } 2394 2395 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2396 const Twine &Name, 2397 BasicBlock *InsertAtEnd) { 2398 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2399 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2400 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2401 } 2402 2403 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2404 const Twine &Name, 2405 Instruction *InsertBefore) { 2406 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2407 return 
Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2408 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2409 } 2410 2411 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2412 const Twine &Name, 2413 BasicBlock *InsertAtEnd) { 2414 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2415 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2416 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2417 } 2418 2419 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2420 const Twine &Name, 2421 Instruction *InsertBefore) { 2422 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2423 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2424 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2425 } 2426 2427 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2428 const Twine &Name, 2429 BasicBlock *InsertAtEnd) { 2430 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2431 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2432 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2433 } 2434 2435 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2436 const Twine &Name, 2437 BasicBlock *InsertAtEnd) { 2438 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2439 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2440 "Invalid cast"); 2441 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2442 assert((!Ty->isVectorTy() || 2443 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2444 "Invalid cast"); 2445 2446 if (Ty->isIntOrIntVectorTy()) 2447 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2448 2449 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2450 } 2451 2452 /// Create a BitCast or a PtrToInt cast instruction 2453 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2454 const Twine &Name, 2455 Instruction *InsertBefore) { 2456 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2457 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2458 "Invalid cast"); 2459 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2460 assert((!Ty->isVectorTy() || 2461 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2462 "Invalid cast"); 2463 2464 if (Ty->isIntOrIntVectorTy()) 2465 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2466 2467 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2468 } 2469 2470 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2471 Value *S, Type *Ty, 2472 const Twine &Name, 2473 BasicBlock *InsertAtEnd) { 2474 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2475 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2476 2477 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2478 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2479 2480 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2481 } 2482 2483 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2484 Value *S, Type *Ty, 2485 const Twine &Name, 2486 Instruction *InsertBefore) { 2487 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2488 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2489 2490 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2491 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2492 
2493 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2494 } 2495 2496 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2497 const Twine &Name, 2498 Instruction *InsertBefore) { 2499 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2500 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2501 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2502 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2503 2504 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2505 } 2506 2507 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2508 bool isSigned, const Twine &Name, 2509 Instruction *InsertBefore) { 2510 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2511 "Invalid integer cast"); 2512 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2513 unsigned DstBits = Ty->getScalarSizeInBits(); 2514 Instruction::CastOps opcode = 2515 (SrcBits == DstBits ? Instruction::BitCast : 2516 (SrcBits > DstBits ? Instruction::Trunc : 2517 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2518 return Create(opcode, C, Ty, Name, InsertBefore); 2519 } 2520 2521 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2522 bool isSigned, const Twine &Name, 2523 BasicBlock *InsertAtEnd) { 2524 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2525 "Invalid cast"); 2526 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2527 unsigned DstBits = Ty->getScalarSizeInBits(); 2528 Instruction::CastOps opcode = 2529 (SrcBits == DstBits ? Instruction::BitCast : 2530 (SrcBits > DstBits ? Instruction::Trunc : 2531 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2532 return Create(opcode, C, Ty, Name, InsertAtEnd); 2533 } 2534 2535 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2536 const Twine &Name, 2537 Instruction *InsertBefore) { 2538 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2539 "Invalid cast"); 2540 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2541 unsigned DstBits = Ty->getScalarSizeInBits(); 2542 Instruction::CastOps opcode = 2543 (SrcBits == DstBits ? Instruction::BitCast : 2544 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2545 return Create(opcode, C, Ty, Name, InsertBefore); 2546 } 2547 2548 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2549 const Twine &Name, 2550 BasicBlock *InsertAtEnd) { 2551 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2552 "Invalid cast"); 2553 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2554 unsigned DstBits = Ty->getScalarSizeInBits(); 2555 Instruction::CastOps opcode = 2556 (SrcBits == DstBits ? Instruction::BitCast : 2557 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2558 return Create(opcode, C, Ty, Name, InsertAtEnd); 2559 } 2560 2561 // Check whether it is valid to call getCastOpcode for these types. 2562 // This routine must be kept in sync with getCastOpcode. 2563 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2564 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2565 return false; 2566 2567 if (SrcTy == DestTy) 2568 return true; 2569 2570 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2571 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2572 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2573 // An element by element cast. Valid if casting the elements is valid. 
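// (e.g. whether <4 x i32> is castable to <4 x float> reduces to whether i32 is
// castable to float).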
2574 SrcTy = SrcVecTy->getElementType(); 2575 DestTy = DestVecTy->getElementType(); 2576 } 2577 2578 // Get the bit sizes, we'll need these 2579 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2580 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2581 2582 // Run through the possibilities ... 2583 if (DestTy->isIntegerTy()) { // Casting to integral 2584 if (SrcTy->isIntegerTy()) // Casting from integral 2585 return true; 2586 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2587 return true; 2588 if (SrcTy->isVectorTy()) // Casting from vector 2589 return DestBits == SrcBits; 2590 // Casting from something else 2591 return SrcTy->isPointerTy(); 2592 } 2593 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2594 if (SrcTy->isIntegerTy()) // Casting from integral 2595 return true; 2596 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2597 return true; 2598 if (SrcTy->isVectorTy()) // Casting from vector 2599 return DestBits == SrcBits; 2600 // Casting from something else 2601 return false; 2602 } 2603 if (DestTy->isVectorTy()) // Casting to vector 2604 return DestBits == SrcBits; 2605 if (DestTy->isPointerTy()) { // Casting to pointer 2606 if (SrcTy->isPointerTy()) // Casting from pointer 2607 return true; 2608 return SrcTy->isIntegerTy(); // Casting from integral 2609 } 2610 if (DestTy->isX86_MMXTy()) { 2611 if (SrcTy->isVectorTy()) 2612 return DestBits == SrcBits; // 64-bit vector to MMX 2613 return false; 2614 } // Casting to something else 2615 return false; 2616 } 2617 2618 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2619 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2620 return false; 2621 2622 if (SrcTy == DestTy) 2623 return true; 2624 2625 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2626 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2627 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2628 // An element by element cast. Valid if casting the elements is valid. 
2629 SrcTy = SrcVecTy->getElementType(); 2630 DestTy = DestVecTy->getElementType(); 2631 } 2632 } 2633 } 2634 2635 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2636 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2637 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2638 } 2639 } 2640 2641 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2642 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2643 2644 // Could still have vectors of pointers if the number of elements doesn't 2645 // match 2646 if (SrcBits == 0 || DestBits == 0) 2647 return false; 2648 2649 if (SrcBits != DestBits) 2650 return false; 2651 2652 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2653 return false; 2654 2655 return true; 2656 } 2657 2658 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2659 const DataLayout &DL) { 2660 // ptrtoint and inttoptr are not allowed on non-integral pointers 2661 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2662 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2663 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2664 !DL.isNonIntegralPointerType(PtrTy)); 2665 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2666 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2667 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2668 !DL.isNonIntegralPointerType(PtrTy)); 2669 2670 return isBitCastable(SrcTy, DestTy); 2671 } 2672 2673 // Provide a way to get a "cast" where the cast opcode is inferred from the 2674 // types and size of the operand. This, basically, is a parallel of the 2675 // logic in the castIsValid function below. This axiom should hold: 2676 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2677 // should not assert in castIsValid. In other words, this produces a "correct" 2678 // casting opcode for the arguments passed to it. 2679 // This routine must be kept in sync with isCastable. 2680 Instruction::CastOps 2681 CastInst::getCastOpcode( 2682 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 2683 Type *SrcTy = Src->getType(); 2684 2685 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 2686 "Only first class types are castable!"); 2687 2688 if (SrcTy == DestTy) 2689 return BitCast; 2690 2691 // FIXME: Check address space sizes here 2692 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2693 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2694 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2695 // An element by element cast. Find the appropriate opcode based on the 2696 // element types. 2697 SrcTy = SrcVecTy->getElementType(); 2698 DestTy = DestVecTy->getElementType(); 2699 } 2700 2701 // Get the bit sizes, we'll need these 2702 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2703 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2704 2705 // Run through the possibilities ... 
2706 if (DestTy->isIntegerTy()) { // Casting to integral 2707 if (SrcTy->isIntegerTy()) { // Casting from integral 2708 if (DestBits < SrcBits) 2709 return Trunc; // int -> smaller int 2710 else if (DestBits > SrcBits) { // its an extension 2711 if (SrcIsSigned) 2712 return SExt; // signed -> SEXT 2713 else 2714 return ZExt; // unsigned -> ZEXT 2715 } else { 2716 return BitCast; // Same size, No-op cast 2717 } 2718 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2719 if (DestIsSigned) 2720 return FPToSI; // FP -> sint 2721 else 2722 return FPToUI; // FP -> uint 2723 } else if (SrcTy->isVectorTy()) { 2724 assert(DestBits == SrcBits && 2725 "Casting vector to integer of different width"); 2726 return BitCast; // Same size, no-op cast 2727 } else { 2728 assert(SrcTy->isPointerTy() && 2729 "Casting from a value that is not first-class type"); 2730 return PtrToInt; // ptr -> int 2731 } 2732 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2733 if (SrcTy->isIntegerTy()) { // Casting from integral 2734 if (SrcIsSigned) 2735 return SIToFP; // sint -> FP 2736 else 2737 return UIToFP; // uint -> FP 2738 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2739 if (DestBits < SrcBits) { 2740 return FPTrunc; // FP -> smaller FP 2741 } else if (DestBits > SrcBits) { 2742 return FPExt; // FP -> larger FP 2743 } else { 2744 return BitCast; // same size, no-op cast 2745 } 2746 } else if (SrcTy->isVectorTy()) { 2747 assert(DestBits == SrcBits && 2748 "Casting vector to floating point of different width"); 2749 return BitCast; // same size, no-op cast 2750 } 2751 llvm_unreachable("Casting pointer or non-first class to float"); 2752 } else if (DestTy->isVectorTy()) { 2753 assert(DestBits == SrcBits && 2754 "Illegal cast to vector (wrong type or size)"); 2755 return BitCast; 2756 } else if (DestTy->isPointerTy()) { 2757 if (SrcTy->isPointerTy()) { 2758 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 2759 return AddrSpaceCast; 2760 return BitCast; // ptr -> ptr 2761 } else if (SrcTy->isIntegerTy()) { 2762 return IntToPtr; // int -> ptr 2763 } 2764 llvm_unreachable("Casting pointer to other than pointer or int"); 2765 } else if (DestTy->isX86_MMXTy()) { 2766 if (SrcTy->isVectorTy()) { 2767 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 2768 return BitCast; // 64-bit vector to MMX 2769 } 2770 llvm_unreachable("Illegal cast to X86_MMX"); 2771 } 2772 llvm_unreachable("Casting to type that is not first-class"); 2773 } 2774 2775 //===----------------------------------------------------------------------===// 2776 // CastInst SubClass Constructors 2777 //===----------------------------------------------------------------------===// 2778 2779 /// Check that the construction parameters for a CastInst are correct. This 2780 /// could be broken out into the separate constructors but it is useful to have 2781 /// it in one place and to eliminate the redundant code for getting the sizes 2782 /// of the types involved. 
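///
/// A few concrete cases, for illustration:
///   trunc i64 %x to i32       ; accepted: integer types, source is wider
///   trunc <2 x i64> %v to i32 ; rejected: vector length does not match
///   bitcast i32* %p to float  ; rejected: pointers only bitcast to pointers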
2783 bool 2784 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 2785 // Check for type sanity on the arguments 2786 Type *SrcTy = S->getType(); 2787 2788 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 2789 SrcTy->isAggregateType() || DstTy->isAggregateType()) 2790 return false; 2791 2792 // Get the size of the types in bits, we'll need this later 2793 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 2794 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 2795 2796 // If these are vector types, get the lengths of the vectors (using zero for 2797 // scalar types means that checking that vector lengths match also checks that 2798 // scalars are not being converted to vectors or vectors to scalars). 2799 unsigned SrcLength = SrcTy->isVectorTy() ? 2800 cast<VectorType>(SrcTy)->getNumElements() : 0; 2801 unsigned DstLength = DstTy->isVectorTy() ? 2802 cast<VectorType>(DstTy)->getNumElements() : 0; 2803 2804 // Switch on the opcode provided 2805 switch (op) { 2806 default: return false; // This is an input error 2807 case Instruction::Trunc: 2808 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2809 SrcLength == DstLength && SrcBitSize > DstBitSize; 2810 case Instruction::ZExt: 2811 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2812 SrcLength == DstLength && SrcBitSize < DstBitSize; 2813 case Instruction::SExt: 2814 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 2815 SrcLength == DstLength && SrcBitSize < DstBitSize; 2816 case Instruction::FPTrunc: 2817 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2818 SrcLength == DstLength && SrcBitSize > DstBitSize; 2819 case Instruction::FPExt: 2820 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 2821 SrcLength == DstLength && SrcBitSize < DstBitSize; 2822 case Instruction::UIToFP: 2823 case Instruction::SIToFP: 2824 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 2825 SrcLength == DstLength; 2826 case Instruction::FPToUI: 2827 case Instruction::FPToSI: 2828 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 2829 SrcLength == DstLength; 2830 case Instruction::PtrToInt: 2831 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 2832 return false; 2833 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 2834 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 2835 return false; 2836 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 2837 case Instruction::IntToPtr: 2838 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 2839 return false; 2840 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 2841 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 2842 return false; 2843 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 2844 case Instruction::BitCast: { 2845 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 2846 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 2847 2848 // BitCast implies a no-op cast of type only. No bits change. 2849 // However, you can't cast pointers to anything but pointers. 2850 if (!SrcPtrTy != !DstPtrTy) 2851 return false; 2852 2853 // For non-pointer cases, the cast is okay if the source and destination bit 2854 // widths are identical. 2855 if (!SrcPtrTy) 2856 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 2857 2858 // If both are pointers then the address spaces must match. 
2859 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 2860 return false; 2861 2862 // A vector of pointers must have the same number of elements. 2863 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2864 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 2865 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 2866 2867 return false; 2868 } 2869 2870 return true; 2871 } 2872 case Instruction::AddrSpaceCast: { 2873 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 2874 if (!SrcPtrTy) 2875 return false; 2876 2877 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 2878 if (!DstPtrTy) 2879 return false; 2880 2881 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 2882 return false; 2883 2884 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2885 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 2886 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 2887 2888 return false; 2889 } 2890 2891 return true; 2892 } 2893 } 2894 } 2895 2896 TruncInst::TruncInst( 2897 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2898 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 2899 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 2900 } 2901 2902 TruncInst::TruncInst( 2903 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 2904 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 2905 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 2906 } 2907 2908 ZExtInst::ZExtInst( 2909 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2910 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 2911 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 2912 } 2913 2914 ZExtInst::ZExtInst( 2915 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 2916 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 2917 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 2918 } 2919 SExtInst::SExtInst( 2920 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2921 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 2922 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 2923 } 2924 2925 SExtInst::SExtInst( 2926 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 2927 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 2928 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 2929 } 2930 2931 FPTruncInst::FPTruncInst( 2932 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2933 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 2934 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 2935 } 2936 2937 FPTruncInst::FPTruncInst( 2938 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 2939 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 2940 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 2941 } 2942 2943 FPExtInst::FPExtInst( 2944 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2945 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 2946 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 2947 } 2948 2949 FPExtInst::FPExtInst( 2950 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 2951 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 2952 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 2953 } 2954 2955 UIToFPInst::UIToFPInst( 2956 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 2957 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { 2958 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 2959 } 
UIToFPInst::UIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

SIToFPInst::SIToFPInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToUIInst::FPToUIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

FPToSIInst::FPToSIInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

PtrToIntInst::PtrToIntInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

IntToPtrInst::IntToPtrInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

BitCastInst::BitCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}

AddrSpaceCastInst::AddrSpaceCastInst(
  Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
}
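// Illustrative sketch only: client code normally reaches the constructors
// above either directly or through CastInst::Create, which picks the subclass
// from the opcode. The names Val, Int64Ty, and InsertPt below are hypothetical
// placeholders supplied by the caller, not symbols defined in this file.
//
//   Value *Wide = CastInst::Create(Instruction::ZExt, Val, Int64Ty,
//                                  "wide", InsertPt);
//   Value *Bits = new BitCastInst(Val, Int64Ty, "bits", InsertPt);
//   assert(CastInst::castIsValid(Instruction::BitCast, Val, Int64Ty));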
//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}

CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, Instruction *InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore)
      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
                          S1, S2, Name);
    else
      return new ICmpInst(CmpInst::Predicate(predicate),
                          S1, S2, Name);
  }

  if (InsertBefore)
    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(predicate),
                        S1, S2, Name);
}

CmpInst *
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
                const Twine &Name, BasicBlock *InsertAtEnd) {
  if (Op == Instruction::ICmp) {
    return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                        S1, S2, Name);
  }
  return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
                      S1, S2, Name);
}

void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}

bool CmpInst::isEquality() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isEquality();
  return cast<FCmpInst>(this)->isEquality();
}

CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}
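// Note: getInversePredicate negates the result of the comparison (for
// example, the inverse of ICMP_SLT is ICMP_SGE), whereas getSwappedPredicate
// further below preserves the result when the two operands are exchanged
// (for example, "a slt b" holds exactly when "b sgt a" holds).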
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default: return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ: return "oeq";
  case FCmpInst::FCMP_OGT: return "ogt";
  case FCmpInst::FCMP_OGE: return "oge";
  case FCmpInst::FCMP_OLT: return "olt";
  case FCmpInst::FCMP_OLE: return "ole";
  case FCmpInst::FCMP_ONE: return "one";
  case FCmpInst::FCMP_ORD: return "ord";
  case FCmpInst::FCMP_UNO: return "uno";
  case FCmpInst::FCMP_UEQ: return "ueq";
  case FCmpInst::FCMP_UGT: return "ugt";
  case FCmpInst::FCMP_UGE: return "uge";
  case FCmpInst::FCMP_ULT: return "ult";
  case FCmpInst::FCMP_ULE: return "ule";
  case FCmpInst::FCMP_UNE: return "une";
  case FCmpInst::FCMP_TRUE: return "true";
  case ICmpInst::ICMP_EQ: return "eq";
  case ICmpInst::ICMP_NE: return "ne";
  case ICmpInst::ICMP_SGT: return "sgt";
  case ICmpInst::ICMP_SGE: return "sge";
  case ICmpInst::ICMP_SLT: return "slt";
  case ICmpInst::ICMP_SLE: return "sle";
  case ICmpInst::ICMP_UGT: return "ugt";
  case ICmpInst::ICMP_UGE: return "uge";
  case ICmpInst::ICMP_ULT: return "ult";
  case ICmpInst::ICMP_ULE: return "ule";
  }
}

ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
      return pred;
    case ICMP_UGT: return ICMP_SGT;
    case ICMP_ULT: return ICMP_SLT;
    case ICMP_UGE: return ICMP_SGE;
    case ICMP_ULE: return ICMP_SLE;
  }
}

ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
      return pred;
    case ICMP_SGT: return ICMP_UGT;
    case ICMP_SLT: return ICMP_ULT;
    case ICMP_SGE: return ICMP_UGE;
    case ICMP_SLE: return ICMP_ULE;
  }
}

CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown or unsupported cmp predicate!");
    case ICMP_SGT: return ICMP_SGE;
    case ICMP_SLT: return ICMP_SLE;
    case ICMP_SGE: return ICMP_SGT;
    case ICMP_SLE: return ICMP_SLT;
    case ICMP_UGT: return ICMP_UGE;
    case ICMP_ULT: return ICMP_ULE;
    case ICMP_UGE: return ICMP_UGT;
    case ICMP_ULE: return ICMP_ULT;

    case FCMP_OGT: return FCMP_OGE;
    case FCMP_OLT: return FCMP_OLE;
    case FCMP_OGE: return FCMP_OGT;
    case FCMP_OLE: return FCMP_OLT;
    case FCMP_UGT: return FCMP_UGE;
    case FCMP_ULT: return FCMP_ULE;
    case FCMP_UGE: return FCMP_UGT;
    case FCMP_ULE: return FCMP_ULT;
  }
}
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: case ICMP_NE:
      return pred;
    case ICMP_SGT: return ICMP_SLT;
    case ICMP_SLT: return ICMP_SGT;
    case ICMP_SGE: return ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}

CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT: return ICMP_SGE;
  case ICMP_SLT: return ICMP_SLE;
  case ICMP_UGT: return ICMP_UGE;
  case ICMP_ULT: return ICMP_ULE;
  case FCMP_OGT: return FCMP_OGE;
  case FCMP_OLT: return FCMP_OLE;
  case FCMP_UGT: return FCMP_UGE;
  case FCMP_ULT: return FCMP_ULE;
  default: return pred;
  }
}

CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}

bool CmpInst::isUnsigned(Predicate predicate) {
  switch (predicate) {
    default: return false;
    case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return true;
  }
}

bool CmpInst::isSigned(Predicate predicate) {
  switch (predicate) {
    default: return false;
    case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return true;
  }
}

bool CmpInst::isOrdered(Predicate predicate) {
  switch (predicate) {
    default: return false;
    case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
    case FCmpInst::FCMP_ORD: return true;
  }
}

bool CmpInst::isUnordered(Predicate predicate) {
  switch (predicate) {
    default: return false;
    case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_UNO: return true;
  }
}

bool CmpInst::isTrueWhenEqual(Predicate predicate) {
  switch(predicate) {
    default: return false;
    case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
    case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
  }
}

bool CmpInst::isFalseWhenEqual(Predicate predicate) {
  switch(predicate) {
  case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
  default: return false;
  }
}

bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (Pred1 == Pred2)
    return true;

  switch (Pred1) {
  default:
    break;
  case ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
           Pred2 == ICMP_SLE;
  case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
  case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
  case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
  case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
  }
  return false;
}

bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}

//===----------------------------------------------------------------------===//
// SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                     nullptr, 0, InsertBefore) {
  init(Value, Default, 2+NumCases*2);
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                     nullptr, 0, InsertAtEnd) {
  init(Value, Default, 2+NumCases*2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}

/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
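// Illustrative sketch only: a typical client builds a switch with Create and
// then fills in the cases with addCase. Cond, DefaultBB, ZeroBB, OneBB, and
// Int32Ty are hypothetical placeholders, not symbols defined in this file.
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/2);
//   SI->addCase(ConstantInt::get(Int32Ty, 0), ZeroBB);
//   SI->addCase(ConstantInt::get(Int32Ty, 1), OneBB);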
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
///
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

//===----------------------------------------------------------------------===//
// IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 2 times.
///
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : TerminatorInst(Type::getVoidTy(Address->getContext()),
                     Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : TerminatorInst(Type::getVoidTy(Address->getContext()),
                     Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                     nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
// cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(),
                                      getType()->getAddressSpace(),
                                      (Value *)getOperand(0), getAlignment());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getOperand(0), Twine(), isVolatile(),
                      getAlignment(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
                       getAlignment(), getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result =
    new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
                          getSuccessOrdering(), getFailureOrdering(),
                          getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
    new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                      getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}
FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}
FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}
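// Illustrative sketch only: cloneImpl is not called directly by clients. The
// usual entry point is Instruction::clone(), which copies metadata and flags
// and then dispatches to the cloneImpl overrides above. 'I' and 'InsertPt'
// are hypothetical placeholders.
//
//   Instruction *Copy = I->clone();   // returns a detached copy of I
//   Copy->insertBefore(InsertPt);     // the clone still has to be inserted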