//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}

//===----------------------------------------------------------------------===//
// CallSite Class
//===----------------------------------------------------------------------===//

User::op_iterator CallSite::getCallee() const {
  return cast<CallBase>(getInstruction())->op_end() - 1;
}

//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getNumElements() != VT->getNumElements())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
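
// Illustrative note (not part of the original source): under the rules above,
// operand combinations such as
//   select i1 %c, i32 %a, i32 %b
//   select <4 x i1> %cv, <4 x float> %x, <4 x float> %y
// are accepted, while mixing a <4 x i1> condition with scalar values, or
// selecting vectors whose length differs from the condition's, is rejected.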

//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr,
                  PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
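
// Illustrative example (not part of the original source): for a PHI such as
//   %p = phi i32 [ %x, %bb0 ], [ %x, %bb1 ], [ %p, %bb2 ]
// hasConstantValue() returns %x, and a PHI whose only incoming value is
// itself yields undef.  hasConstantOrUndefValue() additionally tolerates
// undef incoming values, so
//   %q = phi i32 [ %x, %bb0 ], [ undef, %bb1 ]
// reports true.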

//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;

  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
  return false;
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(Value *Func, const Twine &NameStr) {
  FTy =
      cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
    : CallBase(cast<FunctionType>(
                   cast<PointerType>(Func->getType())->getElementType())
                   ->getReturnType(),
               Instruction::Call, OperandTraits<CallBase>::op_end(this) - 1, 1,
               InsertBefore) {
  init(Func, Name);
}

CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
    : CallBase(cast<FunctionType>(
                   cast<PointerType>(Func->getType())->getElementType())
                   ->getReturnType(),
               Instruction::Call, OperandTraits<CallBase>::op_end(this) - 1, 1,
               InsertAtEnd) {
  init(Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
                                 InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  Value *MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc)) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}
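
// Illustrative sketch (not part of the original source): for a 32-bit element
// type and a non-constant array size, the helper above emits IR of roughly
// this shape (the "mallocsize"/"malloccall" names match those used above, and
// the final name is taken from the Name parameter):
//   %mallocsize = mul i64 %arraysize, 4
//   %malloccall = tail call i8* @malloc(i64 %mallocsize)
//   %arr = bitcast i8* %malloccall to i32*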

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}
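
// Illustrative sketch (not part of the original source): freeing an i32* %p
// through the helper above produces
//   %1 = bitcast i32* %p to i8*
//   tail call void @free(i8* %1)
// (the bitcast is skipped when the pointer is already i8*).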

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block, that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

//===----------------------------------------------------------------------===//
// InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpB,
                                   II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
// ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
// CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//

void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
Use other ctor"); 1033 assert(Amt->getType()->isIntegerTy() && 1034 "Allocation array size is not an integer!"); 1035 } 1036 return Amt; 1037 } 1038 1039 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1040 Instruction *InsertBefore) 1041 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {} 1042 1043 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 1044 BasicBlock *InsertAtEnd) 1045 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {} 1046 1047 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1048 const Twine &Name, Instruction *InsertBefore) 1049 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {} 1050 1051 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1052 const Twine &Name, BasicBlock *InsertAtEnd) 1053 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {} 1054 1055 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1056 unsigned Align, const Twine &Name, 1057 Instruction *InsertBefore) 1058 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1059 getAISize(Ty->getContext(), ArraySize), InsertBefore), 1060 AllocatedType(Ty) { 1061 setAlignment(Align); 1062 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1063 setName(Name); 1064 } 1065 1066 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 1067 unsigned Align, const Twine &Name, 1068 BasicBlock *InsertAtEnd) 1069 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca, 1070 getAISize(Ty->getContext(), ArraySize), InsertAtEnd), 1071 AllocatedType(Ty) { 1072 setAlignment(Align); 1073 assert(!Ty->isVoidTy() && "Cannot allocate void!"); 1074 setName(Name); 1075 } 1076 1077 void AllocaInst::setAlignment(unsigned Align) { 1078 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); 1079 assert(Align <= MaximumAlignment && 1080 "Alignment is greater than MaximumAlignment!"); 1081 setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) | 1082 (Log2_32(Align) + 1)); 1083 assert(getAlignment() == Align && "Alignment representation error!"); 1084 } 1085 1086 bool AllocaInst::isArrayAllocation() const { 1087 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0))) 1088 return !CI->isOne(); 1089 return true; 1090 } 1091 1092 /// isStaticAlloca - Return true if this alloca is in the entry block of the 1093 /// function and is a constant size. If so, the code generator will fold it 1094 /// into the prolog/epilog code, so it is basically free. 1095 bool AllocaInst::isStaticAlloca() const { 1096 // Must be constant size. 1097 if (!isa<ConstantInt>(getArraySize())) return false; 1098 1099 // Must be in the entry block. 

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size.  If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID, Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, AtomicOrdering Order,
                   SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertBef) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(false);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
                       Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(0);
  setAtomic(AtomicOrdering::NotAtomic);
  AssertOK();
  if (Name && Name[0]) setName(Name);
}

void LoadInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1)<<1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
                 cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     unsigned Align, AtomicOrdering Order,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this),
                  InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}

void StoreInst::setAlignment(unsigned Align) {
  assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
  assert(Align <= MaximumAlignment &&
         "Alignment is greater than MaximumAlignment!");
  setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                             ((Log2_32(Align)+1) << 1));
  assert(getAlignment() == Align && "Alignment representation error!");
}

//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
                 cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
// AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
             cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertBefore) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             AtomicOrdering Ordering,
                             SyncScope::ID SSID,
                             BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this),
                  InsertAtEnd) {
  Init(Operation, Ptr, Val, Ordering, SSID);
}

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
// GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

/// getIndexedType - Returns the type of the element that would be accessed with
/// a gep instruction with the specified parameters.
///
/// The Idxs pointer should point to a continuous piece of memory containing the
/// indices, either as Value* or uint64_t.
///
/// A null type is returned if the indices are invalid for the specified
/// pointer type.
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
  // Handle the special case of the empty set index set, which is always valid.
  if (IdxList.empty())
    return Agg;

  // If there is at least one index, the top level type must be sized, otherwise
  // it cannot be 'stepped over'.
  if (!Agg->isSized())
    return nullptr;

  unsigned CurIdx = 1;
  for (; CurIdx != IdxList.size(); ++CurIdx) {
    CompositeType *CT = dyn_cast<CompositeType>(Agg);
    if (!CT || CT->isPointerTy()) return nullptr;
    IndexTy Index = IdxList[CurIdx];
    if (!CT->indexValid(Index)) return nullptr;
    Agg = CT->getTypeAtIndex(Index);
  }
  return CurIdx == IdxList.size() ? Agg : nullptr;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
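
// Illustrative example (not part of the original source): for
//   %T = type { i32, [4 x i8] }
// getIndexedType(%T, {0, 1, 2}) skips the leading index, steps into the
// [4 x i8] field, and returns i8; an index that is out of range for a struct
// member yields a null type instead.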
1557 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset); 1558 } 1559 1560 //===----------------------------------------------------------------------===// 1561 // ExtractElementInst Implementation 1562 //===----------------------------------------------------------------------===// 1563 1564 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1565 const Twine &Name, 1566 Instruction *InsertBef) 1567 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1568 ExtractElement, 1569 OperandTraits<ExtractElementInst>::op_begin(this), 1570 2, InsertBef) { 1571 assert(isValidOperands(Val, Index) && 1572 "Invalid extractelement instruction operands!"); 1573 Op<0>() = Val; 1574 Op<1>() = Index; 1575 setName(Name); 1576 } 1577 1578 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, 1579 const Twine &Name, 1580 BasicBlock *InsertAE) 1581 : Instruction(cast<VectorType>(Val->getType())->getElementType(), 1582 ExtractElement, 1583 OperandTraits<ExtractElementInst>::op_begin(this), 1584 2, InsertAE) { 1585 assert(isValidOperands(Val, Index) && 1586 "Invalid extractelement instruction operands!"); 1587 1588 Op<0>() = Val; 1589 Op<1>() = Index; 1590 setName(Name); 1591 } 1592 1593 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) { 1594 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy()) 1595 return false; 1596 return true; 1597 } 1598 1599 //===----------------------------------------------------------------------===// 1600 // InsertElementInst Implementation 1601 //===----------------------------------------------------------------------===// 1602 1603 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1604 const Twine &Name, 1605 Instruction *InsertBef) 1606 : Instruction(Vec->getType(), InsertElement, 1607 OperandTraits<InsertElementInst>::op_begin(this), 1608 3, InsertBef) { 1609 assert(isValidOperands(Vec, Elt, Index) && 1610 "Invalid insertelement instruction operands!"); 1611 Op<0>() = Vec; 1612 Op<1>() = Elt; 1613 Op<2>() = Index; 1614 setName(Name); 1615 } 1616 1617 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index, 1618 const Twine &Name, 1619 BasicBlock *InsertAE) 1620 : Instruction(Vec->getType(), InsertElement, 1621 OperandTraits<InsertElementInst>::op_begin(this), 1622 3, InsertAE) { 1623 assert(isValidOperands(Vec, Elt, Index) && 1624 "Invalid insertelement instruction operands!"); 1625 1626 Op<0>() = Vec; 1627 Op<1>() = Elt; 1628 Op<2>() = Index; 1629 setName(Name); 1630 } 1631 1632 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, 1633 const Value *Index) { 1634 if (!Vec->getType()->isVectorTy()) 1635 return false; // First operand of insertelement must be vector type. 1636 1637 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType()) 1638 return false;// Second operand of insertelement must be vector element type. 1639 1640 if (!Index->getType()->isIntegerTy()) 1641 return false; // Third operand of insertelement must be i32. 
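// Examples (illustrative IR, not taken from a test) of what the validity
// checks here accept or reject:
//
//   extractelement <4 x i32> %v, i64 1        ; ok: vector + integer index
//   extractelement i32 %s, i32 0              ; rejected: %s is not a vector
//   insertelement <4 x i32> %v, i32 %x, i32 2 ; ok: element type matches
//   insertelement <4 x i32> %v, i8 %x, i32 2  ; rejected: i8 != i32 element
//
// Note that the code accepts an index of any integer width, not just i32.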
1642 return true; 1643 } 1644 1645 //===----------------------------------------------------------------------===// 1646 // ShuffleVectorInst Implementation 1647 //===----------------------------------------------------------------------===// 1648 1649 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1650 const Twine &Name, 1651 Instruction *InsertBefore) 1652 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1653 cast<VectorType>(Mask->getType())->getNumElements()), 1654 ShuffleVector, 1655 OperandTraits<ShuffleVectorInst>::op_begin(this), 1656 OperandTraits<ShuffleVectorInst>::operands(this), 1657 InsertBefore) { 1658 assert(isValidOperands(V1, V2, Mask) && 1659 "Invalid shuffle vector instruction operands!"); 1660 Op<0>() = V1; 1661 Op<1>() = V2; 1662 Op<2>() = Mask; 1663 setName(Name); 1664 } 1665 1666 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 1667 const Twine &Name, 1668 BasicBlock *InsertAtEnd) 1669 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), 1670 cast<VectorType>(Mask->getType())->getNumElements()), 1671 ShuffleVector, 1672 OperandTraits<ShuffleVectorInst>::op_begin(this), 1673 OperandTraits<ShuffleVectorInst>::operands(this), 1674 InsertAtEnd) { 1675 assert(isValidOperands(V1, V2, Mask) && 1676 "Invalid shuffle vector instruction operands!"); 1677 1678 Op<0>() = V1; 1679 Op<1>() = V2; 1680 Op<2>() = Mask; 1681 setName(Name); 1682 } 1683 1684 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2, 1685 const Value *Mask) { 1686 // V1 and V2 must be vectors of the same type. 1687 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType()) 1688 return false; 1689 1690 // Mask must be vector of i32. 1691 auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); 1692 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) 1693 return false; 1694 1695 // Check to see if Mask is valid. 1696 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) 1697 return true; 1698 1699 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { 1700 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1701 for (Value *Op : MV->operands()) { 1702 if (auto *CI = dyn_cast<ConstantInt>(Op)) { 1703 if (CI->uge(V1Size*2)) 1704 return false; 1705 } else if (!isa<UndefValue>(Op)) { 1706 return false; 1707 } 1708 } 1709 return true; 1710 } 1711 1712 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1713 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); 1714 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) 1715 if (CDS->getElementAsInteger(i) >= V1Size*2) 1716 return false; 1717 return true; 1718 } 1719 1720 // The bitcode reader can create a place holder for a forward reference 1721 // used as the shuffle mask. When this occurs, the shuffle mask will 1722 // fall into this case and fail. To avoid this error, do this bit of 1723 // ugliness to allow such a mask pass. 
1724 if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) 1725 if (CE->getOpcode() == Instruction::UserOp1) 1726 return true; 1727 1728 return false; 1729 } 1730 1731 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) { 1732 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); 1733 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) 1734 return CDS->getElementAsInteger(i); 1735 Constant *C = Mask->getAggregateElement(i); 1736 if (isa<UndefValue>(C)) 1737 return -1; 1738 return cast<ConstantInt>(C)->getZExtValue(); 1739 } 1740 1741 void ShuffleVectorInst::getShuffleMask(const Constant *Mask, 1742 SmallVectorImpl<int> &Result) { 1743 unsigned NumElts = Mask->getType()->getVectorNumElements(); 1744 1745 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { 1746 for (unsigned i = 0; i != NumElts; ++i) 1747 Result.push_back(CDS->getElementAsInteger(i)); 1748 return; 1749 } 1750 for (unsigned i = 0; i != NumElts; ++i) { 1751 Constant *C = Mask->getAggregateElement(i); 1752 Result.push_back(isa<UndefValue>(C) ? -1 : 1753 cast<ConstantInt>(C)->getZExtValue()); 1754 } 1755 } 1756 1757 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1758 assert(!Mask.empty() && "Shuffle mask must contain elements"); 1759 bool UsesLHS = false; 1760 bool UsesRHS = false; 1761 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1762 if (Mask[i] == -1) 1763 continue; 1764 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) && 1765 "Out-of-bounds shuffle mask element"); 1766 UsesLHS |= (Mask[i] < NumOpElts); 1767 UsesRHS |= (Mask[i] >= NumOpElts); 1768 if (UsesLHS && UsesRHS) 1769 return false; 1770 } 1771 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source"); 1772 return true; 1773 } 1774 1775 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) { 1776 // We don't have vector operand size information, so assume operands are the 1777 // same size as the mask. 1778 return isSingleSourceMaskImpl(Mask, Mask.size()); 1779 } 1780 1781 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) { 1782 if (!isSingleSourceMaskImpl(Mask, NumOpElts)) 1783 return false; 1784 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 1785 if (Mask[i] == -1) 1786 continue; 1787 if (Mask[i] != i && Mask[i] != (NumOpElts + i)) 1788 return false; 1789 } 1790 return true; 1791 } 1792 1793 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) { 1794 // We don't have vector operand size information, so assume operands are the 1795 // same size as the mask. 1796 return isIdentityMaskImpl(Mask, Mask.size()); 1797 } 1798 1799 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) { 1800 if (!isSingleSourceMask(Mask)) 1801 return false; 1802 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1803 if (Mask[i] == -1) 1804 continue; 1805 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i)) 1806 return false; 1807 } 1808 return true; 1809 } 1810 1811 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) { 1812 if (!isSingleSourceMask(Mask)) 1813 return false; 1814 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1815 if (Mask[i] == -1) 1816 continue; 1817 if (Mask[i] != 0 && Mask[i] != NumElts) 1818 return false; 1819 } 1820 return true; 1821 } 1822 1823 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) { 1824 // Select is differentiated from identity. It requires using both sources. 
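// Reference examples for the mask classifiers above and below (illustrative,
// assuming two <4 x T> operands, so mask values 0-3 pick from the first
// operand, 4-7 pick from the second, and -1 is an undef lane):
//
//   {0, 1, 2, 3}  isSingleSourceMask, isIdentityMask
//   {4, 5, 6, 7}  isSingleSourceMask, isIdentityMask (second operand)
//   {3, 2, 1, 0}  isSingleSourceMask, isReverseMask
//   {0, 0, 0, 0}  isSingleSourceMask, isZeroEltSplatMask
//   {0, 5, 2, 7}  isSelectMask (lane-wise choice between both operands)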
1825 if (isSingleSourceMask(Mask)) 1826 return false; 1827 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) { 1828 if (Mask[i] == -1) 1829 continue; 1830 if (Mask[i] != i && Mask[i] != (NumElts + i)) 1831 return false; 1832 } 1833 return true; 1834 } 1835 1836 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) { 1837 // Example masks that will return true: 1838 // v1 = <a, b, c, d> 1839 // v2 = <e, f, g, h> 1840 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 1841 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 1842 1843 // 1. The number of elements in the mask must be a power-of-2 and at least 2. 1844 int NumElts = Mask.size(); 1845 if (NumElts < 2 || !isPowerOf2_32(NumElts)) 1846 return false; 1847 1848 // 2. The first element of the mask must be either a 0 or a 1. 1849 if (Mask[0] != 0 && Mask[0] != 1) 1850 return false; 1851 1852 // 3. The difference between the first 2 elements must be equal to the 1853 // number of elements in the mask. 1854 if ((Mask[1] - Mask[0]) != NumElts) 1855 return false; 1856 1857 // 4. The difference between consecutive even-numbered and odd-numbered 1858 // elements must be equal to 2. 1859 for (int i = 2; i < NumElts; ++i) { 1860 int MaskEltVal = Mask[i]; 1861 if (MaskEltVal == -1) 1862 return false; 1863 int MaskEltPrevVal = Mask[i - 2]; 1864 if (MaskEltVal - MaskEltPrevVal != 2) 1865 return false; 1866 } 1867 return true; 1868 } 1869 1870 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask, 1871 int NumSrcElts, int &Index) { 1872 // Must extract from a single source. 1873 if (!isSingleSourceMaskImpl(Mask, NumSrcElts)) 1874 return false; 1875 1876 // Must be smaller (else this is an Identity shuffle). 1877 if (NumSrcElts <= (int)Mask.size()) 1878 return false; 1879 1880 // Find start of extraction, accounting that we may start with an UNDEF. 1881 int SubIndex = -1; 1882 for (int i = 0, e = Mask.size(); i != e; ++i) { 1883 int M = Mask[i]; 1884 if (M < 0) 1885 continue; 1886 int Offset = (M % NumSrcElts) - i; 1887 if (0 <= SubIndex && SubIndex != Offset) 1888 return false; 1889 SubIndex = Offset; 1890 } 1891 1892 if (0 <= SubIndex) { 1893 Index = SubIndex; 1894 return true; 1895 } 1896 return false; 1897 } 1898 1899 bool ShuffleVectorInst::isIdentityWithPadding() const { 1900 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1901 int NumMaskElts = getType()->getVectorNumElements(); 1902 if (NumMaskElts <= NumOpElts) 1903 return false; 1904 1905 // The first part of the mask must choose elements from exactly 1 source op. 1906 SmallVector<int, 16> Mask = getShuffleMask(); 1907 if (!isIdentityMaskImpl(Mask, NumOpElts)) 1908 return false; 1909 1910 // All extending must be with undef elements. 1911 for (int i = NumOpElts; i < NumMaskElts; ++i) 1912 if (Mask[i] != -1) 1913 return false; 1914 1915 return true; 1916 } 1917 1918 bool ShuffleVectorInst::isIdentityWithExtract() const { 1919 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1920 int NumMaskElts = getType()->getVectorNumElements(); 1921 if (NumMaskElts >= NumOpElts) 1922 return false; 1923 1924 return isIdentityMaskImpl(getShuffleMask(), NumOpElts); 1925 } 1926 1927 bool ShuffleVectorInst::isConcat() const { 1928 // Vector concatenation is differentiated from identity with padding. 
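// More worked examples for the predicates above (illustrative):
//
//   isTransposeMask({0, 4, 2, 6})  -> true   (trn1 of two <4 x T> vectors)
//   isTransposeMask({1, 5, 3, 7})  -> true   (trn2)
//   isTransposeMask({0, 4, 1, 5})  -> false  (stride between pairs is not 2)
//
//   int Index;
//   isExtractSubvectorMask({2, 3}, /*NumSrcElts=*/4, Index)
//     -> true, Index == 2   (extracts the high half of a <4 x T> source)
//
// isIdentityWithPadding(), isIdentityWithExtract() and isConcat() below apply
// the same mask tests, but additionally compare the mask length against the
// operand and result vector lengths of this shuffle.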
1929 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>())) 1930 return false; 1931 1932 int NumOpElts = Op<0>()->getType()->getVectorNumElements(); 1933 int NumMaskElts = getType()->getVectorNumElements(); 1934 if (NumMaskElts != NumOpElts * 2) 1935 return false; 1936 1937 // Use the mask length rather than the operands' vector lengths here. We 1938 // already know that the shuffle returns a vector twice as long as the inputs, 1939 // and neither of the inputs are undef vectors. If the mask picks consecutive 1940 // elements from both inputs, then this is a concatenation of the inputs. 1941 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); 1942 } 1943 1944 //===----------------------------------------------------------------------===// 1945 // InsertValueInst Class 1946 //===----------------------------------------------------------------------===// 1947 1948 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, 1949 const Twine &Name) { 1950 assert(getNumOperands() == 2 && "NumOperands not initialized?"); 1951 1952 // There's no fundamental reason why we require at least one index 1953 // (other than weirdness with &*IdxBegin being invalid; see 1954 // getelementptr's init routine for example). But there's no 1955 // present need to support it. 1956 assert(!Idxs.empty() && "InsertValueInst must have at least one index"); 1957 1958 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == 1959 Val->getType() && "Inserted value must match indexed type!"); 1960 Op<0>() = Agg; 1961 Op<1>() = Val; 1962 1963 Indices.append(Idxs.begin(), Idxs.end()); 1964 setName(Name); 1965 } 1966 1967 InsertValueInst::InsertValueInst(const InsertValueInst &IVI) 1968 : Instruction(IVI.getType(), InsertValue, 1969 OperandTraits<InsertValueInst>::op_begin(this), 2), 1970 Indices(IVI.Indices) { 1971 Op<0>() = IVI.getOperand(0); 1972 Op<1>() = IVI.getOperand(1); 1973 SubclassOptionalData = IVI.SubclassOptionalData; 1974 } 1975 1976 //===----------------------------------------------------------------------===// 1977 // ExtractValueInst Class 1978 //===----------------------------------------------------------------------===// 1979 1980 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) { 1981 assert(getNumOperands() == 1 && "NumOperands not initialized?"); 1982 1983 // There's no fundamental reason why we require at least one index. 1984 // But there's no present need to support it. 1985 assert(!Idxs.empty() && "ExtractValueInst must have at least one index"); 1986 1987 Indices.append(Idxs.begin(), Idxs.end()); 1988 setName(Name); 1989 } 1990 1991 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI) 1992 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)), 1993 Indices(EVI.Indices) { 1994 SubclassOptionalData = EVI.SubclassOptionalData; 1995 } 1996 1997 // getIndexedType - Returns the type of the element that would be extracted 1998 // with an extractvalue instruction with the specified parameters. 1999 // 2000 // A null type is returned if the indices are invalid for the specified 2001 // pointer type. 2002 // 2003 Type *ExtractValueInst::getIndexedType(Type *Agg, 2004 ArrayRef<unsigned> Idxs) { 2005 for (unsigned Index : Idxs) { 2006 // We can't use CompositeType::indexValid(Index) here. 2007 // indexValid() always returns true for arrays because getelementptr allows 2008 // out-of-bounds indices. Since we don't allow those for extractvalue and 2009 // insertvalue we need to check array indexing manually. 
2010 // Since the only other types we can index into are struct types it's just 2011 // as easy to check those manually as well. 2012 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { 2013 if (Index >= AT->getNumElements()) 2014 return nullptr; 2015 } else if (StructType *ST = dyn_cast<StructType>(Agg)) { 2016 if (Index >= ST->getNumElements()) 2017 return nullptr; 2018 } else { 2019 // Not a valid type to index into. 2020 return nullptr; 2021 } 2022 2023 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index); 2024 } 2025 return const_cast<Type*>(Agg); 2026 } 2027 2028 //===----------------------------------------------------------------------===// 2029 // UnaryOperator Class 2030 //===----------------------------------------------------------------------===// 2031 2032 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2033 Type *Ty, const Twine &Name, 2034 Instruction *InsertBefore) 2035 : UnaryInstruction(Ty, iType, S, InsertBefore) { 2036 Op<0>() = S; 2037 setName(Name); 2038 AssertOK(); 2039 } 2040 2041 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, 2042 Type *Ty, const Twine &Name, 2043 BasicBlock *InsertAtEnd) 2044 : UnaryInstruction(Ty, iType, S, InsertAtEnd) { 2045 Op<0>() = S; 2046 setName(Name); 2047 AssertOK(); 2048 } 2049 2050 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2051 const Twine &Name, 2052 Instruction *InsertBefore) { 2053 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore); 2054 } 2055 2056 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, 2057 const Twine &Name, 2058 BasicBlock *InsertAtEnd) { 2059 UnaryOperator *Res = Create(Op, S, Name); 2060 InsertAtEnd->getInstList().push_back(Res); 2061 return Res; 2062 } 2063 2064 void UnaryOperator::AssertOK() { 2065 Value *LHS = getOperand(0); 2066 (void)LHS; // Silence warnings. 2067 #ifndef NDEBUG 2068 switch (getOpcode()) { 2069 case FNeg: 2070 assert(getType() == LHS->getType() && 2071 "Unary operation should return same type as operand!"); 2072 assert(getType()->isFPOrFPVectorTy() && 2073 "Tried to create a floating-point operation on a " 2074 "non-floating-point type!"); 2075 break; 2076 default: llvm_unreachable("Invalid opcode provided"); 2077 } 2078 #endif 2079 } 2080 2081 //===----------------------------------------------------------------------===// 2082 // BinaryOperator Class 2083 //===----------------------------------------------------------------------===// 2084 2085 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2086 Type *Ty, const Twine &Name, 2087 Instruction *InsertBefore) 2088 : Instruction(Ty, iType, 2089 OperandTraits<BinaryOperator>::op_begin(this), 2090 OperandTraits<BinaryOperator>::operands(this), 2091 InsertBefore) { 2092 Op<0>() = S1; 2093 Op<1>() = S2; 2094 setName(Name); 2095 AssertOK(); 2096 } 2097 2098 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, 2099 Type *Ty, const Twine &Name, 2100 BasicBlock *InsertAtEnd) 2101 : Instruction(Ty, iType, 2102 OperandTraits<BinaryOperator>::op_begin(this), 2103 OperandTraits<BinaryOperator>::operands(this), 2104 InsertAtEnd) { 2105 Op<0>() = S1; 2106 Op<1>() = S2; 2107 setName(Name); 2108 AssertOK(); 2109 } 2110 2111 void BinaryOperator::AssertOK() { 2112 Value *LHS = getOperand(0), *RHS = getOperand(1); 2113 (void)LHS; (void)RHS; // Silence warnings. 
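// Example (illustrative; X, Y and the insertion point IP are assumed to exist
// in the caller): the Create() helpers of UnaryOperator (above) and
// BinaryOperator (below) take the result type from the operand(s), so a
// caller only supplies the opcode and values:
//
//   Value *FNegX = UnaryOperator::Create(Instruction::FNeg, X, "neg", IP);
//   Value *Sum   = BinaryOperator::Create(Instruction::FAdd, X, Y, "sum", IP);
//
// Both operands of the binary operator must already have the same type, which
// is what AssertOK() re-checks in debug builds.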
2114 assert(LHS->getType() == RHS->getType() && 2115 "Binary operator operand types must match!"); 2116 #ifndef NDEBUG 2117 switch (getOpcode()) { 2118 case Add: case Sub: 2119 case Mul: 2120 assert(getType() == LHS->getType() && 2121 "Arithmetic operation should return same type as operands!"); 2122 assert(getType()->isIntOrIntVectorTy() && 2123 "Tried to create an integer operation on a non-integer type!"); 2124 break; 2125 case FAdd: case FSub: 2126 case FMul: 2127 assert(getType() == LHS->getType() && 2128 "Arithmetic operation should return same type as operands!"); 2129 assert(getType()->isFPOrFPVectorTy() && 2130 "Tried to create a floating-point operation on a " 2131 "non-floating-point type!"); 2132 break; 2133 case UDiv: 2134 case SDiv: 2135 assert(getType() == LHS->getType() && 2136 "Arithmetic operation should return same type as operands!"); 2137 assert(getType()->isIntOrIntVectorTy() && 2138 "Incorrect operand type (not integer) for S/UDIV"); 2139 break; 2140 case FDiv: 2141 assert(getType() == LHS->getType() && 2142 "Arithmetic operation should return same type as operands!"); 2143 assert(getType()->isFPOrFPVectorTy() && 2144 "Incorrect operand type (not floating point) for FDIV"); 2145 break; 2146 case URem: 2147 case SRem: 2148 assert(getType() == LHS->getType() && 2149 "Arithmetic operation should return same type as operands!"); 2150 assert(getType()->isIntOrIntVectorTy() && 2151 "Incorrect operand type (not integer) for S/UREM"); 2152 break; 2153 case FRem: 2154 assert(getType() == LHS->getType() && 2155 "Arithmetic operation should return same type as operands!"); 2156 assert(getType()->isFPOrFPVectorTy() && 2157 "Incorrect operand type (not floating point) for FREM"); 2158 break; 2159 case Shl: 2160 case LShr: 2161 case AShr: 2162 assert(getType() == LHS->getType() && 2163 "Shift operation should return same type as operands!"); 2164 assert(getType()->isIntOrIntVectorTy() && 2165 "Tried to create a shift operation on a non-integral type!"); 2166 break; 2167 case And: case Or: 2168 case Xor: 2169 assert(getType() == LHS->getType() && 2170 "Logical operation should return same type as operands!"); 2171 assert(getType()->isIntOrIntVectorTy() && 2172 "Tried to create a logical operation on a non-integral type!"); 2173 break; 2174 default: llvm_unreachable("Invalid opcode provided"); 2175 } 2176 #endif 2177 } 2178 2179 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2180 const Twine &Name, 2181 Instruction *InsertBefore) { 2182 assert(S1->getType() == S2->getType() && 2183 "Cannot create binary operator with two operands of differing type!"); 2184 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore); 2185 } 2186 2187 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2, 2188 const Twine &Name, 2189 BasicBlock *InsertAtEnd) { 2190 BinaryOperator *Res = Create(Op, S1, S2, Name); 2191 InsertAtEnd->getInstList().push_back(Res); 2192 return Res; 2193 } 2194 2195 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2196 Instruction *InsertBefore) { 2197 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2198 return new BinaryOperator(Instruction::Sub, 2199 zero, Op, 2200 Op->getType(), Name, InsertBefore); 2201 } 2202 2203 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, 2204 BasicBlock *InsertAtEnd) { 2205 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2206 return new BinaryOperator(Instruction::Sub, 2207 zero, Op, 2208 Op->getType(), 
Name, InsertAtEnd); 2209 } 2210 2211 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2212 Instruction *InsertBefore) { 2213 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2214 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); 2215 } 2216 2217 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, 2218 BasicBlock *InsertAtEnd) { 2219 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2220 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); 2221 } 2222 2223 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2224 Instruction *InsertBefore) { 2225 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2226 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore); 2227 } 2228 2229 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name, 2230 BasicBlock *InsertAtEnd) { 2231 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2232 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd); 2233 } 2234 2235 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2236 Instruction *InsertBefore) { 2237 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2238 return new BinaryOperator(Instruction::FSub, zero, Op, 2239 Op->getType(), Name, InsertBefore); 2240 } 2241 2242 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, 2243 BasicBlock *InsertAtEnd) { 2244 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); 2245 return new BinaryOperator(Instruction::FSub, zero, Op, 2246 Op->getType(), Name, InsertAtEnd); 2247 } 2248 2249 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2250 Instruction *InsertBefore) { 2251 Constant *C = Constant::getAllOnesValue(Op->getType()); 2252 return new BinaryOperator(Instruction::Xor, Op, C, 2253 Op->getType(), Name, InsertBefore); 2254 } 2255 2256 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, 2257 BasicBlock *InsertAtEnd) { 2258 Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); 2259 return new BinaryOperator(Instruction::Xor, Op, AllOnes, 2260 Op->getType(), Name, InsertAtEnd); 2261 } 2262 2263 // Exchange the two operands to this instruction. This instruction is safe to 2264 // use on any binary instruction and does not modify the semantics of the 2265 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2266 // is changed. 2267 bool BinaryOperator::swapOperands() { 2268 if (!isCommutative()) 2269 return true; // Can't commute operands 2270 Op<0>().swap(Op<1>()); 2271 return false; 2272 } 2273 2274 //===----------------------------------------------------------------------===// 2275 // FPMathOperator Class 2276 //===----------------------------------------------------------------------===// 2277 2278 float FPMathOperator::getFPAccuracy() const { 2279 const MDNode *MD = 2280 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); 2281 if (!MD) 2282 return 0.0; 2283 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0)); 2284 return Accuracy->getValueAPF().convertToFloat(); 2285 } 2286 2287 //===----------------------------------------------------------------------===// 2288 // CastInst Class 2289 //===----------------------------------------------------------------------===// 2290 2291 // Just determine if this cast only deals with integral->integral conversion. 
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
    default: return false;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::Trunc:
      return true;
    case Instruction::BitCast:
      return getOperand(0)->getType()->isIntegerTy() &&
        getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target information may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.
In looking at the table below, keep in mind 2379 // the following cast properties: 2380 // 2381 // Size Compare Source Destination 2382 // Operator Src ? Size Type Sign Type Sign 2383 // -------- ------------ ------------------- --------------------- 2384 // TRUNC > Integer Any Integral Any 2385 // ZEXT < Integral Unsigned Integer Any 2386 // SEXT < Integral Signed Integer Any 2387 // FPTOUI n/a FloatPt n/a Integral Unsigned 2388 // FPTOSI n/a FloatPt n/a Integral Signed 2389 // UITOFP n/a Integral Unsigned FloatPt n/a 2390 // SITOFP n/a Integral Signed FloatPt n/a 2391 // FPTRUNC > FloatPt n/a FloatPt n/a 2392 // FPEXT < FloatPt n/a FloatPt n/a 2393 // PTRTOINT n/a Pointer n/a Integral Unsigned 2394 // INTTOPTR n/a Integral Unsigned Pointer n/a 2395 // BITCAST = FirstClass n/a FirstClass n/a 2396 // ADDRSPCST n/a Pointer n/a Pointer n/a 2397 // 2398 // NOTE: some transforms are safe, but we consider them to be non-profitable. 2399 // For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2400 // into "fptoui double to i64", but this loses information about the range 2401 // of the produced value (we no longer know the top-part is all zeros). 2402 // Further this conversion is often much more expensive for typical hardware, 2403 // and causes issues when building libgcc. We disallow fptosi+sext for the 2404 // same reason. 2405 const unsigned numCastOps = 2406 Instruction::CastOpsEnd - Instruction::CastOpsBegin; 2407 static const uint8_t CastResults[numCastOps][numCastOps] = { 2408 // T F F U S F F P I B A -+ 2409 // R Z S P P I I T P 2 N T S | 2410 // U E E 2 2 2 2 R E I T C C +- secondOp 2411 // N X X U S F F N X N 2 V V | 2412 // C T T I I P P C T T P T T -+ 2413 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+ 2414 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt | 2415 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt | 2416 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI | 2417 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI | 2418 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp 2419 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP | 2420 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc | 2421 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt | 2422 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt | 2423 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr | 2424 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast | 2425 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ 2426 }; 2427 2428 // TODO: This logic could be encoded into the table above and handled in the 2429 // switch below. 2430 // If either of the casts are a bitcast from scalar to vector, disallow the 2431 // merging. However, any pair of bitcasts are allowed. 2432 bool IsFirstBitcast = (firstOp == Instruction::BitCast); 2433 bool IsSecondBitcast = (secondOp == Instruction::BitCast); 2434 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast; 2435 2436 // Check if any of the casts convert scalars <-> vectors. 2437 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) || 2438 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy))) 2439 if (!AreBothBitcasts) 2440 return 0; 2441 2442 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin] 2443 [secondOp-Instruction::CastOpsBegin]; 2444 switch (ElimCase) { 2445 case 0: 2446 // Categorically disallowed. 2447 return 0; 2448 case 1: 2449 // Allowed, use first cast's opcode. 2450 return firstOp; 2451 case 2: 2452 // Allowed, use second cast's opcode. 
    return secondOp;
  case 3:
    // No-op cast in second op implies firstOp as long as the DestTy
    // is integer and we are not converting between a vector and a
    // non-vector type.
    if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
      return firstOp;
    return 0;
  case 4:
    // No-op cast in second op implies firstOp as long as the DestTy
    // is floating point.
    if (DstTy->isFloatingPointTy())
      return firstOp;
    return 0;
  case 5:
    // No-op cast in first op implies secondOp as long as the SrcTy
    // is an integer.
    if (SrcTy->isIntegerTy())
      return secondOp;
    return 0;
  case 6:
    // No-op cast in first op implies secondOp as long as the SrcTy
    // is a floating point.
    if (SrcTy->isFloatingPointTy())
      return secondOp;
    return 0;
  case 7: {
    // Cannot simplify if address spaces are different!
    if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
      return 0;

    unsigned MidSize = MidTy->getScalarSizeInBits();
    // We can still fold this without knowing the actual sizes as long as we
    // know that the intermediate pointer is the largest possible
    // pointer size.
    // FIXME: Is this always true?
    if (MidSize == 64)
      return Instruction::BitCast;

    // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
    if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
      return 0;
    unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
    if (MidSize >= PtrSize)
      return Instruction::BitCast;
    return 0;
  }
  case 8: {
    // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
    // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
    // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
    unsigned SrcSize = SrcTy->getScalarSizeInBits();
    unsigned DstSize = DstTy->getScalarSizeInBits();
    if (SrcSize == DstSize)
      return Instruction::BitCast;
    else if (SrcSize < DstSize)
      return firstOp;
    return secondOp;
  }
  case 9:
    // zext, sext -> zext, because sext can't sign extend after zext
    return Instruction::ZExt;
  case 11: {
    // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
    if (!MidIntPtrTy)
      return 0;
    unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
    unsigned SrcSize = SrcTy->getScalarSizeInBits();
    unsigned DstSize = DstTy->getScalarSizeInBits();
    if (SrcSize <= PtrSize && SrcSize == DstSize)
      return Instruction::BitCast;
    return 0;
  }
  case 12:
    // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
    // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
    if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
      return Instruction::AddrSpaceCast;
    return Instruction::BitCast;
  case 13:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
    assert(
      SrcTy->isPtrOrPtrVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
      MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
      "Illegal addrspacecast, bitcast sequence!");
    // Allowed, use first cast's opcode
    return firstOp;
  case 14:
    // bitcast, addrspacecast -> addrspacecast if the element type of
    // bitcast's source is the same as that of addrspacecast's destination.
    if (SrcTy->getScalarType()->getPointerElementType() ==
        DstTy->getScalarType()->getPointerElementType())
      return Instruction::AddrSpaceCast;
    return 0;
  case 15:
    // FIXME: this state can be merged with (1), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
    assert(
      SrcTy->isIntOrIntVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isPtrOrPtrVectorTy() &&
      MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
      "Illegal inttoptr, bitcast sequence!");
    // Allowed, use first cast's opcode
    return firstOp;
  case 16:
    // FIXME: this state can be merged with (2), but the following assert
    // is useful to check the correctness of the sequence due to the semantic
    // change of bitcast.
    assert(
      SrcTy->isPtrOrPtrVectorTy() &&
      MidTy->isPtrOrPtrVectorTy() &&
      DstTy->isIntOrIntVectorTy() &&
      SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
      "Illegal bitcast, ptrtoint sequence!");
    // Allowed, use second cast's opcode
    return secondOp;
  case 17:
    // (sitofp (zext x)) -> (uitofp x)
    return Instruction::UIToFP;
  case 99:
    // Cast combination can't happen (error in input). This is for all cases
    // where the MidTy is not the same for the two cast instructions.
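// Worked example for the table and switch above (illustrative):
//
//   %F = zext i16 %x to i32      ; firstOp  = ZExt
//   %S = trunc i32 %F to i8      ; secondOp = Trunc
//
// The (ZExt, Trunc) entry selects case 8; since sizeof(i16) > sizeof(i8) the
// pair folds to the second opcode, i.e. a single "trunc i16 %x to i8".
// Callers pass the DataLayout-derived IntPtr types (or null) so that the
// pointer-related cases (7 and 11) can be decided as well.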
2582 llvm_unreachable("Invalid Cast Combination"); 2583 default: 2584 llvm_unreachable("Error in CastResults table!!!"); 2585 } 2586 } 2587 2588 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2589 const Twine &Name, Instruction *InsertBefore) { 2590 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2591 // Construct and return the appropriate CastInst subclass 2592 switch (op) { 2593 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore); 2594 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore); 2595 case SExt: return new SExtInst (S, Ty, Name, InsertBefore); 2596 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore); 2597 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore); 2598 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore); 2599 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore); 2600 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore); 2601 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore); 2602 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore); 2603 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore); 2604 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore); 2605 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore); 2606 default: llvm_unreachable("Invalid opcode provided"); 2607 } 2608 } 2609 2610 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty, 2611 const Twine &Name, BasicBlock *InsertAtEnd) { 2612 assert(castIsValid(op, S, Ty) && "Invalid cast!"); 2613 // Construct and return the appropriate CastInst subclass 2614 switch (op) { 2615 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd); 2616 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd); 2617 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd); 2618 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd); 2619 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd); 2620 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd); 2621 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd); 2622 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd); 2623 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd); 2624 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd); 2625 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd); 2626 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd); 2627 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd); 2628 default: llvm_unreachable("Invalid opcode provided"); 2629 } 2630 } 2631 2632 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2633 const Twine &Name, 2634 Instruction *InsertBefore) { 2635 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2636 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2637 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore); 2638 } 2639 2640 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, 2641 const Twine &Name, 2642 BasicBlock *InsertAtEnd) { 2643 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2644 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2645 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd); 2646 } 2647 2648 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2649 const Twine &Name, 2650 Instruction *InsertBefore) { 2651 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2652 return 
Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2653 return Create(Instruction::SExt, S, Ty, Name, InsertBefore); 2654 } 2655 2656 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, 2657 const Twine &Name, 2658 BasicBlock *InsertAtEnd) { 2659 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2660 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2661 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd); 2662 } 2663 2664 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2665 const Twine &Name, 2666 Instruction *InsertBefore) { 2667 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2668 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2669 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore); 2670 } 2671 2672 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, 2673 const Twine &Name, 2674 BasicBlock *InsertAtEnd) { 2675 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits()) 2676 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2677 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd); 2678 } 2679 2680 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2681 const Twine &Name, 2682 BasicBlock *InsertAtEnd) { 2683 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2684 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2685 "Invalid cast"); 2686 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2687 assert((!Ty->isVectorTy() || 2688 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2689 "Invalid cast"); 2690 2691 if (Ty->isIntOrIntVectorTy()) 2692 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd); 2693 2694 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd); 2695 } 2696 2697 /// Create a BitCast or a PtrToInt cast instruction 2698 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, 2699 const Twine &Name, 2700 Instruction *InsertBefore) { 2701 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2702 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) && 2703 "Invalid cast"); 2704 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); 2705 assert((!Ty->isVectorTy() || 2706 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && 2707 "Invalid cast"); 2708 2709 if (Ty->isIntOrIntVectorTy()) 2710 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2711 2712 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore); 2713 } 2714 2715 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2716 Value *S, Type *Ty, 2717 const Twine &Name, 2718 BasicBlock *InsertAtEnd) { 2719 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2720 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2721 2722 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2723 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd); 2724 2725 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd); 2726 } 2727 2728 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast( 2729 Value *S, Type *Ty, 2730 const Twine &Name, 2731 Instruction *InsertBefore) { 2732 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast"); 2733 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast"); 2734 2735 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace()) 2736 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore); 2737 
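// Example of how the pointer-cast helpers above choose an opcode
// (illustrative; %p is a hypothetical i8* in address space 0):
//
//   CreatePointerCast(%p, i64)                -> ptrtoint
//   CreatePointerCast(%p, i32*)               -> bitcast
//   CreatePointerCast(%p, i32 addrspace(1)*)  -> addrspacecast
//
// The last two are delegated to CreatePointerBitCastOrAddrSpaceCast, which
// only compares address spaces; it never inspects pointee types.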
2738 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2739 } 2740 2741 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty, 2742 const Twine &Name, 2743 Instruction *InsertBefore) { 2744 if (S->getType()->isPointerTy() && Ty->isIntegerTy()) 2745 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore); 2746 if (S->getType()->isIntegerTy() && Ty->isPointerTy()) 2747 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore); 2748 2749 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore); 2750 } 2751 2752 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2753 bool isSigned, const Twine &Name, 2754 Instruction *InsertBefore) { 2755 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2756 "Invalid integer cast"); 2757 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2758 unsigned DstBits = Ty->getScalarSizeInBits(); 2759 Instruction::CastOps opcode = 2760 (SrcBits == DstBits ? Instruction::BitCast : 2761 (SrcBits > DstBits ? Instruction::Trunc : 2762 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2763 return Create(opcode, C, Ty, Name, InsertBefore); 2764 } 2765 2766 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, 2767 bool isSigned, const Twine &Name, 2768 BasicBlock *InsertAtEnd) { 2769 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() && 2770 "Invalid cast"); 2771 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2772 unsigned DstBits = Ty->getScalarSizeInBits(); 2773 Instruction::CastOps opcode = 2774 (SrcBits == DstBits ? Instruction::BitCast : 2775 (SrcBits > DstBits ? Instruction::Trunc : 2776 (isSigned ? Instruction::SExt : Instruction::ZExt))); 2777 return Create(opcode, C, Ty, Name, InsertAtEnd); 2778 } 2779 2780 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2781 const Twine &Name, 2782 Instruction *InsertBefore) { 2783 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2784 "Invalid cast"); 2785 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2786 unsigned DstBits = Ty->getScalarSizeInBits(); 2787 Instruction::CastOps opcode = 2788 (SrcBits == DstBits ? Instruction::BitCast : 2789 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2790 return Create(opcode, C, Ty, Name, InsertBefore); 2791 } 2792 2793 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, 2794 const Twine &Name, 2795 BasicBlock *InsertAtEnd) { 2796 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() && 2797 "Invalid cast"); 2798 unsigned SrcBits = C->getType()->getScalarSizeInBits(); 2799 unsigned DstBits = Ty->getScalarSizeInBits(); 2800 Instruction::CastOps opcode = 2801 (SrcBits == DstBits ? Instruction::BitCast : 2802 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt)); 2803 return Create(opcode, C, Ty, Name, InsertAtEnd); 2804 } 2805 2806 // Check whether it is valid to call getCastOpcode for these types. 2807 // This routine must be kept in sync with getCastOpcode. 2808 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) { 2809 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2810 return false; 2811 2812 if (SrcTy == DestTy) 2813 return true; 2814 2815 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2816 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2817 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2818 // An element by element cast. Valid if casting the elements is valid. 
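// Example of the opcode selection in CreateIntegerCast / CreateFPCast above
// (illustrative):
//
//   CreateIntegerCast(i64 %x, i32, ...)          -> trunc  (either signedness)
//   CreateIntegerCast(i32 %x, i64, /*signed*/)   -> sext
//   CreateIntegerCast(i32 %x, i64, /*unsigned*/) -> zext
//   CreateIntegerCast(i32 %x, i32, ...)          -> bitcast (same width)
//   CreateFPCast(double %d, float)               -> fptrunc
//   CreateFPCast(float %f, double)               -> fpext
//
// Only the scalar bit widths matter, so the same rules apply element-wise to
// vectors of integers or floating-point values.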
2819 SrcTy = SrcVecTy->getElementType(); 2820 DestTy = DestVecTy->getElementType(); 2821 } 2822 2823 // Get the bit sizes, we'll need these 2824 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2825 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2826 2827 // Run through the possibilities ... 2828 if (DestTy->isIntegerTy()) { // Casting to integral 2829 if (SrcTy->isIntegerTy()) // Casting from integral 2830 return true; 2831 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2832 return true; 2833 if (SrcTy->isVectorTy()) // Casting from vector 2834 return DestBits == SrcBits; 2835 // Casting from something else 2836 return SrcTy->isPointerTy(); 2837 } 2838 if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2839 if (SrcTy->isIntegerTy()) // Casting from integral 2840 return true; 2841 if (SrcTy->isFloatingPointTy()) // Casting from floating pt 2842 return true; 2843 if (SrcTy->isVectorTy()) // Casting from vector 2844 return DestBits == SrcBits; 2845 // Casting from something else 2846 return false; 2847 } 2848 if (DestTy->isVectorTy()) // Casting to vector 2849 return DestBits == SrcBits; 2850 if (DestTy->isPointerTy()) { // Casting to pointer 2851 if (SrcTy->isPointerTy()) // Casting from pointer 2852 return true; 2853 return SrcTy->isIntegerTy(); // Casting from integral 2854 } 2855 if (DestTy->isX86_MMXTy()) { 2856 if (SrcTy->isVectorTy()) 2857 return DestBits == SrcBits; // 64-bit vector to MMX 2858 return false; 2859 } // Casting to something else 2860 return false; 2861 } 2862 2863 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) { 2864 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType()) 2865 return false; 2866 2867 if (SrcTy == DestTy) 2868 return true; 2869 2870 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 2871 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { 2872 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2873 // An element by element cast. Valid if casting the elements is valid. 
2874 SrcTy = SrcVecTy->getElementType(); 2875 DestTy = DestVecTy->getElementType(); 2876 } 2877 } 2878 } 2879 2880 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { 2881 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { 2882 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); 2883 } 2884 } 2885 2886 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2887 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2888 2889 // Could still have vectors of pointers if the number of elements doesn't 2890 // match 2891 if (SrcBits == 0 || DestBits == 0) 2892 return false; 2893 2894 if (SrcBits != DestBits) 2895 return false; 2896 2897 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy()) 2898 return false; 2899 2900 return true; 2901 } 2902 2903 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, 2904 const DataLayout &DL) { 2905 // ptrtoint and inttoptr are not allowed on non-integral pointers 2906 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy)) 2907 if (auto *IntTy = dyn_cast<IntegerType>(DestTy)) 2908 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2909 !DL.isNonIntegralPointerType(PtrTy)); 2910 if (auto *PtrTy = dyn_cast<PointerType>(DestTy)) 2911 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy)) 2912 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) && 2913 !DL.isNonIntegralPointerType(PtrTy)); 2914 2915 return isBitCastable(SrcTy, DestTy); 2916 } 2917 2918 // Provide a way to get a "cast" where the cast opcode is inferred from the 2919 // types and size of the operand. This, basically, is a parallel of the 2920 // logic in the castIsValid function below. This axiom should hold: 2921 // castIsValid( getCastOpcode(Val, Ty), Val, Ty) 2922 // should not assert in castIsValid. In other words, this produces a "correct" 2923 // casting opcode for the arguments passed to it. 2924 // This routine must be kept in sync with isCastable. 2925 Instruction::CastOps 2926 CastInst::getCastOpcode( 2927 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) { 2928 Type *SrcTy = Src->getType(); 2929 2930 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() && 2931 "Only first class types are castable!"); 2932 2933 if (SrcTy == DestTy) 2934 return BitCast; 2935 2936 // FIXME: Check address space sizes here 2937 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) 2938 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) 2939 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { 2940 // An element by element cast. Find the appropriate opcode based on the 2941 // element types. 2942 SrcTy = SrcVecTy->getElementType(); 2943 DestTy = DestVecTy->getElementType(); 2944 } 2945 2946 // Get the bit sizes, we'll need these 2947 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr 2948 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr 2949 2950 // Run through the possibilities ... 
2951 if (DestTy->isIntegerTy()) { // Casting to integral 2952 if (SrcTy->isIntegerTy()) { // Casting from integral 2953 if (DestBits < SrcBits) 2954 return Trunc; // int -> smaller int 2955 else if (DestBits > SrcBits) { // its an extension 2956 if (SrcIsSigned) 2957 return SExt; // signed -> SEXT 2958 else 2959 return ZExt; // unsigned -> ZEXT 2960 } else { 2961 return BitCast; // Same size, No-op cast 2962 } 2963 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2964 if (DestIsSigned) 2965 return FPToSI; // FP -> sint 2966 else 2967 return FPToUI; // FP -> uint 2968 } else if (SrcTy->isVectorTy()) { 2969 assert(DestBits == SrcBits && 2970 "Casting vector to integer of different width"); 2971 return BitCast; // Same size, no-op cast 2972 } else { 2973 assert(SrcTy->isPointerTy() && 2974 "Casting from a value that is not first-class type"); 2975 return PtrToInt; // ptr -> int 2976 } 2977 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt 2978 if (SrcTy->isIntegerTy()) { // Casting from integral 2979 if (SrcIsSigned) 2980 return SIToFP; // sint -> FP 2981 else 2982 return UIToFP; // uint -> FP 2983 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt 2984 if (DestBits < SrcBits) { 2985 return FPTrunc; // FP -> smaller FP 2986 } else if (DestBits > SrcBits) { 2987 return FPExt; // FP -> larger FP 2988 } else { 2989 return BitCast; // same size, no-op cast 2990 } 2991 } else if (SrcTy->isVectorTy()) { 2992 assert(DestBits == SrcBits && 2993 "Casting vector to floating point of different width"); 2994 return BitCast; // same size, no-op cast 2995 } 2996 llvm_unreachable("Casting pointer or non-first class to float"); 2997 } else if (DestTy->isVectorTy()) { 2998 assert(DestBits == SrcBits && 2999 "Illegal cast to vector (wrong type or size)"); 3000 return BitCast; 3001 } else if (DestTy->isPointerTy()) { 3002 if (SrcTy->isPointerTy()) { 3003 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace()) 3004 return AddrSpaceCast; 3005 return BitCast; // ptr -> ptr 3006 } else if (SrcTy->isIntegerTy()) { 3007 return IntToPtr; // int -> ptr 3008 } 3009 llvm_unreachable("Casting pointer to other than pointer or int"); 3010 } else if (DestTy->isX86_MMXTy()) { 3011 if (SrcTy->isVectorTy()) { 3012 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX"); 3013 return BitCast; // 64-bit vector to MMX 3014 } 3015 llvm_unreachable("Illegal cast to X86_MMX"); 3016 } 3017 llvm_unreachable("Casting to type that is not first-class"); 3018 } 3019 3020 //===----------------------------------------------------------------------===// 3021 // CastInst SubClass Constructors 3022 //===----------------------------------------------------------------------===// 3023 3024 /// Check that the construction parameters for a CastInst are correct. This 3025 /// could be broken out into the separate constructors but it is useful to have 3026 /// it in one place and to eliminate the redundant code for getting the sizes 3027 /// of the types involved. 
3028 bool 3029 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) { 3030 // Check for type sanity on the arguments 3031 Type *SrcTy = S->getType(); 3032 3033 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() || 3034 SrcTy->isAggregateType() || DstTy->isAggregateType()) 3035 return false; 3036 3037 // Get the size of the types in bits, we'll need this later 3038 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 3039 unsigned DstBitSize = DstTy->getScalarSizeInBits(); 3040 3041 // If these are vector types, get the lengths of the vectors (using zero for 3042 // scalar types means that checking that vector lengths match also checks that 3043 // scalars are not being converted to vectors or vectors to scalars). 3044 unsigned SrcLength = SrcTy->isVectorTy() ? 3045 cast<VectorType>(SrcTy)->getNumElements() : 0; 3046 unsigned DstLength = DstTy->isVectorTy() ? 3047 cast<VectorType>(DstTy)->getNumElements() : 0; 3048 3049 // Switch on the opcode provided 3050 switch (op) { 3051 default: return false; // This is an input error 3052 case Instruction::Trunc: 3053 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3054 SrcLength == DstLength && SrcBitSize > DstBitSize; 3055 case Instruction::ZExt: 3056 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3057 SrcLength == DstLength && SrcBitSize < DstBitSize; 3058 case Instruction::SExt: 3059 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() && 3060 SrcLength == DstLength && SrcBitSize < DstBitSize; 3061 case Instruction::FPTrunc: 3062 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3063 SrcLength == DstLength && SrcBitSize > DstBitSize; 3064 case Instruction::FPExt: 3065 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() && 3066 SrcLength == DstLength && SrcBitSize < DstBitSize; 3067 case Instruction::UIToFP: 3068 case Instruction::SIToFP: 3069 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() && 3070 SrcLength == DstLength; 3071 case Instruction::FPToUI: 3072 case Instruction::FPToSI: 3073 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() && 3074 SrcLength == DstLength; 3075 case Instruction::PtrToInt: 3076 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3077 return false; 3078 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3079 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3080 return false; 3081 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy(); 3082 case Instruction::IntToPtr: 3083 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) 3084 return false; 3085 if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) 3086 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) 3087 return false; 3088 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy(); 3089 case Instruction::BitCast: { 3090 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3091 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3092 3093 // BitCast implies a no-op cast of type only. No bits change. 3094 // However, you can't cast pointers to anything but pointers. 3095 if (!SrcPtrTy != !DstPtrTy) 3096 return false; 3097 3098 // For non-pointer cases, the cast is okay if the source and destination bit 3099 // widths are identical. 3100 if (!SrcPtrTy) 3101 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); 3102 3103 // If both are pointers then the address spaces must match. 
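    // For instance (illustrative): a BitCast of i8* to i32* within address
    // space 0 passes the pointer checks above, i8* to i8 addrspace(1)* is
    // rejected just below (AddrSpaceCast covers that case), and <2 x i8*> to
    // i8* is rejected by the element-count checks that follow.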
3104 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) 3105 return false; 3106 3107 // A vector of pointers must have the same number of elements. 3108 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy); 3109 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy); 3110 if (SrcVecTy && DstVecTy) 3111 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3112 if (SrcVecTy) 3113 return SrcVecTy->getNumElements() == 1; 3114 if (DstVecTy) 3115 return DstVecTy->getNumElements() == 1; 3116 3117 return true; 3118 } 3119 case Instruction::AddrSpaceCast: { 3120 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); 3121 if (!SrcPtrTy) 3122 return false; 3123 3124 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); 3125 if (!DstPtrTy) 3126 return false; 3127 3128 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) 3129 return false; 3130 3131 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { 3132 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) 3133 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); 3134 3135 return false; 3136 } 3137 3138 return true; 3139 } 3140 } 3141 } 3142 3143 TruncInst::TruncInst( 3144 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3145 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) { 3146 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3147 } 3148 3149 TruncInst::TruncInst( 3150 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3151 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) { 3152 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); 3153 } 3154 3155 ZExtInst::ZExtInst( 3156 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3157 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) { 3158 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3159 } 3160 3161 ZExtInst::ZExtInst( 3162 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3163 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { 3164 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); 3165 } 3166 SExtInst::SExtInst( 3167 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3168 ) : CastInst(Ty, SExt, S, Name, InsertBefore) { 3169 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3170 } 3171 3172 SExtInst::SExtInst( 3173 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3174 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { 3175 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); 3176 } 3177 3178 FPTruncInst::FPTruncInst( 3179 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3180 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { 3181 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3182 } 3183 3184 FPTruncInst::FPTruncInst( 3185 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3186 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { 3187 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); 3188 } 3189 3190 FPExtInst::FPExtInst( 3191 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3192 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) { 3193 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3194 } 3195 3196 FPExtInst::FPExtInst( 3197 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3198 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { 3199 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); 3200 } 3201 3202 UIToFPInst::UIToFPInst( 3203 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3204 ) : 
CastInst(Ty, UIToFP, S, Name, InsertBefore) { 3205 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3206 } 3207 3208 UIToFPInst::UIToFPInst( 3209 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3210 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { 3211 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); 3212 } 3213 3214 SIToFPInst::SIToFPInst( 3215 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3216 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { 3217 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3218 } 3219 3220 SIToFPInst::SIToFPInst( 3221 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3222 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) { 3223 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); 3224 } 3225 3226 FPToUIInst::FPToUIInst( 3227 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3228 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { 3229 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3230 } 3231 3232 FPToUIInst::FPToUIInst( 3233 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3234 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) { 3235 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); 3236 } 3237 3238 FPToSIInst::FPToSIInst( 3239 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3240 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { 3241 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3242 } 3243 3244 FPToSIInst::FPToSIInst( 3245 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3246 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) { 3247 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); 3248 } 3249 3250 PtrToIntInst::PtrToIntInst( 3251 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3252 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { 3253 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3254 } 3255 3256 PtrToIntInst::PtrToIntInst( 3257 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3258 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) { 3259 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); 3260 } 3261 3262 IntToPtrInst::IntToPtrInst( 3263 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3264 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { 3265 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3266 } 3267 3268 IntToPtrInst::IntToPtrInst( 3269 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3270 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) { 3271 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); 3272 } 3273 3274 BitCastInst::BitCastInst( 3275 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3276 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) { 3277 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3278 } 3279 3280 BitCastInst::BitCastInst( 3281 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3282 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) { 3283 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); 3284 } 3285 3286 AddrSpaceCastInst::AddrSpaceCastInst( 3287 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore 3288 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { 3289 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3290 } 3291 3292 AddrSpaceCastInst::AddrSpaceCastInst( 3293 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd 3294 ) : CastInst(Ty, 
AddrSpaceCast, S, Name, InsertAtEnd) { 3295 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); 3296 } 3297 3298 //===----------------------------------------------------------------------===// 3299 // CmpInst Classes 3300 //===----------------------------------------------------------------------===// 3301 3302 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3303 Value *RHS, const Twine &Name, Instruction *InsertBefore, 3304 Instruction *FlagsSource) 3305 : Instruction(ty, op, 3306 OperandTraits<CmpInst>::op_begin(this), 3307 OperandTraits<CmpInst>::operands(this), 3308 InsertBefore) { 3309 Op<0>() = LHS; 3310 Op<1>() = RHS; 3311 setPredicate((Predicate)predicate); 3312 setName(Name); 3313 if (FlagsSource) 3314 copyIRFlags(FlagsSource); 3315 } 3316 3317 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS, 3318 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd) 3319 : Instruction(ty, op, 3320 OperandTraits<CmpInst>::op_begin(this), 3321 OperandTraits<CmpInst>::operands(this), 3322 InsertAtEnd) { 3323 Op<0>() = LHS; 3324 Op<1>() = RHS; 3325 setPredicate((Predicate)predicate); 3326 setName(Name); 3327 } 3328 3329 CmpInst * 3330 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3331 const Twine &Name, Instruction *InsertBefore) { 3332 if (Op == Instruction::ICmp) { 3333 if (InsertBefore) 3334 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate), 3335 S1, S2, Name); 3336 else 3337 return new ICmpInst(CmpInst::Predicate(predicate), 3338 S1, S2, Name); 3339 } 3340 3341 if (InsertBefore) 3342 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate), 3343 S1, S2, Name); 3344 else 3345 return new FCmpInst(CmpInst::Predicate(predicate), 3346 S1, S2, Name); 3347 } 3348 3349 CmpInst * 3350 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, 3351 const Twine &Name, BasicBlock *InsertAtEnd) { 3352 if (Op == Instruction::ICmp) { 3353 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3354 S1, S2, Name); 3355 } 3356 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate), 3357 S1, S2, Name); 3358 } 3359 3360 void CmpInst::swapOperands() { 3361 if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3362 IC->swapOperands(); 3363 else 3364 cast<FCmpInst>(this)->swapOperands(); 3365 } 3366 3367 bool CmpInst::isCommutative() const { 3368 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3369 return IC->isCommutative(); 3370 return cast<FCmpInst>(this)->isCommutative(); 3371 } 3372 3373 bool CmpInst::isEquality() const { 3374 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) 3375 return IC->isEquality(); 3376 return cast<FCmpInst>(this)->isEquality(); 3377 } 3378 3379 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) { 3380 switch (pred) { 3381 default: llvm_unreachable("Unknown cmp predicate!"); 3382 case ICMP_EQ: return ICMP_NE; 3383 case ICMP_NE: return ICMP_EQ; 3384 case ICMP_UGT: return ICMP_ULE; 3385 case ICMP_ULT: return ICMP_UGE; 3386 case ICMP_UGE: return ICMP_ULT; 3387 case ICMP_ULE: return ICMP_UGT; 3388 case ICMP_SGT: return ICMP_SLE; 3389 case ICMP_SLT: return ICMP_SGE; 3390 case ICMP_SGE: return ICMP_SLT; 3391 case ICMP_SLE: return ICMP_SGT; 3392 3393 case FCMP_OEQ: return FCMP_UNE; 3394 case FCMP_ONE: return FCMP_UEQ; 3395 case FCMP_OGT: return FCMP_ULE; 3396 case FCMP_OLT: return FCMP_UGE; 3397 case FCMP_OGE: return FCMP_ULT; 3398 case FCMP_OLE: return FCMP_UGT; 3399 case FCMP_UEQ: return FCMP_ONE; 3400 case FCMP_UNE: return FCMP_OEQ; 3401 case 
FCMP_UGT: return FCMP_OLE; 3402 case FCMP_ULT: return FCMP_OGE; 3403 case FCMP_UGE: return FCMP_OLT; 3404 case FCMP_ULE: return FCMP_OGT; 3405 case FCMP_ORD: return FCMP_UNO; 3406 case FCMP_UNO: return FCMP_ORD; 3407 case FCMP_TRUE: return FCMP_FALSE; 3408 case FCMP_FALSE: return FCMP_TRUE; 3409 } 3410 } 3411 3412 StringRef CmpInst::getPredicateName(Predicate Pred) { 3413 switch (Pred) { 3414 default: return "unknown"; 3415 case FCmpInst::FCMP_FALSE: return "false"; 3416 case FCmpInst::FCMP_OEQ: return "oeq"; 3417 case FCmpInst::FCMP_OGT: return "ogt"; 3418 case FCmpInst::FCMP_OGE: return "oge"; 3419 case FCmpInst::FCMP_OLT: return "olt"; 3420 case FCmpInst::FCMP_OLE: return "ole"; 3421 case FCmpInst::FCMP_ONE: return "one"; 3422 case FCmpInst::FCMP_ORD: return "ord"; 3423 case FCmpInst::FCMP_UNO: return "uno"; 3424 case FCmpInst::FCMP_UEQ: return "ueq"; 3425 case FCmpInst::FCMP_UGT: return "ugt"; 3426 case FCmpInst::FCMP_UGE: return "uge"; 3427 case FCmpInst::FCMP_ULT: return "ult"; 3428 case FCmpInst::FCMP_ULE: return "ule"; 3429 case FCmpInst::FCMP_UNE: return "une"; 3430 case FCmpInst::FCMP_TRUE: return "true"; 3431 case ICmpInst::ICMP_EQ: return "eq"; 3432 case ICmpInst::ICMP_NE: return "ne"; 3433 case ICmpInst::ICMP_SGT: return "sgt"; 3434 case ICmpInst::ICMP_SGE: return "sge"; 3435 case ICmpInst::ICMP_SLT: return "slt"; 3436 case ICmpInst::ICMP_SLE: return "sle"; 3437 case ICmpInst::ICMP_UGT: return "ugt"; 3438 case ICmpInst::ICMP_UGE: return "uge"; 3439 case ICmpInst::ICMP_ULT: return "ult"; 3440 case ICmpInst::ICMP_ULE: return "ule"; 3441 } 3442 } 3443 3444 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) { 3445 switch (pred) { 3446 default: llvm_unreachable("Unknown icmp predicate!"); 3447 case ICMP_EQ: case ICMP_NE: 3448 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE: 3449 return pred; 3450 case ICMP_UGT: return ICMP_SGT; 3451 case ICMP_ULT: return ICMP_SLT; 3452 case ICMP_UGE: return ICMP_SGE; 3453 case ICMP_ULE: return ICMP_SLE; 3454 } 3455 } 3456 3457 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) { 3458 switch (pred) { 3459 default: llvm_unreachable("Unknown icmp predicate!"); 3460 case ICMP_EQ: case ICMP_NE: 3461 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE: 3462 return pred; 3463 case ICMP_SGT: return ICMP_UGT; 3464 case ICMP_SLT: return ICMP_ULT; 3465 case ICMP_SGE: return ICMP_UGE; 3466 case ICMP_SLE: return ICMP_ULE; 3467 } 3468 } 3469 3470 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) { 3471 switch (pred) { 3472 default: llvm_unreachable("Unknown or unsupported cmp predicate!"); 3473 case ICMP_SGT: return ICMP_SGE; 3474 case ICMP_SLT: return ICMP_SLE; 3475 case ICMP_SGE: return ICMP_SGT; 3476 case ICMP_SLE: return ICMP_SLT; 3477 case ICMP_UGT: return ICMP_UGE; 3478 case ICMP_ULT: return ICMP_ULE; 3479 case ICMP_UGE: return ICMP_UGT; 3480 case ICMP_ULE: return ICMP_ULT; 3481 3482 case FCMP_OGT: return FCMP_OGE; 3483 case FCMP_OLT: return FCMP_OLE; 3484 case FCMP_OGE: return FCMP_OGT; 3485 case FCMP_OLE: return FCMP_OLT; 3486 case FCMP_UGT: return FCMP_UGE; 3487 case FCMP_ULT: return FCMP_ULE; 3488 case FCMP_UGE: return FCMP_UGT; 3489 case FCMP_ULE: return FCMP_ULT; 3490 } 3491 } 3492 3493 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) { 3494 switch (pred) { 3495 default: llvm_unreachable("Unknown cmp predicate!"); 3496 case ICMP_EQ: case ICMP_NE: 3497 return pred; 3498 case ICMP_SGT: return ICMP_SLT; 3499 case ICMP_SLT: return ICMP_SGT; 3500 case ICMP_SGE: return 
      ICMP_SLE;
3501   case ICMP_SLE: return ICMP_SGE;
3502   case ICMP_UGT: return ICMP_ULT;
3503   case ICMP_ULT: return ICMP_UGT;
3504   case ICMP_UGE: return ICMP_ULE;
3505   case ICMP_ULE: return ICMP_UGE;
3506
3507   case FCMP_FALSE: case FCMP_TRUE:
3508   case FCMP_OEQ: case FCMP_ONE:
3509   case FCMP_UEQ: case FCMP_UNE:
3510   case FCMP_ORD: case FCMP_UNO:
3511     return pred;
3512   case FCMP_OGT: return FCMP_OLT;
3513   case FCMP_OLT: return FCMP_OGT;
3514   case FCMP_OGE: return FCMP_OLE;
3515   case FCMP_OLE: return FCMP_OGE;
3516   case FCMP_UGT: return FCMP_ULT;
3517   case FCMP_ULT: return FCMP_UGT;
3518   case FCMP_UGE: return FCMP_ULE;
3519   case FCMP_ULE: return FCMP_UGE;
3520   }
3521 }
3522
3523 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3524   switch (pred) {
3525   case ICMP_SGT: return ICMP_SGE;
3526   case ICMP_SLT: return ICMP_SLE;
3527   case ICMP_UGT: return ICMP_UGE;
3528   case ICMP_ULT: return ICMP_ULE;
3529   case FCMP_OGT: return FCMP_OGE;
3530   case FCMP_OLT: return FCMP_OLE;
3531   case FCMP_UGT: return FCMP_UGE;
3532   case FCMP_ULT: return FCMP_ULE;
3533   default: return pred;
3534   }
3535 }
3536
3537 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3538   assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
3539
3540   switch (pred) {
3541   default:
3542     llvm_unreachable("Unknown predicate!");
3543   case CmpInst::ICMP_ULT:
3544     return CmpInst::ICMP_SLT;
3545   case CmpInst::ICMP_ULE:
3546     return CmpInst::ICMP_SLE;
3547   case CmpInst::ICMP_UGT:
3548     return CmpInst::ICMP_SGT;
3549   case CmpInst::ICMP_UGE:
3550     return CmpInst::ICMP_SGE;
3551   }
3552 }
3553
3554 bool CmpInst::isUnsigned(Predicate predicate) {
3555   switch (predicate) {
3556   default: return false;
3557   case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3558   case ICmpInst::ICMP_UGE: return true;
3559   }
3560 }
3561
3562 bool CmpInst::isSigned(Predicate predicate) {
3563   switch (predicate) {
3564   default: return false;
3565   case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3566   case ICmpInst::ICMP_SGE: return true;
3567   }
3568 }
3569
3570 bool CmpInst::isOrdered(Predicate predicate) {
3571   switch (predicate) {
3572   default: return false;
3573   case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3574   case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3575   case FCmpInst::FCMP_ORD: return true;
3576   }
3577 }
3578
3579 bool CmpInst::isUnordered(Predicate predicate) {
3580   switch (predicate) {
3581   default: return false;
3582   case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3583   case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3584   case FCmpInst::FCMP_UNO: return true;
3585   }
3586 }
3587
3588 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3589   switch (predicate) {
3590   default: return false;
3591   case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3592   case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3593   }
3594 }
3595
3596 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3597   switch (predicate) {
3598   case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3599   case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3600   default: return false;
3601   }
3602 }
3603
3604 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3605   // If the predicates match, then we know the first condition implies the
3606   // second is true.
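  // A worked example (illustrative): with Pred1 == ICMP_UGT ("A >u B") and
  // Pred2 == ICMP_UGE ("A >=u B"), the switch below returns true, since
  // A >u B holding for a pair of operands guarantees A >=u B holds for the
  // same pair. The reasoning applies only to comparisons of the same operands.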
3607 if (Pred1 == Pred2) 3608 return true; 3609 3610 switch (Pred1) { 3611 default: 3612 break; 3613 case ICMP_EQ: 3614 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true. 3615 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE || 3616 Pred2 == ICMP_SLE; 3617 case ICMP_UGT: // A >u B implies A != B and A >=u B are true. 3618 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE; 3619 case ICMP_ULT: // A <u B implies A != B and A <=u B are true. 3620 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE; 3621 case ICMP_SGT: // A >s B implies A != B and A >=s B are true. 3622 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE; 3623 case ICMP_SLT: // A <s B implies A != B and A <=s B are true. 3624 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE; 3625 } 3626 return false; 3627 } 3628 3629 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) { 3630 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); 3631 } 3632 3633 //===----------------------------------------------------------------------===// 3634 // SwitchInst Implementation 3635 //===----------------------------------------------------------------------===// 3636 3637 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) { 3638 assert(Value && Default && NumReserved); 3639 ReservedSpace = NumReserved; 3640 setNumHungOffUseOperands(2); 3641 allocHungoffUses(ReservedSpace); 3642 3643 Op<0>() = Value; 3644 Op<1>() = Default; 3645 } 3646 3647 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3648 /// switch on and a default destination. The number of additional cases can 3649 /// be specified here to make memory allocation more efficient. This 3650 /// constructor can also autoinsert before another instruction. 3651 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3652 Instruction *InsertBefore) 3653 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3654 nullptr, 0, InsertBefore) { 3655 init(Value, Default, 2+NumCases*2); 3656 } 3657 3658 /// SwitchInst ctor - Create a new switch instruction, specifying a value to 3659 /// switch on and a default destination. The number of additional cases can 3660 /// be specified here to make memory allocation more efficient. This 3661 /// constructor also autoinserts at the end of the specified BasicBlock. 3662 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3663 BasicBlock *InsertAtEnd) 3664 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch, 3665 nullptr, 0, InsertAtEnd) { 3666 init(Value, Default, 2+NumCases*2); 3667 } 3668 3669 SwitchInst::SwitchInst(const SwitchInst &SI) 3670 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) { 3671 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands()); 3672 setNumHungOffUseOperands(SI.getNumOperands()); 3673 Use *OL = getOperandList(); 3674 const Use *InOL = SI.getOperandList(); 3675 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) { 3676 OL[i] = InOL[i]; 3677 OL[i+1] = InOL[i+1]; 3678 } 3679 SubclassOptionalData = SI.SubclassOptionalData; 3680 } 3681 3682 /// addCase - Add an entry to the switch instruction... 3683 /// 3684 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) { 3685 unsigned NewCaseIdx = getNumCases(); 3686 unsigned OpNo = getNumOperands(); 3687 if (OpNo+2 > ReservedSpace) 3688 growOperands(); // Get more space! 3689 // Initialize some new operands. 
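  // Operand layout (see init() and removeCase()): operand 0 is the switched-on
  // value, operand 1 the default destination, and case N occupies operands
  // 2*N+2 and 2*N+3. A minimal usage sketch with illustrative names (`Cond` an
  // i32 Value, `DefaultBB` and `CaseBB` BasicBlocks, `BB` the parent block):
  //   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
  //   SI->addCase(ConstantInt::get(Type::getInt32Ty(Cond->getContext()), 42),
  //               CaseBB);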
3690 assert(OpNo+1 < ReservedSpace && "Growing didn't work!"); 3691 setNumHungOffUseOperands(OpNo+2); 3692 CaseHandle Case(this, NewCaseIdx); 3693 Case.setValue(OnVal); 3694 Case.setSuccessor(Dest); 3695 } 3696 3697 /// removeCase - This method removes the specified case and its successor 3698 /// from the switch instruction. 3699 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) { 3700 unsigned idx = I->getCaseIndex(); 3701 3702 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!"); 3703 3704 unsigned NumOps = getNumOperands(); 3705 Use *OL = getOperandList(); 3706 3707 // Overwrite this case with the end of the list. 3708 if (2 + (idx + 1) * 2 != NumOps) { 3709 OL[2 + idx * 2] = OL[NumOps - 2]; 3710 OL[2 + idx * 2 + 1] = OL[NumOps - 1]; 3711 } 3712 3713 // Nuke the last value. 3714 OL[NumOps-2].set(nullptr); 3715 OL[NumOps-2+1].set(nullptr); 3716 setNumHungOffUseOperands(NumOps-2); 3717 3718 return CaseIt(this, idx); 3719 } 3720 3721 /// growOperands - grow operands - This grows the operand list in response 3722 /// to a push_back style of operation. This grows the number of ops by 3 times. 3723 /// 3724 void SwitchInst::growOperands() { 3725 unsigned e = getNumOperands(); 3726 unsigned NumOps = e*3; 3727 3728 ReservedSpace = NumOps; 3729 growHungoffUses(ReservedSpace); 3730 } 3731 3732 //===----------------------------------------------------------------------===// 3733 // IndirectBrInst Implementation 3734 //===----------------------------------------------------------------------===// 3735 3736 void IndirectBrInst::init(Value *Address, unsigned NumDests) { 3737 assert(Address && Address->getType()->isPointerTy() && 3738 "Address of indirectbr must be a pointer"); 3739 ReservedSpace = 1+NumDests; 3740 setNumHungOffUseOperands(1); 3741 allocHungoffUses(ReservedSpace); 3742 3743 Op<0>() = Address; 3744 } 3745 3746 3747 /// growOperands - grow operands - This grows the operand list in response 3748 /// to a push_back style of operation. This grows the number of ops by 2 times. 3749 /// 3750 void IndirectBrInst::growOperands() { 3751 unsigned e = getNumOperands(); 3752 unsigned NumOps = e*2; 3753 3754 ReservedSpace = NumOps; 3755 growHungoffUses(ReservedSpace); 3756 } 3757 3758 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3759 Instruction *InsertBefore) 3760 : Instruction(Type::getVoidTy(Address->getContext()), 3761 Instruction::IndirectBr, nullptr, 0, InsertBefore) { 3762 init(Address, NumCases); 3763 } 3764 3765 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases, 3766 BasicBlock *InsertAtEnd) 3767 : Instruction(Type::getVoidTy(Address->getContext()), 3768 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) { 3769 init(Address, NumCases); 3770 } 3771 3772 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI) 3773 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr, 3774 nullptr, IBI.getNumOperands()) { 3775 allocHungoffUses(IBI.getNumOperands()); 3776 Use *OL = getOperandList(); 3777 const Use *InOL = IBI.getOperandList(); 3778 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i) 3779 OL[i] = InOL[i]; 3780 SubclassOptionalData = IBI.SubclassOptionalData; 3781 } 3782 3783 /// addDestination - Add a destination. 3784 /// 3785 void IndirectBrInst::addDestination(BasicBlock *DestBB) { 3786 unsigned OpNo = getNumOperands(); 3787 if (OpNo+1 > ReservedSpace) 3788 growOperands(); // Get more space! 3789 // Initialize some new operands. 
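  // Operand layout (see init() and removeDestination()): operand 0 is the
  // target address; destinations live in operands 1..N. A minimal usage sketch
  // with illustrative names (`Addr` a blockaddress Value, `Dest` a BasicBlock,
  // `BB` the insertion block):
  //   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, BB);
  //   IBI->addDestination(Dest);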
3790 assert(OpNo < ReservedSpace && "Growing didn't work!"); 3791 setNumHungOffUseOperands(OpNo+1); 3792 getOperandList()[OpNo] = DestBB; 3793 } 3794 3795 /// removeDestination - This method removes the specified successor from the 3796 /// indirectbr instruction. 3797 void IndirectBrInst::removeDestination(unsigned idx) { 3798 assert(idx < getNumOperands()-1 && "Successor index out of range!"); 3799 3800 unsigned NumOps = getNumOperands(); 3801 Use *OL = getOperandList(); 3802 3803 // Replace this value with the last one. 3804 OL[idx+1] = OL[NumOps-1]; 3805 3806 // Nuke the last value. 3807 OL[NumOps-1].set(nullptr); 3808 setNumHungOffUseOperands(NumOps-1); 3809 } 3810 3811 //===----------------------------------------------------------------------===// 3812 // cloneImpl() implementations 3813 //===----------------------------------------------------------------------===// 3814 3815 // Define these methods here so vtables don't get emitted into every translation 3816 // unit that uses these classes. 3817 3818 GetElementPtrInst *GetElementPtrInst::cloneImpl() const { 3819 return new (getNumOperands()) GetElementPtrInst(*this); 3820 } 3821 3822 UnaryOperator *UnaryOperator::cloneImpl() const { 3823 return Create(getOpcode(), Op<0>()); 3824 } 3825 3826 BinaryOperator *BinaryOperator::cloneImpl() const { 3827 return Create(getOpcode(), Op<0>(), Op<1>()); 3828 } 3829 3830 FCmpInst *FCmpInst::cloneImpl() const { 3831 return new FCmpInst(getPredicate(), Op<0>(), Op<1>()); 3832 } 3833 3834 ICmpInst *ICmpInst::cloneImpl() const { 3835 return new ICmpInst(getPredicate(), Op<0>(), Op<1>()); 3836 } 3837 3838 ExtractValueInst *ExtractValueInst::cloneImpl() const { 3839 return new ExtractValueInst(*this); 3840 } 3841 3842 InsertValueInst *InsertValueInst::cloneImpl() const { 3843 return new InsertValueInst(*this); 3844 } 3845 3846 AllocaInst *AllocaInst::cloneImpl() const { 3847 AllocaInst *Result = new AllocaInst(getAllocatedType(), 3848 getType()->getAddressSpace(), 3849 (Value *)getOperand(0), getAlignment()); 3850 Result->setUsedWithInAlloca(isUsedWithInAlloca()); 3851 Result->setSwiftError(isSwiftError()); 3852 return Result; 3853 } 3854 3855 LoadInst *LoadInst::cloneImpl() const { 3856 return new LoadInst(getOperand(0), Twine(), isVolatile(), 3857 getAlignment(), getOrdering(), getSyncScopeID()); 3858 } 3859 3860 StoreInst *StoreInst::cloneImpl() const { 3861 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), 3862 getAlignment(), getOrdering(), getSyncScopeID()); 3863 3864 } 3865 3866 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const { 3867 AtomicCmpXchgInst *Result = 3868 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2), 3869 getSuccessOrdering(), getFailureOrdering(), 3870 getSyncScopeID()); 3871 Result->setVolatile(isVolatile()); 3872 Result->setWeak(isWeak()); 3873 return Result; 3874 } 3875 3876 AtomicRMWInst *AtomicRMWInst::cloneImpl() const { 3877 AtomicRMWInst *Result = 3878 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1), 3879 getOrdering(), getSyncScopeID()); 3880 Result->setVolatile(isVolatile()); 3881 return Result; 3882 } 3883 3884 FenceInst *FenceInst::cloneImpl() const { 3885 return new FenceInst(getContext(), getOrdering(), getSyncScopeID()); 3886 } 3887 3888 TruncInst *TruncInst::cloneImpl() const { 3889 return new TruncInst(getOperand(0), getType()); 3890 } 3891 3892 ZExtInst *ZExtInst::cloneImpl() const { 3893 return new ZExtInst(getOperand(0), getType()); 3894 } 3895 3896 SExtInst *SExtInst::cloneImpl() const { 3897 return 
new SExtInst(getOperand(0), getType()); 3898 } 3899 3900 FPTruncInst *FPTruncInst::cloneImpl() const { 3901 return new FPTruncInst(getOperand(0), getType()); 3902 } 3903 3904 FPExtInst *FPExtInst::cloneImpl() const { 3905 return new FPExtInst(getOperand(0), getType()); 3906 } 3907 3908 UIToFPInst *UIToFPInst::cloneImpl() const { 3909 return new UIToFPInst(getOperand(0), getType()); 3910 } 3911 3912 SIToFPInst *SIToFPInst::cloneImpl() const { 3913 return new SIToFPInst(getOperand(0), getType()); 3914 } 3915 3916 FPToUIInst *FPToUIInst::cloneImpl() const { 3917 return new FPToUIInst(getOperand(0), getType()); 3918 } 3919 3920 FPToSIInst *FPToSIInst::cloneImpl() const { 3921 return new FPToSIInst(getOperand(0), getType()); 3922 } 3923 3924 PtrToIntInst *PtrToIntInst::cloneImpl() const { 3925 return new PtrToIntInst(getOperand(0), getType()); 3926 } 3927 3928 IntToPtrInst *IntToPtrInst::cloneImpl() const { 3929 return new IntToPtrInst(getOperand(0), getType()); 3930 } 3931 3932 BitCastInst *BitCastInst::cloneImpl() const { 3933 return new BitCastInst(getOperand(0), getType()); 3934 } 3935 3936 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const { 3937 return new AddrSpaceCastInst(getOperand(0), getType()); 3938 } 3939 3940 CallInst *CallInst::cloneImpl() const { 3941 if (hasOperandBundles()) { 3942 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 3943 return new(getNumOperands(), DescriptorBytes) CallInst(*this); 3944 } 3945 return new(getNumOperands()) CallInst(*this); 3946 } 3947 3948 SelectInst *SelectInst::cloneImpl() const { 3949 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3950 } 3951 3952 VAArgInst *VAArgInst::cloneImpl() const { 3953 return new VAArgInst(getOperand(0), getType()); 3954 } 3955 3956 ExtractElementInst *ExtractElementInst::cloneImpl() const { 3957 return ExtractElementInst::Create(getOperand(0), getOperand(1)); 3958 } 3959 3960 InsertElementInst *InsertElementInst::cloneImpl() const { 3961 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2)); 3962 } 3963 3964 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const { 3965 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2)); 3966 } 3967 3968 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); } 3969 3970 LandingPadInst *LandingPadInst::cloneImpl() const { 3971 return new LandingPadInst(*this); 3972 } 3973 3974 ReturnInst *ReturnInst::cloneImpl() const { 3975 return new(getNumOperands()) ReturnInst(*this); 3976 } 3977 3978 BranchInst *BranchInst::cloneImpl() const { 3979 return new(getNumOperands()) BranchInst(*this); 3980 } 3981 3982 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); } 3983 3984 IndirectBrInst *IndirectBrInst::cloneImpl() const { 3985 return new IndirectBrInst(*this); 3986 } 3987 3988 InvokeInst *InvokeInst::cloneImpl() const { 3989 if (hasOperandBundles()) { 3990 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo); 3991 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this); 3992 } 3993 return new(getNumOperands()) InvokeInst(*this); 3994 } 3995 3996 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); } 3997 3998 CleanupReturnInst *CleanupReturnInst::cloneImpl() const { 3999 return new (getNumOperands()) CleanupReturnInst(*this); 4000 } 4001 4002 CatchReturnInst *CatchReturnInst::cloneImpl() const { 4003 return new (getNumOperands()) CatchReturnInst(*this); 4004 } 4005 4006 CatchSwitchInst 
*CatchSwitchInst::cloneImpl() const { 4007 return new CatchSwitchInst(*this); 4008 } 4009 4010 FuncletPadInst *FuncletPadInst::cloneImpl() const { 4011 return new (getNumOperands()) FuncletPadInst(*this); 4012 } 4013 4014 UnreachableInst *UnreachableInst::cloneImpl() const { 4015 LLVMContext &Context = getContext(); 4016 return new UnreachableInst(Context); 4017 } 4018
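// Usage sketch for the cloneImpl() definitions above: clients normally go
// through Instruction::clone(), which dispatches to the per-class cloneImpl()
// and then copies metadata; the returned instruction has no parent until it is
// inserted. Assuming `I` is an existing Instruction and `InsertPt` an
// insertion point (illustrative names):
//   Instruction *Copy = I->clone();
//   Copy->insertBefore(InsertPt);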