1 //===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements methods that make it really easy to deal with intrinsic 10 // functions. 11 // 12 // All intrinsic function calls are instances of the call instruction, so these 13 // are all subclasses of the CallInst class. Note that none of these classes 14 // has state or virtual methods, which is an important part of this gross/neat 15 // hack working. 16 // 17 // In some cases, arguments to intrinsics need to be generic and are defined as 18 // type pointer to empty struct { }*. To access the real item of interest the 19 // cast instruction needs to be stripped away. 20 // 21 //===----------------------------------------------------------------------===// 22 23 #include "llvm/IR/IntrinsicInst.h" 24 #include "llvm/ADT/StringSwitch.h" 25 #include "llvm/IR/Constants.h" 26 #include "llvm/IR/DebugInfoMetadata.h" 27 #include "llvm/IR/GlobalVariable.h" 28 #include "llvm/IR/Metadata.h" 29 #include "llvm/IR/Module.h" 30 #include "llvm/IR/Operator.h" 31 #include "llvm/IR/PatternMatch.h" 32 #include "llvm/IR/Statepoint.h" 33 34 #include "llvm/Support/raw_ostream.h" 35 using namespace llvm; 36 37 //===----------------------------------------------------------------------===// 38 /// DbgVariableIntrinsic - This is the common base class for debug info 39 /// intrinsics for variables. 
///

/// Return a range over this debug intrinsic's location operands: the single
/// ValueAsMetadata operand, each argument of a DIArgList, or an empty range
/// when the location has been removed (raw location is an empty MDNode).
iterator_range<DbgVariableIntrinsic::location_op_iterator>
DbgVariableIntrinsic::location_ops() const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");

  // If operand is ValueAsMetadata, return a range over just that operand.
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
  }
  // If operand is DIArgList, return a range over its args.
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return {location_op_iterator(AL->args_begin()),
            location_op_iterator(AL->args_end())};
  // Operand must be an empty metadata tuple, so return empty iterator.
  return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
          location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
}

/// Return the OpIdx'th location operand, or nullptr when the intrinsic has no
/// location (the raw location is a plain MDNode rather than value metadata).
Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return AL->getArgs()[OpIdx]->getValue();
  // An MDNode raw location means the value has been dropped (undef location).
  if (isa<MDNode>(MD))
    return nullptr;
  assert(
      isa<ValueAsMetadata>(MD) &&
      "Attempted to get location operand from DbgVariableIntrinsic with none.");
  auto *V = cast<ValueAsMetadata>(MD);
  assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                       "single location operand.");
  return V->getValue();
}

/// Wrap \p V as ValueAsMetadata. If \p V is already a MetadataAsValue wrapper,
/// unwrap it first so the metadata is never doubly wrapped. Note: for a
/// MetadataAsValue that does not hold ValueAsMetadata, the dyn_cast yields
/// nullptr, which callers store into the DIArgList as-is.
static ValueAsMetadata *getAsMetadata(Value *V) {
  return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
                                       cast<MetadataAsValue>(V)->getMetadata())
                                 : ValueAsMetadata::get(V);
}

/// Replace every use of \p OldValue in this intrinsic's location operands with
/// \p NewValue. \p OldValue must currently be a location operand.
void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
                                                     Value *NewValue) {
  assert(NewValue && "Values must be non-null");
  auto Locations = location_ops();
  auto OldIt = find(Locations, OldValue);
  assert(OldIt != Locations.end() && "OldValue must be a current location");
  // Single-location form: just swap operand 0, wrapping NewValue as metadata
  // if it is not already wrapped.
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  // Arg-list form: rebuild the DIArgList, substituting NewValue for each
  // occurrence of OldValue.
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (auto *VMD : Locations)
    MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

/// Replace the location operand at index \p OpIdx with \p NewValue.
void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
                                                     Value *NewValue) {
  assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
  // Single-location form: OpIdx can only be 0; replace operand 0 directly.
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  // Arg-list form: rebuild the DIArgList with NewValue at position OpIdx.
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
    MDs.push_back(Idx == OpIdx ? NewOperand
                               : getAsMetadata(getVariableLocationOp(Idx)));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

/// Append \p NewValues to the location operand list and install \p NewExpr,
/// which must reference every location operand (old and new).
void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  // Operand 2 is the DIExpression; operand 0 is the location list.
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  SmallVector<ValueAsMetadata *, 4> MDs;
  for (auto *VMD : location_ops())
    MDs.push_back(getAsMetadata(VMD));
  for (auto *VMD : NewValues)
    MDs.push_back(getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

/// Return the size in bits of the described fragment, preferring the
/// expression's fragment info over the whole variable's size.
Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  if (auto Fragment = getExpression()->getFragmentInfo())
    return Fragment->SizeInBits;
  return getVariable()->getSizeInBits();
}

/// Binary-search \p NameTable for \p Name (which must start with "llvm.") and
/// return its index, or -1 if no table entry is a dotted prefix of Name.
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  // Accept only an exact match or a match at a '.' component boundary.
  StringRef NameFound = *LastLow;
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}

/// Return the number-of-counters argument (operand 2). Not valid on value
/// profiling intrinsics, which have no counters.
ConstantInt *InstrProfInstBase::getNumCounters() const {
  if (InstrProfValueProfileInst::classof(this))
    llvm_unreachable("InstrProfValueProfileInst does not have counters!");
  return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
}

/// Return the counter-index argument (operand 3). Value profiling intrinsics
/// expose their index through InstrProfValueProfileInst::getIndex() instead.
ConstantInt *InstrProfInstBase::getIndex() const {
  if (InstrProfValueProfileInst::classof(this))
    llvm_unreachable("Please use InstrProfValueProfileInst::getIndex()");
  return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
}

/// Return the increment step: operand 4 for the explicit-step variant,
/// otherwise a constant i64 1.
Value *InstrProfIncrementInst::getStep() const {
  if (InstrProfIncrementInstStep::classof(this)) {
    return const_cast<Value *>(getArgOperand(4));
  }
  const Module *M = getModule();
  LLVMContext &Context = M->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}

/// Decode the rounding-mode metadata string (second-to-last argument), or
/// None if it is absent or not an MDString.
Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumOperands = arg_size();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return convertStrToRoundingMode(cast<MDString>(MD)->getString());
}

/// Decode the exception-behavior metadata string (last argument), or None if
/// it is absent or not an MDString.
Optional<fp::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumOperands = arg_size();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
}

/// True when the intrinsic's FP environment matches the default: exceptions
/// ignored and round-to-nearest-ties-to-even (missing metadata also counts
/// as default).
bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
  Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
  if (Except) {
    if (Except.getValue() != fp::ebIgnore)
      return false;
  }

  Optional<RoundingMode> Rounding = getRoundingMode();
  if (Rounding) {
    if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
      return false;
  }

  return true;
}

/// Translate the predicate metadata string (operand 2) into an FCmp
/// predicate, or BAD_FCMP_PREDICATE on malformed metadata.
FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}

/// True if this constrained intrinsic takes exactly one non-metadata operand,
/// per the NARG column of ConstrainedOps.def.
bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}

/// True if this constrained intrinsic takes exactly three non-metadata
/// operands, per the NARG column of ConstrainedOps.def.
bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}

/// RTTI support: true for every intrinsic listed in ConstrainedOps.def.
bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}

/// Return the static element count of this VP operation, taken from the mask
/// parameter's vector type.
ElementCount VPIntrinsic::getStaticVectorLength() const {
  auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
    const auto *VT = cast<VectorType>(T);
    auto ElemCount = VT->getElementCount();
    return ElemCount;
  };

  Value *VPMask = getMaskParam();
  assert(VPMask && "No mask param?");
  return GetVectorLengthOfType(VPMask->getType());
}

/// Return the mask operand, or nullptr if this VP intrinsic has no mask.
Value *VPIntrinsic::getMaskParam() const {
  if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
    return getArgOperand(MaskPos.getValue());
  return nullptr;
}

/// Replace the mask operand. The intrinsic must have a mask position.
void VPIntrinsic::setMaskParam(Value *NewMask) {
  auto MaskPos = getMaskParamPos(getIntrinsicID());
  setArgOperand(*MaskPos, NewMask);
}

/// Return the explicit vector length (EVL) operand, or nullptr if absent.
Value *VPIntrinsic::getVectorLengthParam() const {
  if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
    return getArgOperand(EVLPos.getValue());
  return nullptr;
}

/// Replace the explicit vector length operand. The intrinsic must have an
/// EVL position.
void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
  auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  setArgOperand(*EVLPos, NewEVL);
}

/// Return the mask operand index for \p IntrinsicID, or None if it is not a
/// VP intrinsic. Positions come from VPIntrinsics.def.
Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// Return the EVL operand index for \p IntrinsicID, or None if it is not a
/// VP intrinsic. Positions come from VPIntrinsics.def.
Optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// \return the alignment of the pointer used by this load/store/gather or
/// scatter.
MaybeAlign VPIntrinsic::getPointerAlignment() const {
  Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
  assert(PtrParamOpt.hasValue() && "no pointer argument!");
  return getParamAlign(PtrParamOpt.getValue());
}

/// \return The pointer operand of this load,store, gather or scatter.
Value *VPIntrinsic::getMemoryPointerParam() const {
  if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
    return getArgOperand(PtrParamOpt.getValue());
  return nullptr;
}

/// Return the pointer operand index for memory VP intrinsics, or None for
/// non-memory ops. The VP_PROPERTY_MEMOP expansion returns inside the matching
/// intrinsic's case range; END_REGISTER breaks out for non-memory ops.
Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

/// \return The data (payload) operand of this store or scatter.
Value *VPIntrinsic::getMemoryDataParam() const {
  auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
  if (!DataParamOpt.hasValue())
    return nullptr;
  return getArgOperand(DataParamOpt.getValue());
}

/// Return the data operand index for store/scatter VP intrinsics, or None.
Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

/// True if \p ID names any vector-predicated intrinsic.
bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

// Equivalent non-predicated opcode
Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

/// Return the VP intrinsic that corresponds to IR opcode \p IROPC, or
/// not_intrinsic if none exists. This inverts getFunctionalOpcodeForVP: the
/// FUNCTIONAL_OPC expansion opens the case and END_REGISTER returns the VPID.
Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    break;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
  return Intrinsic::not_intrinsic;
}

/// True when the EVL operand provably masks off no lanes, i.e. it statically
/// covers the full (possibly scalable) vector length.
bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes masked-off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit Vector
  // Length parameter is strictly greater-than the number of vector elements of
  // the operation. This function returns true when this is detected statically
  // in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()"
  if (EC.isScalable()) {
    // Undig the DL
    const auto *ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Compare vscale patterns
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // standard SIMD operation
  const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}

/// Declare (or look up) the VP intrinsic \p VPID in \p M, deriving the
/// overload types from \p ReturnType and the actual \p Params. Each special
/// case below mirrors the overload positions in the intrinsic's definition.
Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
                                               Type *ReturnType,
                                               ArrayRef<Value *> Params) {
  assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
  Function *VPFunc;
  switch (VPID) {
  default: {
    // Generic case: overload on the first operand, or on the vector operand
    // for reductions.
    Type *OverloadTy = Params[0]->getType();
    if (VPReductionIntrinsic::isVPReduction(VPID))
      OverloadTy =
          Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();

    VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
    break;
  }
  case Intrinsic::vp_merge:
  case Intrinsic::vp_select:
    VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
    break;
  case Intrinsic::vp_load:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_gather:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_store:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  case Intrinsic::vp_scatter:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  }
  assert(VPFunc && "Could not declare VP intrinsic");
  return VPFunc;
}

/// True if \p ID names a VP reduction (has VP_PROPERTY_REDUCTION in
/// VPIntrinsics.def).
bool VPReductionIntrinsic::isVPReduction(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

/// Return this reduction's vector operand index (always present for a
/// VPReductionIntrinsic, hence the unconditional dereference).
unsigned VPReductionIntrinsic::getVectorParamPos() const {
  return *VPReductionIntrinsic::getVectorParamPos(getIntrinsicID());
}

/// Return this reduction's start-value operand index.
unsigned VPReductionIntrinsic::getStartParamPos() const {
  return *VPReductionIntrinsic::getStartParamPos(getIntrinsicID());
}

/// Return the vector operand index for the reduction \p ID, or None if \p ID
/// is not a VP reduction.
Optional<unsigned> VPReductionIntrinsic::getVectorParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}

/// Return the start-value operand index for the reduction \p ID, or None if
/// \p ID is not a VP reduction.
Optional<unsigned> VPReductionIntrinsic::getStartParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}

/// Map this overflow/saturating intrinsic to the plain binary opcode it
/// models (Add, Sub, or Mul).
Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}

/// True for the signed variants (s*_with_overflow / s*_sat).
bool BinaryOpIntrinsic::isSigned() const {
  switch (getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

/// Return the no-wrap flag matching this intrinsic's signedness (nsw/nuw).
unsigned BinaryOpIntrinsic::getNoWrapKind() const {
  if (isSigned())
    return OverflowingBinaryOperator::NoSignedWrap;
  else
    return OverflowingBinaryOperator::NoUnsignedWrap;
}

/// Return the statepoint this projection (relocate/result) belongs to,
/// following the landingpad back to the invoke for exceptional paths.
const GCStatepointInst *GCProjectionInst::getStatepoint() const {
  const Value *Token = getArgOperand(0);

  // This takes care both of relocates for call statepoints and relocates
  // on normal path of invoke statepoint.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);

  // This relocate is on exceptional path of an invoke statepoint
  const BasicBlock *InvokeBB =
      cast<Instruction>(Token)->getParent()->getUniquePredecessor();

  assert(InvokeBB && "safepoints should have unique landingpads");
  assert(InvokeBB->getTerminator() &&
         "safepoint block should be well formed");

  return cast<GCStatepointInst>(InvokeBB->getTerminator());
}

/// Return the base pointer being relocated, read from the gc-live operand
/// bundle when present, otherwise from the statepoint's argument list.
Value *GCRelocateInst::getBasePtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getBasePtrIndex());
  return *(getStatepoint()->arg_begin() + getBasePtrIndex());
}

/// Return the derived pointer being relocated, read from the gc-live operand
/// bundle when present, otherwise from the statepoint's argument list.
Value *GCRelocateInst::getDerivedPtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getDerivedPtrIndex());
  return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
}