//===- InstCombineCalls.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall, visitInvoke, and visitCallBr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window",
    cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));

namespace llvm {
/// enable preservation of attributes in assume like:
/// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
extern cl::opt<bool> EnableKnowledgeRetention;
} // namespace llvm

/// Return the specified type promoted as it would be to pass through a va_arg
/// area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
  Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
  MaybeAlign CopyDstAlign = MI->getDestAlign();
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);
    return MI;
  }

  Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
  MaybeAlign CopySrcAlign = MI->getSourceAlign();
  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // If the memcpy/memmove length is 1/2/4/8 bytes, replace it with a single
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // If it is an atomic and the alignment is less than the size, then we will
  // introduce an unaligned memory access, which will later be transformed
  // into a libcall in CodeGen. That is not an evident performance gain, so
  // disable the transform for now.
  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)
      return nullptr;

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
      cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
      cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // If the memcpy has metadata describing the members, see if we can get the
  // TBAA tag describing our copy.
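  // Illustrative note (the shape below is exactly what the checks that follow
  // require): a usable !tbaa.struct entry for a 4-byte copy looks like
  //   !{ i64 0, i64 4, !<tbaa tag> }
  // i.e. a single member at offset 0 whose size equals the whole copy, with
  // the TBAA tag we can reuse on the new load/store as the third operand.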
  MDNode *CopyMD = nullptr;
  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
    CopyMD = M;
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        M->getOperand(1) &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
        Size &&
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));
  }

  Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder.CreateLoad(IntType, Src);
  // Alignment from the mem intrinsic will be better, so use it.
  L->setAlignment(*CopySrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  if (AccessGroupMD)
    L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  StoreInst *S = Builder.CreateStore(L, Dest);
  // Alignment from the mem intrinsic will be better, so use it.
  S->setAlignment(*CopyDstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  if (AccessGroupMD)
    S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    // non-atomics can be volatile
    L->setVolatile(MT->isVolatile());
    S->setVolatile(MT->isVolatile());
  }
  if (isa<AtomicMemTransferInst>(MI)) {
    // atomics have to be unordered
    L->setOrdering(AtomicOrdering::Unordered);
    S->setOrdering(AtomicOrdering::Unordered);
  }

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setLength(Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
  const Align KnownAlignment =
      getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
  MaybeAlign MemSetAlign = MI->getDestAlign();
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);
    return MI;
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(MI->getDest())) {
    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
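  // For example (illustrative): memset(p, 0xAB, 4) becomes a single i32 store
  // of 0xABABABAB -- the fill byte is replicated across the store width by
  // the multiply with 0x0101010101010101 further below.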
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  const uint64_t Len = LenC->getLimitedValue();
  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = assumeAligned(MI->getDestAlignment());

  // If it is an atomic and the alignment is less than the size, then we will
  // introduce an unaligned memory access, which will later be transformed
  // into a libcall in CodeGen. That is not an evident performance gain, so
  // disable the transform for now.
  if (isa<AtomicMemSetInst>(MI))
    if (Alignment < Len)
      return nullptr;

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                       MI->isVolatile());
    S->setAlignment(Alignment);
    if (isa<AtomicMemSetInst>(MI))
      S->setOrdering(AtomicOrdering::Unordered);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Narrow width by halves excluding zero/undef lanes
Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
  Value *LoadPtr = II.getArgOperand(0);
  const Align Alignment =
      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();

  // If the mask is all ones or undefs, this is a plain vector load of the 1st
  // argument.
  if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
    LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                            "unmaskedload");
    L->copyMetadata(II);
    return L;
  }

  // If we can unconditionally load from this address, replace with a
  // load/select idiom. TODO: use DT for context sensitive query
  if (isDereferenceablePointer(LoadPtr, II.getType(),
                               II.getModule()->getDataLayout(), &II, nullptr)) {
    LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                             "unmaskedload");
    LI->copyMetadata(II);
    return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
  }

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Narrow width by halves excluding zero/undef lanes
Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, this instruction does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  // If the mask is all ones, this is a plain vector store of the 1st argument.
  if (ConstMask->isAllOnesValue()) {
    Value *StorePtr = II.getArgOperand(1);
    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
    StoreInst *S =
        new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
    S->copyMetadata(II);
    return S;
  }

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);

  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane load -> load
// * Dereferenceable address & few lanes -> scalarize speculative load/selects
// * Adjacent vector addresses -> masked.load
// * Narrow width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar load
// * Vector incrementing address -> vector masked load
Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
  return nullptr;
}

// TODO, Obvious Missing Transforms:
// * Single constant active lane -> store
// * Adjacent vector addresses -> masked.store
// * Narrow store width by halves excluding zero/undef lanes
// * Vector splat address w/known mask -> scalar store
// * Vector incrementing address -> vector masked store
Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
  auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
  if (!ConstMask)
    return nullptr;

  // If the mask is all zeros, a scatter does nothing.
  if (ConstMask->isNullValue())
    return eraseInstFromFunction(II);

  if (isa<ScalableVectorType>(ConstMask->getType()))
    return nullptr;

  // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
  APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
  APInt UndefElts(DemandedElts.getBitWidth(), 0);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
    return replaceOperand(II, 0, V);
  if (Value *V =
          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
    return replaceOperand(II, 1, V);

  return nullptr;
}

/// This function transforms launder.invariant.group and strip.invariant.group
/// like:
///   launder(launder(%x)) -> launder(%x) (the result is not the argument)
///   launder(strip(%x)) -> launder(%x)
///   strip(strip(%x)) -> strip(%x) (the result is not the argument)
///   strip(launder(%x)) -> strip(%x)
/// This is legal because it preserves the most recent information about
/// the presence or absence of invariant.group.
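/// For example (illustrative), launder(bitcast(launder(%x))) also folds to a
/// single launder of %x, because the walk below strips pointer casts between
/// the nested intrinsic calls.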
static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
                                                    InstCombinerImpl &IC) {
  auto *Arg = II.getArgOperand(0);
  auto *StrippedArg = Arg->stripPointerCasts();
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
      break;
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  }
  if (StrippedArg == StrippedInvariantGroupsArg)
    return nullptr; // No launders/strips to remove.

  Value *Result = nullptr;

  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
  else
    llvm_unreachable(
        "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
      II.getType()->getPointerAddressSpace())
    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
  if (Result->getType() != II.getType())
    Result = IC.Builder.CreateBitCast(Result, II.getType());

  return cast<Instruction>(Result);
}

static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert((II.getIntrinsicID() == Intrinsic::cttz ||
          II.getIntrinsicID() == Intrinsic::ctlz) &&
         "Expected cttz or ctlz intrinsic");
  bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
  Value *Op0 = II.getArgOperand(0);
  Value *Op1 = II.getArgOperand(1);
  Value *X;
  // ctlz(bitreverse(x)) -> cttz(x)
  // cttz(bitreverse(x)) -> ctlz(x)
  if (match(Op0, m_BitReverse(m_Value(X)))) {
    Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
    Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
    return CallInst::Create(F, {X, II.getArgOperand(1)});
  }

  if (II.getType()->isIntOrIntVectorTy(1)) {
    // ctlz/cttz i1 Op0 --> not Op0
    if (match(Op1, m_Zero()))
      return BinaryOperator::CreateNot(Op0);
    // If zero is undef, then the input can be assumed to be "true", so the
    // instruction simplifies to "false".
    assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
    return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
  }

  if (IsTZ) {
    // cttz(-x) -> cttz(x)
    if (match(Op0, m_Neg(m_Value(X))))
      return IC.replaceOperand(II, 0, X);

    // cttz(sext(x)) -> cttz(zext(x))
    if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
      auto *Zext = IC.Builder.CreateZExt(X, II.getType());
      auto *CttzZext =
          IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
      return IC.replaceInstUsesWith(II, CttzZext);
    }

    // Zext doesn't change the number of trailing zeros, so narrow:
    // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsUndef' parameter is 'true'.
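    // (When zero is poison, an all-zero input can be ignored; otherwise the
    // narrow cttz would return the narrow bit width where the wide call must
    // return the wide bit width, so the fold below would be wrong.)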
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
      auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
                                                    IC.Builder.getTrue());
      auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
      return IC.replaceInstUsesWith(II, ZextCttz);
    }

    // cttz(abs(x)) -> cttz(x)
    // cttz(nabs(x)) -> cttz(x)
    Value *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return IC.replaceOperand(II, 0, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return IC.replaceOperand(II, 0, X);
  }

  KnownBits Known = IC.computeKnownBits(Op0, 0, &II);

  // Create a mask for bits above (ctlz) or below (cttz) the first known one.
  unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
                                : Known.countMaxLeadingZeros();
  unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
                                : Known.countMinLeadingZeros();

  // If all bits above (ctlz) or below (cttz) the first known one are known
  // zero, this value is constant.
  // FIXME: This should be in InstSimplify because we're replacing an
  // instruction with a constant.
  if (PossibleZeros == DefiniteZeros) {
    auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
    return IC.replaceInstUsesWith(II, C);
  }

  // If the input to cttz/ctlz is known to be non-zero,
  // then change the 'ZeroIsUndef' parameter to 'true'
  // because we know the zero behavior can't affect the result.
  if (!Known.One.isNullValue() ||
      isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                     &IC.getDominatorTree())) {
    if (!match(II.getArgOperand(1), m_One()))
      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
  }

  // Add range metadata since known bits can't completely reflect what we know.
  // TODO: Handle splat vectors.
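  // For instance (illustrative): an i32 input with at least 4 and at most 16
  // trailing zeros yields !range !{i32 4, i32 17} -- the interval is
  // half-open, hence the PossibleZeros + 1 upper bound below.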
  auto *IT = dyn_cast<IntegerType>(Op0->getType());
  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
        ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
  assert(II.getIntrinsicID() == Intrinsic::ctpop &&
         "Expected ctpop intrinsic");
  Type *Ty = II.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  Value *Op0 = II.getArgOperand(0);
  Value *X, *Y;

  // ctpop(bitreverse(x)) -> ctpop(x)
  // ctpop(bswap(x)) -> ctpop(x)
  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
    return IC.replaceOperand(II, 0, X);

  // ctpop(rot(x)) -> ctpop(x)
  if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
       match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
      X == Y)
    return IC.replaceOperand(II, 0, X);

  // ctpop(x | -x) -> bitwidth - cttz(x, false)
  if (Op0->hasOneUse() &&
      match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
    auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
    return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
  }

  // ctpop(~x & (x - 1)) -> cttz(x, false)
  if (match(Op0,
            m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
    Function *F =
        Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
    return CallInst::Create(F, {X, IC.Builder.getFalse()});
  }

  // Zext doesn't change the number of set bits, so narrow:
  // ctpop (zext X) --> zext (ctpop X)
  if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
    Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
    return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
  }

  KnownBits Known(BitWidth);
  IC.computeKnownBits(Op0, Known, 0, &II);

  // If all bits are zero except for exactly one fixed bit, then the result
  // must be 0 or 1, and we can get that answer by shifting to LSB:
  // ctpop (X & 32) --> (X & 32) >> 5
  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(
        Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));

  // FIXME: Try to simplify vectors of integers.
  auto *IT = dyn_cast<IntegerType>(Ty);
  if (!IT)
    return nullptr;

  // Add range metadata since known bits can't completely reflect what we know.
  unsigned MinCount = Known.countMinPopulation();
  unsigned MaxCount = Known.countMaxPopulation();
  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
    Metadata *LowAndHigh[] = {
        ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
        ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
    II.setMetadata(LLVMContext::MD_range,
                   MDNode::get(II.getContext(), LowAndHigh));
    return &II;
  }

  return nullptr;
}

/// Convert a table lookup to shufflevector if the mask is constant.
/// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
/// which case we could lower the shufflevector with rev64 instructions
/// as it's actually a byte reverse.
static Value *simplifyNeonTbl1(const IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder) {
  // Bail out if the mask is not a constant.
  auto *C = dyn_cast<Constant>(II.getArgOperand(1));
  if (!C)
    return nullptr;

  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  // Only perform this transformation for <8 x i8> vector types.
  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
    return nullptr;

  int Indexes[8];

  for (unsigned I = 0; I < NumElts; ++I) {
    Constant *COp = C->getAggregateElement(I);

    if (!COp || !isa<ConstantInt>(COp))
      return nullptr;

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    // Make sure the mask indices are in range.
    if ((unsigned)Indexes[I] >= NumElts)
      return nullptr;
  }

  auto *V1 = II.getArgOperand(0);
  auto *V2 = Constant::getNullValue(V1->getType());
  return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
}

// Returns true iff the 2 intrinsics have the same operands, limiting the
// comparison to the first NumOperands.
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.getNumArgOperands() >= NumOperands && "Not enough operands");
  assert(E.getNumArgOperands() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
      return false;
  return true;
}

// Remove trivially empty start/end intrinsic ranges, i.e. a start
// immediately followed by an end (ignoring debuginfo or other
// start/end intrinsics in between). As this handles only the most trivial
// cases, tracking the nesting level is not needed:
//
//   call @llvm.foo.start(i1 0)
//   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
//   call @llvm.foo.end(i1 0)
//   call @llvm.foo.end(i1 0) ; &I
static bool
removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
                          std::function<bool(const IntrinsicInst &)> IsStart) {
  // We start from the end intrinsic and scan backwards, so that InstCombine
  // has already processed (and potentially removed) all the instructions
  // before the end intrinsic.
  BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (isa<DbgInfoIntrinsic>(I) ||
          I->getIntrinsicID() == EndI.getIntrinsicID())
        continue;
      if (IsStart(*I)) {
        if (haveSameOperands(EndI, *I, EndI.getNumArgOperands())) {
          IC.eraseInstFromFunction(*I);
          IC.eraseInstFromFunction(EndI);
          return true;
        }
        // Skip start intrinsics that don't pair with this end intrinsic.
        continue;
      }
    }
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
  removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
    return I.getIntrinsicID() == Intrinsic::vastart ||
           I.getIntrinsicID() == Intrinsic::vacopy;
  });
  return nullptr;
}

static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
  assert(Call.getNumArgOperands() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);
    return &Call;
  }
  return nullptr;
}

/// Creates a result tuple for an overflow intrinsic \p II with a given
/// \p Result and a constant \p Overflow value.
static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
                                        Constant *Overflow) {
  Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
  StructType *ST = cast<StructType>(II->getType());
  Constant *Struct = ConstantStruct::get(ST, V);
  return InsertValueInst::Create(Struct, Result, 0);
}

Instruction *
InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  WithOverflowInst *WO = cast<WithOverflowInst>(II);
  Value *OperationResult = nullptr;
  Constant *OverflowResult = nullptr;
  if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
                            WO->getRHS(), *WO, OperationResult, OverflowResult))
    return createOverflowTuple(WO, OperationResult, OverflowResult);
  return nullptr;
}

static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
                                   const DataLayout &DL, AssumptionCache *AC,
                                   DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
  if (Known.isNonNegative())
    return false;
  if (Known.isNegative())
    return true;

  return isImpliedByDomCondition(
      ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
}

/// If we have a clamp pattern like max (min X, 42), 41 -- where the output
/// can only be one of two possible constant values -- turn that into a select
/// of constants.
static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
                                        InstCombiner::BuilderTy &Builder) {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  Value *X;
  const APInt *C0, *C1;
  if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  switch (II->getIntrinsicID()) {
  case Intrinsic::smax:
    if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_SGT;
    break;
  case Intrinsic::smin:
    if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_SLT;
    break;
  case Intrinsic::umax:
    if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
      Pred = ICmpInst::ICMP_UGT;
    break;
  case Intrinsic::umin:
    if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
      Pred = ICmpInst::ICMP_ULT;
    break;
  default:
    llvm_unreachable("Expected min/max intrinsic");
  }
  if (Pred == CmpInst::BAD_ICMP_PREDICATE)
    return nullptr;

  // max (min X, 42), 41 --> X > 41 ? 42 : 41
  // min (max X, 42), 43 --> X < 43 ? 42 : 43
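  // (The select's true value is the inner clamp constant C0 and its false
  // value is the outer constant I1; requiring the constants to differ by
  // exactly 1 is what guarantees only these two results are possible.)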
  Value *Cmp = Builder.CreateICmp(Pred, X, I1);
  return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
}

/// CallInst simplification. This mostly only handles folding of intrinsic
/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
  // Don't try to simplify calls without uses. It will not do anything useful,
  // but will result in the following folds being skipped.
  if (!CI.use_empty())
    if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
      return replaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, &TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallBase(CI);

  // For atomic unordered mem intrinsics, if the length is not positive or is
  // not a multiple of the element size, then the behavior is undefined.
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->getSExtValue() < 0 ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        CreateNonTerminatorUnreachable(AMI);
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");
        return eraseInstFromFunction(*AMI);
      }

  // Intrinsics cannot occur in an invoke or a callbr, so handle them here
  // instead of in visitCallBase.
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return eraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())
        return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getModule();
          Intrinsic::ID MemCpyID =
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return eraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemTransfer(MTI))
        return I;
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
      if (Instruction *I = SimplifyAnyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  // For fixed width vector result intrinsics, use the generic demanded vector
  // support.
  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
      if (V != II)
        return replaceInstUsesWith(*II, V);
      return II;
    }
  }

  if (II->isCommutative()) {
    if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
      return NewCall;
  }

  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
      return replaceInstUsesWith(CI, V);
    return nullptr;
  case Intrinsic::abs: {
    Value *IIOperand = II->getArgOperand(0);
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

    // abs(-x) -> abs(x)
    // TODO: Copy nsw if it was present on the neg?
    Value *X;
    if (match(IIOperand, m_Neg(m_Value(X))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
      return replaceOperand(*II, 0, X);
    if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
      return replaceOperand(*II, 0, X);

    if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
      // abs(x) -> x if x >= 0
      if (!*Sign)
        return replaceInstUsesWith(*II, IIOperand);

      // abs(x) -> -x if x < 0
      if (IntMinIsPoison)
        return BinaryOperator::CreateNSWNeg(IIOperand);
      return BinaryOperator::CreateNeg(IIOperand);
    }

    // abs (sext X) --> zext (abs X*)
    // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
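    // (Illustrative: with X = i8 -128, abs of the sext is 128; the narrow abs
    // yields i8 -128 (0x80), whose zext is also 128 -- but only if the narrow
    // abs does not treat INT_MIN as poison, hence the 'false' below.)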
    if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
      Value *NarrowAbs =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
      return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
    }

    // Match a complicated way to check if a number is odd/even:
    // abs (srem X, 2) --> and X, 1
    const APInt *C;
    if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
      return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));

    break;
  }
  case Intrinsic::umax:
  case Intrinsic::umin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
    }
    Constant *C;
    if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
      }
    }
    // If both operands of unsigned min/max are sign-extended, it is still ok
    // to narrow the operation.
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::smax:
  case Intrinsic::smin: {
    Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
      Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
      return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
    }

    Constant *C;
    if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
        I0->hasOneUse()) {
      Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
      if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
        Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
        return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
      }
    }

    if (match(I0, m_Not(m_Value(X)))) {
      // max (not X), (not Y) --> not (min X, Y)
      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
      if (match(I1, m_Not(m_Value(Y))) &&
          (I0->hasOneUse() || I1->hasOneUse())) {
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
        return BinaryOperator::CreateNot(InvMaxMin);
      }
      // max (not X), C --> not(min X, ~C)
      if (match(I1, m_Constant(C)) && I0->hasOneUse()) {
        Constant *NotC = ConstantExpr::getNot(C);
        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotC);
        return BinaryOperator::CreateNot(InvMaxMin);
      }
    }

    // smax(X, -X) --> abs(X)
    // smin(X, -X) --> -abs(X)
    // umax(X, -X) --> -abs(X)
    // umin(X, -X) --> abs(X)
    if (isKnownNegation(I0, I1)) {
      // We can choose either operand as the input to abs(), but if we can
      // eliminate the only use of a value, that's better for subsequent
      // transforms/analysis.
      if (I0->hasOneUse() && !I1->hasOneUse())
        std::swap(I0, I1);

      // This is some variant of abs(). See if we can propagate 'nsw' to the abs
      // operation and potentially its negation.
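      // (If the negation carries 'nsw', the negated value cannot be INT_MIN,
      // so the abs below can safely treat INT_MIN as poison.)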
      bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
      Value *Abs = Builder.CreateBinaryIntrinsic(
          Intrinsic::abs, I0,
          ConstantInt::getBool(II->getContext(), IntMinIsPoison));

      // We don't have a "nabs" intrinsic, so negate if needed based on the
      // max/min operation.
      if (IID == Intrinsic::smin || IID == Intrinsic::umax)
        Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
      return replaceInstUsesWith(CI, Abs);
    }

    if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
      return Sel;

    break;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getScalarSizeInBits() -
                   IIOperand->getType()->getScalarSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder.CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
    break;
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
      return replaceInstUsesWith(*II, SkippedBarrier);
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // 0 and 1 are handled in instsimplify
      // powi(x, -1) -> 1/x
      if (Power->isMinusOne())
        return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
                                             II->getArgOperand(0), II);
      // powi(x, 2) -> x*x
      if (Power->equalsInt(2))
        return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
                                             II->getArgOperand(0), II);
    }
    break;

  case Intrinsic::cttz:
  case Intrinsic::ctlz:
    if (auto *I = foldCttzCtlz(*II, *this))
      return I;
    break;

  case Intrinsic::ctpop:
    if (auto *I = foldCtpop(*II, *this))
      return I;
    break;

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
    Type *Ty = II->getType();
    unsigned BitWidth = Ty->getScalarSizeInBits();
    Constant *ShAmtC;
    if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC)) &&
        !ShAmtC->containsConstantExpression()) {
      // Canonicalize a shift amount constant operand to modulo the bit-width.
      Constant *WidthC = ConstantInt::get(Ty, BitWidth);
      Constant *ModuloC = ConstantExpr::getURem(ShAmtC, WidthC);
      if (ModuloC != ShAmtC)
        return replaceOperand(*II, 2, ModuloC);

      assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
                 ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
             "Shift amount expected to be modulo bitwidth");

      // Canonicalize funnel shift right by constant to funnel shift left. This
      // is not entirely arbitrary. For historical reasons, the backend may
      // recognize rotate left patterns but miss rotate right patterns.
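      // (Illustrative: fshr i32 X, Y, 8 is equivalent to fshl i32 X, Y, 24,
      // since the left and right shift amounts always sum to the bit width.)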
      if (IID == Intrinsic::fshr) {
        // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
        Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
        Module *Mod = II->getModule();
        Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
        return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
      }
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");

      // fshl(X, 0, C) --> shl X, C
      // fshl(X, undef, C) --> shl X, C
      if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
        return BinaryOperator::CreateShl(Op0, ShAmtC);

      // fshl(0, X, C) --> lshr X, (BW-C)
      // fshl(undef, X, C) --> lshr X, (BW-C)
      if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
        return BinaryOperator::CreateLShr(Op1,
                                          ConstantExpr::getSub(WidthC, ShAmtC));

      // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
      if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
        Module *Mod = II->getModule();
        Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
        return CallInst::Create(Bswap, { Op0 });
      }
    }

    // Left or right might be masked.
    if (SimplifyDemandedInstructionBits(*II))
      return &CI;

    // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
    // so only the low bits of the shift amount are demanded if the bitwidth is
    // a power-of-2.
    if (!isPowerOf2_32(BitWidth))
      break;
    APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
    KnownBits Op2Known(BitWidth);
    if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
      return &CI;
    break;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    // Given 2 constant operands whose sum does not overflow:
    // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
    // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
    Value *X;
    const APInt *C0, *C1;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
    bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
                             : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
    if (HasNWAdd && match(Arg1, m_APInt(C1))) {
      bool Overflow;
      APInt NewC =
          IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
      if (!Overflow)
        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(Arg1->getType(), NewC)));
    }
    break;
  }

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;
    break;

  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
      return I;

    Constant *C;
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    // Given a constant C that is not the minimum signed value
    // for an integer of a given bit width:
    //
    // ssubo X, C -> saddo X, -C
    if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      // Build a saddo call that is equivalent to the discovered
      // ssubo call.
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
                                             Arg0, NegVal));
    }

    break;
  }

  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    SaturatingInst *SI = cast<SaturatingInst>(II);
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

    // Make use of known overflow information.
    OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
                                        Arg0, Arg1, SI);
    switch (OR) {
    case OverflowResult::MayOverflow:
      break;
    case OverflowResult::NeverOverflows:
      if (SI->isSigned())
        return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
      else
        return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
    case OverflowResult::AlwaysOverflowsLow: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
    }
    case OverflowResult::AlwaysOverflowsHigh: {
      unsigned BitWidth = Ty->getScalarSizeInBits();
      APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
      return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
    }
    }

    // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
    Constant *C;
    if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
        C->isNotMinSignedValue()) {
      Value *NegVal = ConstantExpr::getNeg(C);
      return replaceInstUsesWith(
          *II, Builder.CreateBinaryIntrinsic(
                   Intrinsic::sadd_sat, Arg0, NegVal));
    }

    // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
    // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
    // if Val and Val2 have the same sign
    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      Value *X;
      const APInt *Val, *Val2;
      APInt NewVal;
      bool IsUnsigned =
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          match(Arg1, m_APInt(Val)) &&
          match(Other->getArgOperand(0), m_Value(X)) &&
          match(Other->getArgOperand(1), m_APInt(Val2))) {
        if (IsUnsigned)
          NewVal = Val->uadd_sat(*Val2);
        else if (Val->isNonNegative() == Val2->isNonNegative()) {
          bool Overflow;
          NewVal = Val->sadd_ov(*Val2, Overflow);
          if (Overflow) {
            // Both adds together may add more than SignedMaxValue
            // without saturating the final result.
            break;
          }
        } else {
          // Cannot fold saturated addition with different signs.
          break;
        }

        return replaceInstUsesWith(
            *II, Builder.CreateBinaryIntrinsic(
                     IID, X, ConstantInt::get(II->getType(), NewVal)));
      }
    }
    break;
  }

  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
        (Arg0->hasOneUse() || Arg1->hasOneUse())) {
      // If both operands are negated, invert the call and negate the result:
      // min(-X, -Y) --> -(max(X, Y))
      // max(-X, -Y) --> -(min(X, Y))
      Intrinsic::ID NewIID;
      switch (IID) {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
        break;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
        break;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
        break;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
        break;
      default:
        llvm_unreachable("unexpected intrinsic ID");
      }
      Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
      FNeg->copyIRFlags(II);
      return FNeg;
    }

    // m(m(X, C2), C1) -> m(X, C)
    const APFloat *C1, *C2;
    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
          ((match(M->getArgOperand(0), m_Value(X)) &&
            match(M->getArgOperand(1), m_APFloat(C2))) ||
           (match(M->getArgOperand(1), m_Value(X)) &&
            match(M->getArgOperand(0), m_APFloat(C2))))) {
        APFloat Res(0.0);
        switch (IID) {
        case Intrinsic::maxnum:
          Res = maxnum(*C1, *C2);
          break;
        case Intrinsic::minnum:
          Res = minnum(*C1, *C2);
          break;
        case Intrinsic::maximum:
          Res = maximum(*C1, *C2);
          break;
        case Intrinsic::minimum:
          Res = minimum(*C1, *C2);
          break;
        default:
          llvm_unreachable("unexpected intrinsic ID");
        }
        Instruction *NewCall = Builder.CreateBinaryIntrinsic(
            IID, X, ConstantFP::get(Arg0->getType(), Res), II);
        // TODO: Conservatively intersecting FMF. If Res == C2, the transform
        //       was a simplification (so Arg0 and its original flags could
        //       propagate?)
        NewCall->andIRFlags(M);
        return replaceInstUsesWith(*II, NewCall);
      }
    }

    Value *ExtSrc0;
    Value *ExtSrc1;

    // minnum (fpext x), (fpext y) -> minnum x, y
    // maxnum (fpext x), (fpext y) -> maxnum x, y
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc0)))) &&
        match(II->getArgOperand(1), m_OneUse(m_FPExt(m_Value(ExtSrc1)))) &&
        ExtSrc0->getType() == ExtSrc1->getType()) {
      Function *F = Intrinsic::getDeclaration(
          II->getModule(), II->getIntrinsicID(), {ExtSrc0->getType()});
      CallInst *NewCall = Builder.CreateCall(F, { ExtSrc0, ExtSrc1 });
      NewCall->copyFastMathFlags(II);
      NewCall->takeName(II);
      return new FPExtInst(NewCall, II->getType());
    }

    break;
  }
  case Intrinsic::fmuladd: {
    // Canonicalize fast fmuladd to the separate fmul + fadd.
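    // (fmuladd permits, but does not require, fusing into a single rounded
    // fma; with 'fast' flags the separately rounded fmul+fadd is equally
    // acceptable, so splitting exposes both parts to further folds.)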
    if (II->isFast()) {
      BuilderTy::FastMathFlagGuard Guard(Builder);
      Builder.setFastMathFlags(II->getFastMathFlags());
      Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
                                      II->getArgOperand(1));
      Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
      Add->takeName(II);
      return replaceInstUsesWith(*II, Add);
    }

    // Try to simplify the underlying FMul.
    if (Value *V = SimplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
                                    II->getFastMathFlags(),
                                    SQ.getWithInstruction(II))) {
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::fma: {
    // fma fneg(x), fneg(y), z -> fma x, y, z
    Value *Src0 = II->getArgOperand(0);
    Value *Src1 = II->getArgOperand(1);
    Value *X, *Y;
    if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
      replaceOperand(*II, 0, X);
      replaceOperand(*II, 1, Y);
      return II;
    }

    // fma fabs(x), fabs(x), z -> fma x, x, z
    if (match(Src0, m_FAbs(m_Value(X))) &&
        match(Src1, m_FAbs(m_Specific(X)))) {
      replaceOperand(*II, 0, X);
      replaceOperand(*II, 1, X);
      return II;
    }

    // Try to simplify the underlying FMul. We can only apply simplifications
    // that do not require rounding.
    if (Value *V = SimplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
                                   II->getFastMathFlags(),
                                   SQ.getWithInstruction(II))) {
      auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
      FAdd->copyFastMathFlags(II);
      return FAdd;
    }

    // fma x, y, 0 -> fmul x, y
    // This is always valid for -0.0, but requires nsz for +0.0 as
    // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
    if (match(II->getArgOperand(2), m_NegZeroFP()) ||
        (match(II->getArgOperand(2), m_PosZeroFP()) &&
         II->getFastMathFlags().noSignedZeros()))
      return BinaryOperator::CreateFMulFMF(Src0, Src1, II);

    break;
  }
  case Intrinsic::copysign: {
    Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
    if (SignBitMustBeZero(Sign, &TLI)) {
      // If we know that the sign argument is positive, reduce to FABS:
      // copysign Mag, +Sign --> fabs Mag
      Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
      return replaceInstUsesWith(*II, Fabs);
    }
    // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
    const APFloat *C;
    if (match(Sign, m_APFloat(C)) && C->isNegative()) {
      // If we know that the sign argument is negative, reduce to FNABS:
      // copysign Mag, -Sign --> fneg (fabs Mag)
      Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
      return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
    }

    // Propagate sign argument through nested calls:
    // copysign Mag, (copysign ?, X) --> copysign Mag, X
    Value *X;
    if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
      return replaceOperand(*II, 1, X);

    // Peek through changes of magnitude's sign-bit. This call rewrites those:
    // copysign (fabs X), Sign --> copysign X, Sign
    // copysign (fneg X), Sign --> copysign X, Sign
    if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
      return replaceOperand(*II, 0, X);

    break;
  }
  case Intrinsic::fabs: {
    Value *Cond, *TVal, *FVal;
    if (match(II->getArgOperand(0),
              m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
      // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
      if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
        CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
        CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
        return SelectInst::Create(Cond, AbsT, AbsF);
      }
      // fabs (select Cond, -FVal, FVal) --> fabs FVal
      if (match(TVal, m_FNeg(m_Specific(FVal))))
        return replaceOperand(*II, 0, FVal);
      // fabs (select Cond, TVal, -TVal) --> fabs TVal
      if (match(FVal, m_FNeg(m_Specific(TVal))))
        return replaceOperand(*II, 0, TVal);
    }

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {
    Value *ExtSrc;
    if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
      // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
      Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
      return new FPExtInst(NarrowII, II->getType());
    }
    break;
  }
  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {
    Value *X;
    Value *Src = II->getArgOperand(0);
    if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
      // cos(-x) -> cos(x)
      // cos(fabs(x)) -> cos(x)
      return replaceOperand(*II, 0, X);
    }
    break;
  }
  case Intrinsic::sin: {
    Value *X;
    if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
      // sin(-x) --> -sin(x)
      Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
      FNeg->copyFastMathFlags(II);
      return FNeg;
    }
    break;
  }

  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:
    if (Value *V = simplifyNeonTbl1(*II, Builder))
      return replaceInstUsesWith(*II, V);
    break;

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {
    Value *DataArg = II->getArgOperand(0);
    Value *KeyArg = II->getArgOperand(1);

    // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
    Value *Data, *Key;
    if (match(KeyArg, m_ZeroInt()) &&
        match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
      replaceOperand(*II, 0, Data);
      replaceOperand(*II, 1, Key);
      return II;
    }
    break;
  }
  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    // Simplify Q -> V -> Q conversion.
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      Intrinsic::ID ID0 = Op0->getIntrinsicID();
      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        break;
      Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
      uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
      uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
      // Check if every byte has common bits in Bytes and Mask.
      uint64_t C = Bytes1 & Mask1;
      if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
        return replaceInstUsesWith(*II, Op0->getArgOperand(0));
    }
    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        // Skip over debug info.
        if (SS->getNextNonDebugInstruction() == II) {
          return eraseInstFromFunction(CI);
        }
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI(II);
    Instruction *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (auto *II2 = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II2->getIntrinsicID() == Intrinsic::stackrestore)
            return eraseInstFromFunction(CI);

          // Bail if we cross over an intrinsic with side effects, such as
          // llvm.stacksave or llvm.read_register.
          if (II2->mayHaveSideEffects()) {
            CannotRemove = true;
            break;
          }
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }
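
    // Illustrative example for the scan above (not in the original source):
    //   call void @llvm.stackrestore(i8* %sp1)
    //   call void @llvm.stackrestore(i8* %sp2)
    // The first restore is dead because the second one immediately
    // overwrites its effect, so the loop erases it.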

    // If the stack restore is in a return, resume, or unwind block and if
    // there are no allocas or calls between the restore and the return, nuke
    // the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return eraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::lifetime_end:
    // Asan needs to poison memory to detect invalid access which is possible
    // even for an empty lifetime range.
    if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
        II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
      break;

    if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
          return I.getIntrinsicID() == Intrinsic::lifetime_start;
        }))
      return nullptr;
    break;
  case Intrinsic::assume: {
    Value *IIOperand = II->getArgOperand(0);
    SmallVector<OperandBundleDef, 4> OpBundles;
    II->getOperandBundlesAsDefs(OpBundles);

    /// This will remove the boolean Condition from the assume given as
    /// argument and remove the assume if it becomes useless.
    /// Always returns nullptr for use as a return value.
    auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
      assert(isa<AssumeInst>(Assume));
      if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
        return eraseInstFromFunction(CI);
      replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
      return nullptr;
    };
    // Remove an assume if it is followed by an identical assume.
    // TODO: Do we need this? Unless there are conflicting assumptions, the
    // computeKnownBits(IIOperand) below here eliminates redundant assumes.
    Instruction *Next = II->getNextNonDebugInstruction();
    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return RemoveConditionFromAssume(Next);

    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    FunctionType *AssumeIntrinsicTy = II->getFunctionType();
    Value *AssumeIntrinsic = II->getCalledOperand();
    Value *A, *B;
    if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
                         II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
      return eraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(A), OpBundles, II->getName());
      Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                         Builder.CreateNot(B), II->getName());
      return eraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
    CmpInst::Predicate Pred;
    Instruction *LHS;
    if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
        Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
        LHS->getType()->isPointerTy() &&
        isValidAssumeForContext(II, LHS, &DT)) {
      MDNode *MD = MDNode::get(II->getContext(), None);
      LHS->setMetadata(LLVMContext::MD_nonnull, MD);
      return RemoveConditionFromAssume(II);

      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
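
      // Illustrative IR for the fold above (example not in the original
      // source):
      //   %p = load i32*, i32** %addr
      //   %c = icmp ne i32* %p, null
      //   call void @llvm.assume(i1 %c)
      // becomes a load annotated with !nonnull metadata, and the assume's
      // condition is replaced with 'true' (or the assume is erased):
      //   %p = load i32*, i32** %addr, !nonnull !0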
    }

    // Convert nonnull assume like:
    // %A = icmp ne i32* %PTR, null
    // call void @llvm.assume(i1 %A)
    // into
    // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
    if (EnableKnowledgeRetention &&
        match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
        Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
      if (auto *Replacement = buildAssumeFromKnowledge(
              {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {

        Replacement->insertBefore(Next);
        AC.registerAssumption(Replacement);
        return RemoveConditionFromAssume(II);
      }
    }

    // Convert alignment assume like:
    // %B = ptrtoint i32* %A to i64
    // %C = and i64 %B, Constant
    // %D = icmp eq i64 %C, 0
    // call void @llvm.assume(i1 %D)
    // into
    // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)]
    uint64_t AlignMask;
    if (EnableKnowledgeRetention &&
        match(IIOperand,
              m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
                    m_Zero())) &&
        Pred == CmpInst::ICMP_EQ) {
      if (isPowerOf2_64(AlignMask + 1)) {
        uint64_t Offset = 0;
        match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
        if (match(A, m_PtrToInt(m_Value(A)))) {
          /// Note: this doesn't preserve the offset information but merges
          /// offset and alignment.
          /// TODO: we can generate a GEP instead of merging the alignment with
          /// the offset.
          RetainedKnowledge RK{Attribute::Alignment,
                               (unsigned)MinAlign(Offset, AlignMask + 1), A};
          if (auto *Replacement =
                  buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {

            Replacement->insertAfter(II);
            AC.registerAssumption(Replacement);
          }
          return RemoveConditionFromAssume(II);
        }
      }
    }

    /// Canonicalize Knowledge in operand bundles.
    if (EnableKnowledgeRetention && II->hasOperandBundles()) {
      for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
        auto &BOI = II->bundle_op_info_begin()[Idx];
        RetainedKnowledge RK =
            llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
        if (BOI.End - BOI.Begin > 2)
          continue; // Prevent reducing knowledge in an align with offset since
                    // extracting a RetainedKnowledge from them loses offset
                    // information.
        RetainedKnowledge CanonRK =
            llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
                                            &getAssumptionCache(),
                                            &getDominatorTree());
        if (CanonRK == RK)
          continue;
        if (!CanonRK) {
          if (BOI.End - BOI.Begin > 0) {
            Worklist.pushValue(II->op_begin()[BOI.Begin]);
            Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
          }
          continue;
        }
        assert(RK.AttrKind == CanonRK.AttrKind);
        if (BOI.End - BOI.Begin > 0)
          II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
        if (BOI.End - BOI.Begin > 1)
          II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
              Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
        if (RK.WasOn)
          Worklist.pushValue(RK.WasOn);
        return II;
      }
    }

    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    KnownBits Known(1);
    computeKnownBits(IIOperand, Known, 0, II);
    if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
      return eraseInstFromFunction(*II);
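
    // Illustrative example (not in the original source): in
    //   call void @llvm.assume(i1 %c)
    //   ...
    //   call void @llvm.assume(i1 %c)
    // computeKnownBits on the second condition folds it to 'true' under the
    // dominating assumption, so the second assume is erased just above.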

    // Update the cache of affected values for this assumption (we might be
    // here because we just simplified the condition).
    AC.updateAffectedValues(cast<AssumeInst>(II));
    break;
  }
  case Intrinsic::experimental_guard: {
    // Is this guard followed by another guard? We scan forward over a small
    // fixed window of instructions to handle common cases with conditions
    // computed between guards.
    Instruction *NextInst = II->getNextNonDebugInstruction();
    for (unsigned i = 0; i < GuardWideningWindow; i++) {
      // Note: Using context-free form to avoid compile time blow up
      if (!isSafeToSpeculativelyExecute(NextInst))
        break;
      NextInst = NextInst->getNextNonDebugInstruction();
    }
    Value *NextCond = nullptr;
    if (match(NextInst,
              m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      Value *CurrCond = II->getArgOperand(0);

      // Remove a guard that is immediately preceded by an identical guard.
      // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
      if (CurrCond != NextCond) {
        Instruction *MoveI = II->getNextNonDebugInstruction();
        while (MoveI != NextInst) {
          auto *Temp = MoveI;
          MoveI = MoveI->getNextNonDebugInstruction();
          Temp->moveBefore(II);
        }
        replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
      }
      eraseInstFromFunction(*NextInst);
      return II;
    }
    break;
  }
  case Intrinsic::experimental_vector_insert: {
    Value *Vec = II->getArgOperand(0);
    Value *SubVec = II->getArgOperand(1);
    Value *Idx = II->getArgOperand(2);
    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    // Only canonicalize if the destination vector, Vec, and SubVec are all
    // fixed vectors.
    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // The result of this call is undefined if IdxN is not a constant
      // multiple of the SubVec's minimum vector length OR the insertion
      // overruns Vec.
      if (IdxN % SubVecNumElts != 0 || IdxN + SubVecNumElts > VecNumElts) {
        replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
        return eraseInstFromFunction(CI);
      }

      // An insert that entirely overwrites Vec with SubVec is a nop.
      if (VecNumElts == SubVecNumElts) {
        replaceInstUsesWith(CI, SubVec);
        return eraseInstFromFunction(CI);
      }
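
      // Illustrative example (not in the original source): inserting a
      // <2 x i32> %sub into a <4 x i32> %vec at index 2 becomes
      //   %widen = shufflevector <2 x i32> %sub, <2 x i32> undef,
      //                          <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
      //   %res   = shufflevector <4 x i32> %vec, <4 x i32> %widen,
      //                          <4 x i32> <i32 0, i32 1, i32 4, i32 5>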

      // Widen SubVec into a vector of the same width as Vec, since
      // shufflevector requires the two input vectors to be the same width.
      // Elements beyond the bounds of SubVec within the widened vector are
      // undefined.
      SmallVector<int, 8> WidenMask;
      unsigned i;
      for (i = 0; i != SubVecNumElts; ++i)
        WidenMask.push_back(i);
      for (; i != VecNumElts; ++i)
        WidenMask.push_back(UndefMaskElem);

      Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);

      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != IdxN; ++i)
        Mask.push_back(i);
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
        Mask.push_back(i);
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
        Mask.push_back(i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
      replaceInstUsesWith(CI, Shuffle);
      return eraseInstFromFunction(CI);
    }
    break;
  }
  case Intrinsic::experimental_vector_extract: {
    Value *Vec = II->getArgOperand(0);
    Value *Idx = II->getArgOperand(1);

    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());

    // Only canonicalize if the destination vector and Vec are fixed
    // vectors.
    if (DstTy && VecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      // The result of this call is undefined if IdxN is not a constant
      // multiple of the result type's minimum vector length OR the extraction
      // overruns Vec.
      if (IdxN % DstNumElts != 0 || IdxN + DstNumElts > VecNumElts) {
        replaceInstUsesWith(CI, UndefValue::get(CI.getType()));
        return eraseInstFromFunction(CI);
      }

      // Extracting the entirety of Vec is a nop.
      if (VecNumElts == DstNumElts) {
        replaceInstUsesWith(CI, Vec);
        return eraseInstFromFunction(CI);
      }

      SmallVector<int, 8> Mask;
      for (unsigned i = 0; i != DstNumElts; ++i)
        Mask.push_back(IdxN + i);

      Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
      replaceInstUsesWith(CI, Shuffle);
      return eraseInstFromFunction(CI);
    }
    break;
  }
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {
    // Canonicalize logical or/and reductions:
    // Or reduction for i1 is represented as:
    // %val = bitcast <ReduxWidth x i1> to iReduxWidth
    // %res = cmp ne iReduxWidth %val, 0
    // And reduction for i1 is represented as:
    // %val = bitcast <ReduxWidth x i1> to iReduxWidth
    // %res = cmp eq iReduxWidth %val, -1 (all ones)
    Value *Arg = II->getArgOperand(0);
    Type *RetTy = II->getType();
    if (RetTy == Builder.getInt1Ty())
      if (auto *FVTy = dyn_cast<FixedVectorType>(Arg->getType())) {
        Value *Res = Builder.CreateBitCast(
            Arg, Builder.getIntNTy(FVTy->getNumElements()));
        if (IID == Intrinsic::vector_reduce_and) {
          Res = Builder.CreateICmpEQ(
              Res, ConstantInt::getAllOnesValue(Res->getType()));
        } else {
          assert(IID == Intrinsic::vector_reduce_or &&
                 "Expected or reduction.");
          Res = Builder.CreateIsNotNull(Res);
        }
        replaceInstUsesWith(CI, Res);
        return eraseInstFromFunction(CI);
      }
    break;
  }
  default: {
    // Handle target specific intrinsics
    Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
    if (V.hasValue())
      return V.getValue();
    break;
  }
  }
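
  // Illustrative IR for the i1 reduction canonicalization above (example
  // not in the original source):
  //   %r = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %v)
  // becomes
  //   %val = bitcast <4 x i1> %v to i4
  //   %r = icmp ne i4 %val, 0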
  // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
  // context, so it is handled in visitCallBase and we should trigger it.
  return visitCallBase(*II);
}

// Fence instruction simplification
Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
  // Remove identical consecutive fences.
  Instruction *Next = FI.getNextNonDebugInstruction();
  if (auto *NFI = dyn_cast<FenceInst>(Next))
    if (FI.isIdenticalTo(NFI))
      return eraseInstFromFunction(FI);
  return nullptr;
}

// InvokeInst simplification
Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
  return visitCallBase(II);
}

// CallBrInst simplification
Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
  return visitCallBase(CBI);
}

/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallBase &Call,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types.  We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
      isa<GCResultInst>(Call))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!Call.isPassPointeeByValueArgument(ix))
    return true;

  Type *SrcTy =
      cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = Call.isByValArgument(ix)
                    ? Call.getParamByValType(ix)
                    : cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}

Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    replaceInstUsesWith(*From, With);
  };
  auto InstCombineErase = [this](Instruction *I) {
    eraseInstFromFunction(*I);
  };
  LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
                               InstCombineErase);
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
  }

  return nullptr;
}
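
// Illustrative example for tryOptimizeCall above (not in the original
// source): LibCallSimplifier can rewrite a library call such as
//   %r = call double @pow(double %x, double 2.0)
// into
//   %r = fmul double %x, %x
// after which the original call is dead and is erased by the caller.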

static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp->getIterator(),
                            E = AdjustTramp->getParent()->begin();
       I != E;) {
    Instruction *Inst = &*--I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
static IntrinsicInst *findInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}
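
// Illustrative IR sketch of what findInitTrampoline matches (example not
// from the original source; sizes and names are hypothetical):
//   %tramp = alloca [72 x i8]
//   call void @llvm.init.trampoline(i8* %t, i8* %f, i8* %nest)
//   %fp = call i8* @llvm.adjust.trampoline(i8* %t)
//   ... indirect call through a cast of %fp ...
// The callee is traced back through adjust.trampoline to init.trampoline,
// enabling a direct call to the underlying function with a nest argument.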

void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
                                            const TargetLibraryInfo *TLI) {
  unsigned NumArgs = Call.getNumArgOperands();
  ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
  ConstantInt *Op1C =
      (NumArgs == 1) ? nullptr : dyn_cast<ConstantInt>(Call.getOperand(1));
  // Bail out if the allocation size is zero (or an invalid alignment of zero
  // with aligned_alloc).
  if ((Op0C && Op0C->isNullValue()) || (Op1C && Op1C->isNullValue()))
    return;

  if (isMallocLikeFn(&Call, TLI) && Op0C) {
    if (isOpNewLikeFn(&Call, TLI))
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableBytes(
                            Call.getContext(), Op0C->getZExtValue()));
    else
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableOrNullBytes(
                            Call.getContext(), Op0C->getZExtValue()));
  } else if (isAlignedAllocLikeFn(&Call, TLI)) {
    if (Op1C)
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableOrNullBytes(
                            Call.getContext(), Op1C->getZExtValue()));
    // Add alignment attribute if alignment is a power of two constant.
    if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment) &&
        isKnownNonZero(Call.getOperand(1), DL, 0, &AC, &Call, &DT)) {
      uint64_t AlignmentVal = Op0C->getZExtValue();
      if (llvm::isPowerOf2_64(AlignmentVal)) {
        Call.removeAttribute(AttributeList::ReturnIndex, Attribute::Alignment);
        Call.addAttribute(AttributeList::ReturnIndex,
                          Attribute::getWithAlignment(Call.getContext(),
                                                      Align(AlignmentVal)));
      }
    }
  } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
    Call.addAttribute(AttributeList::ReturnIndex,
                      Attribute::getWithDereferenceableOrNullBytes(
                          Call.getContext(), Op1C->getZExtValue()));
  } else if (isCallocLikeFn(&Call, TLI) && Op0C && Op1C) {
    bool Overflow;
    const APInt &N = Op0C->getValue();
    APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
    if (!Overflow)
      Call.addAttribute(AttributeList::ReturnIndex,
                        Attribute::getWithDereferenceableOrNullBytes(
                            Call.getContext(), Size.getZExtValue()));
  } else if (isStrdupLikeFn(&Call, TLI)) {
    uint64_t Len = GetStringLength(Call.getOperand(0));
    if (Len) {
      // strdup
      if (NumArgs == 1)
        Call.addAttribute(AttributeList::ReturnIndex,
                          Attribute::getWithDereferenceableOrNullBytes(
                              Call.getContext(), Len));
      // strndup
      else if (NumArgs == 2 && Op1C)
        Call.addAttribute(
            AttributeList::ReturnIndex,
            Attribute::getWithDereferenceableOrNullBytes(
                Call.getContext(), std::min(Len, Op1C->getZExtValue() + 1)));
    }
  }
}

/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
  if (isAllocationFn(&Call, &TLI))
    annotateAnyAllocSite(Call, &TLI);

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute.  This is helpful for inlining calls to functions with null
  // checks on their arguments.
  SmallVector<unsigned, 4> ArgNos;
  unsigned ArgNo = 0;

  for (Value *V : Call.args()) {
    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
        isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
      ArgNos.push_back(ArgNo);
    ArgNo++;
  }

  assert(ArgNo == Call.arg_size() && "sanity check");

  if (!ArgNos.empty()) {
    AttributeList AS = Call.getAttributes();
    LLVMContext &Ctx = Call.getContext();
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              Attribute::get(Ctx, Attribute::NonNull));
    Call.setAttributes(AS);
    Changed = true;
  }
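
  // Illustrative example (not in the original source): given
  //   %p = alloca i32
  //   call void @use(i32* %p)
  // the alloca is provably non-null, so the call becomes
  //   call void @use(i32* nonnull %p)
  // which helps when inlining callees that null-check their arguments.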

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/callbr/invoke.
  Value *Callee = Call.getCalledOperand();
  if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee)) {
    // Remove the convergent attr on calls when the callee is not convergent.
    if (Call.isConvergent() && !CalleeF->isConvergent() &&
        !CalleeF->isIntrinsic()) {
      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
                        << "\n");
      Call.setNotConvergent();
      return &Call;
    }

    // If the call and callee calling conventions don't match, and neither one
    // of the calling conventions is compatible with C calling convention
    // this call must be unreachable, as the call is undefined.
    if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
         !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
           TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
         !(Call.getCallingConv() == llvm::CallingConv::C &&
           TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = &Call;
      CreateNonTerminatorUnreachable(OldCall);
      // If OldCall does not return void then replaceInstUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return eraseInstFromFunction(*OldCall);

      // We cannot remove an invoke or a callbr, because it would change the
      // CFG, just change the callee to a null pointer.
      cast<CallBase>(OldCall)->setCalledFunction(
          CalleeF->getFunctionType(),
          Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  }

  if ((isa<ConstantPointerNull>(Callee) &&
       !NullPointerIsDefined(Call.getFunction())) ||
      isa<UndefValue>(Callee)) {
    // If Call does not return void then replaceInstUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!Call.getType()->isVoidTy())
      replaceInstUsesWith(Call, UndefValue::get(Call.getType()));

    if (Call.isTerminator()) {
      // Can't remove an invoke or callbr because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it.
    CreateNonTerminatorUnreachable(&Call);
    return eraseInstFromFunction(Call);
  }

  if (IntrinsicInst *II = findInitTrampoline(Callee))
    return transformCallThroughTrampoline(Call, *II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
         I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
        replaceUse(*I, CI->getOperand(0));
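
        // Illustrative example (not in the original source): for a varargs
        // argument like
        //   call void (i8*, ...) @printf(i8* %fmt,
        //                                i8* bitcast (i32* @g to i8*))
        // the lossless bitcast is stripped and @g is passed directly, since
        // the value travelling through the va_arg area is unchanged.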

        // Update the byval type to match the argument type.
        if (Call.isByValArgument(ix)) {
          Call.removeParamAttr(ix, Attribute::ByVal);
          Call.addParamAttr(
              ix, Attribute::getWithByValType(
                      Call.getContext(),
                      CI->getOperand(0)->getType()->getPointerElementType()));
        }
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    InlineAsm *IA = cast<InlineAsm>(Callee);
    if (!IA->canThrow()) {
      // Normal inline asm calls cannot throw - mark them 'nounwind'.
      Call.setDoesNotThrow();
      Changed = true;
    }
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks run.
    if (I) return eraseInstFromFunction(*I);
  }

  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      Type *CallTy = Call.getType();
      Type *RetArgTy = ReturnedArg->getType();
      if (RetArgTy->canLosslesslyBitCastTo(CallTy))
        return replaceInstUsesWith(
            Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
    }

  if (isAllocLikeFn(&Call, &TLI))
    return visitAllocSite(Call);

  // Handle intrinsics which can be used in both call and invoke context.
  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {
    GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
    SmallPtrSet<Value *, 32> LiveGcValues;
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);

      // Remove the relocation if unused.
      if (GCR.use_empty()) {
        eraseInstFromFunction(GCR);
        continue;
      }

      Value *DerivedPtr = GCR.getDerivedPtr();
      Value *BasePtr = GCR.getBasePtr();

      // Undef is undef, even after relocation.
      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
        replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
        eraseInstFromFunction(GCR);
        continue;
      }

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        // The relocation of null will be null for almost any collector.
        // TODO: provide a hook for this in GCStrategy.  There might be some
        // weird collector this property does not hold for.
        if (isa<ConstantPointerNull>(DerivedPtr)) {
          // Use null-pointer of gc_relocate's type to replace it.
          replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
          eraseInstFromFunction(GCR);
          continue;
        }

        // isKnownNonNull -> nonnull attribute
        if (!GCR.hasRetAttr(Attribute::NonNull) &&
            isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
          GCR.addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
          // We discovered a new fact; re-check users.
          Worklist.pushUsersToWorkList(GCR);
        }
      }

      // If we have two copies of the same pointer in the statepoint argument
      // list, canonicalize to one.  This may let us common gc.relocates.
      if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
          GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
        auto *OpIntTy = GCR.getOperand(2)->getType();
        GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
      }

      // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
      // Canonicalize on the type from the uses to the defs

      // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);
    }
    Optional<OperandBundleUse> Bundle =
        GCSP.getOperandBundle(LLVMContext::OB_gc_live);
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle.hasValue() || NumOfGCLives == Bundle->Inputs.size())
      break;
    // We can reduce the size of gc live bundle.
    DenseMap<Value *, unsigned> Val2Idx;
    std::vector<Value *> NewLiveGc;
    for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
      Value *V = Bundle->Inputs[I];
      if (Val2Idx.count(V))
        continue;
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
      } else
        Val2Idx[V] = NumOfGCLives;
    }
    // Update all gc.relocates
    for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
      GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
      Value *BasePtr = GCR.getBasePtr();
      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      auto *OpIntTy1 = GCR.getOperand(1)->getType();
      GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
      Value *DerivedPtr = GCR.getDerivedPtr();
      assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");
      auto *OpIntTy2 = GCR.getOperand(2)->getType();
      GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
    }
    // Create new statepoint instruction.
    OperandBundleDef NewBundle("gc-live", NewLiveGc);
    return CallBase::Create(&Call, NewBundle);
  }
  default: { break; }
  }

  return Changed ? &Call : nullptr;
}

/// If the callee is a constexpr cast of a function, attempt to move the cast
/// to the arguments of the call/callbr/invoke.
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  auto *Callee =
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
  if (!Callee)
    return false;

  // If this is a call to a thunk function, don't remove the cast.  Thunks are
  // used to transparently forward all incoming parameters and outgoing return
  // values, so it's important to leave the cast in place.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  // If this is a musttail call, the callee's prototype must match the caller's
  // prototype with the exception of pointee types.  The code below doesn't
  // implement that, so we can't do this transform.
  // TODO: Do the transform if it only requires adding pointer casts.
  if (Call.isMustTailCall())
    return false;

  Instruction *Caller = &Call;
  const AttributeList &CallerPAL = Call.getAttributes();
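
  // Illustrative example of this transform (not in the original source):
  //   %r = call i8* bitcast (i32* (i32*)* @f to i8* (i8*)*)(i8* %p)
  // becomes a direct call with the casts moved onto the argument and result:
  //   %a = bitcast i8* %p to i32*
  //   %c = call i32* @f(i32* %a)
  //   %r = bitcast i32* %c to i8*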
  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false; // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false; // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false; // Attribute not compatible with transformed value.
    }

    // If the callbase is an invoke/callbr instruction, and the return value is
    // used by a PHI node in a successor, we cannot change the return type of
    // the call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty()) {
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
      // FIXME: Be conservative for callbr to avoid a quadratic search.
      if (isa<CallBrInst>(Caller))
        return false;
    }
  }

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  // call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false; // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i))
            .overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
      return false; // Attribute not compatible with transformed value.

    if (Call.isInAllocaArgument(i))
      return false; // Cannot transform to and from inalloca.

    if (CallerPAL.hasParamAttribute(i, Attribute::SwiftError))
      return false;

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;

      Type *CurElTy = Call.getParamByValType(i);
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
    if (FT->isVarArg() !=
        cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
            cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    unsigned SRetIdx;
    if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
        SRetIdx > FT->getNumParams())
      return false;
  }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  SmallVector<Value *, 8> Args;
  SmallVector<AttributeSet, 8> ArgAttrs;
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeList::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  LLVMContext &Ctx = Call.getContext();
  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);

    // Add any parameter attributes.
    if (CallerPAL.hasParamAttribute(i, Attribute::ByVal)) {
      AttrBuilder AB(CallerPAL.getParamAttributes(i));
      AB.addByValAttr(NewArg->getType()->getPointerElementType());
      ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
    } else
      ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
    ArgAttrs.push_back(AttributeSet());
  }
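
  // Illustrative example (not in the original source): calling
  //   declare void @f(i32, i32)
  // through a cast that supplies only one argument pads the second
  // parameter with the null (zero) constant of its type so that the
  // resulting direct call type-checks.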

  // If we are dropping arguments to the function, the call may be unreachable
  // (see the TODO below); for a varargs callee, forward the extra arguments
  // through the va_arg area instead.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);

        // Add any parameter attributes.
        ArgAttrs.push_back(CallerPAL.getParamAttributes(i));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();

  if (NewRetTy->isVoidTy())
    Caller->setName(""); // Void type should not have a name.

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  AttributeList NewCallerPAL = AttributeList::get(
      Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);

  SmallVector<OperandBundleDef, 1> OpBundles;
  Call.getOperandBundlesAsDefs(OpBundles);

  CallBase *NewCall;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
    NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
                                   CBI->getIndirectDests(), Args, OpBundles);
  } else {
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
    cast<CallInst>(NewCall)->setTailCallKind(
        cast<CallInst>(Caller)->getTailCallKind());
  }
  NewCall->takeName(Caller);
  NewCall->setCallingConv(Call.getCallingConv());
  NewCall->setAttributes(NewCallerPAL);

  // Preserve prof metadata if any.
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  // Insert a cast of the return type as necessary.
  Instruction *NC = NewCall;
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke/callbr instruction, we should insert it after the
      // first non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
        BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.pushUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    replaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  eraseInstFromFunction(*Caller);
  return true;
}

/// Turn a call to a function created by the init_trampoline /
/// adjust_trampoline intrinsic pair into a direct call to the underlying
/// function.
Instruction *
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 IntrinsicInst &Tramp) {
  Value *Callee = Call.getCalledOperand();
  Type *CalleeTy = Callee->getType();
  FunctionType *FTy = Call.getFunctionType();
  AttributeList Attrs = Call.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
  FunctionType *NestFTy = NestF->getFunctionType();

  AttributeList NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
                                      E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttributes(NestArgNo);
      if (AS.hasAttribute(Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = AS;
        break;
      }
    }

    if (NestTy) {
      std::vector<Value *> NewArgs;
      std::vector<AttributeSet> NewArgAttrs;
      NewArgs.reserve(Call.arg_size() + 1);
      NewArgAttrs.reserve(Call.arg_size());

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      {
        unsigned ArgNo = 0;
        auto I = Call.arg_begin(), E = Call.arg_end();
        do {
          if (ArgNo == NestArgNo) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp.getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewArgAttrs.push_back(NestAttr);
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          NewArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));

          ++ArgNo;
          ++I;
        } while (true);
      }

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type *> NewTypes;
      NewTypes.reserve(FTy->getNumParams() + 1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned ArgNo = 0;
        FunctionType::param_iterator I = FTy->param_begin(),
                                     E = FTy->param_end();

        do {
          if (ArgNo == NestArgNo)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++ArgNo;
          ++I;
        } while (true);
      }
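
      // Illustrative example (not in the original source): with the nest
      // parameter at position 0, an indirect call through the trampoline
      //   call i32 %fp(i32 %a)
      // becomes
      //   call i32 @f(i8* nest %chain, i32 %a)
      // where %chain is the third operand of the matched init.trampoline.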

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      AttributeList NewPAL =
          AttributeList::get(FTy->getContext(), Attrs.getFnAttributes(),
                             Attrs.getRetAttributes(), NewArgAttrs);

      SmallVector<OperandBundleDef, 1> OpBundles;
      Call.getOperandBundlesAsDefs(OpBundles);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
        NewCaller = InvokeInst::Create(NewFTy, NewCallee,
                                       II->getNormalDest(),
                                       II->getUnwindDest(), NewArgs,
                                       OpBundles);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
        NewCaller =
            CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
                               CBI->getIndirectDests(), NewArgs, OpBundles);
        cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
        cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
        cast<CallInst>(NewCaller)->setTailCallKind(
            cast<CallInst>(Call).getTailCallKind());
        cast<CallInst>(NewCaller)->setCallingConv(
            cast<CallInst>(Call).getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      NewCaller->setDebugLoc(Call.getDebugLoc());

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
  Call.setCalledFunction(FTy, NewCallee);
  return &Call;
}