//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/InstCombine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

// FIXME: these limits eventually should be as low as 2.
static constexpr unsigned InstCombineDefaultMaxIterations = 1000;
#ifndef NDEBUG
static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 100;
#else
static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 1000;
#endif

static cl::opt<bool>
EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
                  cl::init(true));

static cl::opt<unsigned> LimitMaxIterations(
    "instcombine-max-iterations",
    cl::desc("Limit the maximum number of instruction combining iterations"),
    cl::init(InstCombineDefaultMaxIterations));

static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
    "instcombine-infinite-loop-threshold",
    cl::desc("Number of instruction combining iterations considered an "
             "infinite loop"),
    cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden);

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

Optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.instCombineIntrinsic(*this, II);
  }
  return None;
}

Optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
                                                KnownBitsComputed);
  }
  return None;
}

Optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2,
    APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }
  return None;
}

Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
  return llvm::EmitGEPOffset(&Builder, DL, GEP);
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal to an illegal type or from a smaller
/// to a larger illegal type. A width of '1' is always treated as a desirable
/// type because i1 is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types. Common/desirable widths are equally treated as
/// legal to convert to, in order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. We don't want to convert from a legal to an illegal type or from a
/// smaller to a larger illegal type. i1 is always treated as a legal type
/// because it is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable as they can be
/// preserved.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
  Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  return true;
}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast To BitCast.
//   inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getPointerTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getPointerTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy())) {
      return CastInst::CreateBitOrPointerCast(PtrToInt->getOperand(0), CastTy,
                                              "", PtrToInt);
    }
  }
  return nullptr;
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
///  Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex).  This puts constants before unary operators before
///     binary operators.
///
///  Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
///  Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
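      // For example (hypothetical values, assuming an integer add):
      //   "(X + 5) + -5": "B op C" is "5 + -5", which simplifies to 0, so the
      //   expression becomes "X + 0" and can later fold away entirely.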
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V.  Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
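      // For example (hypothetical IR, assuming integer adds):
      //   %t0 = add i32 %x, 3
      //   %t1 = add i32 %y, 5
      //   %r  = add i32 %t0, %t1
      // can be rewritten as
      //   %t  = add i32 %x, %y
      //   %r  = add i32 %t, 8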
      Value *A, *B;
      Constant *C1, *C2;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2))))) {
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
                                    BinaryOperator::CreateNUW(Opcode, A, B) :
                                    BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags();
          Flags &= Op0->getFastMathFlags();
          Flags &= Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I);
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, ConstantExpr::get(Opcode, C1, C2));
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns the identity value for the given opcode, which can be
/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
Value *InstCombinerImpl::tryFactorization(BinaryOperator &I,
                                          Instruction::BinaryOps InnerOpcode,
                                          Value *A, Value *B, Value *C,
                                          Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *SimplifiedInst = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode))
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
      // If "B op D" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V) {
        SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
      }
    }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!SimplifiedInst && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if both of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && LHS->hasOneUse() && RHS->hasOneUse())
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V) {
        SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
      }
    }

  if (SimplifiedInst) {
    ++NumFactor;
    SimplifiedInst->takeName(&I);

    // Check if we can add NSW/NUW flags to SimplifiedInst. If so, set them.
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
      if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
        bool HasNSW = false;
        bool HasNUW = false;
        if (isa<OverflowingBinaryOperator>(&I)) {
          HasNSW = I.hasNoSignedWrap();
          HasNUW = I.hasNoUnsignedWrap();
        }

        if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
          HasNSW &= LOBO->hasNoSignedWrap();
          HasNUW &= LOBO->hasNoUnsignedWrap();
        }

        if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
          HasNSW &= ROBO->hasNoSignedWrap();
          HasNUW &= ROBO->hasNoUnsignedWrap();
        }

        if (TopLevelOpcode == Instruction::Add &&
            InnerOpcode == Instruction::Mul) {
          // We can propagate 'nsw' if we know that
          //  %Y = mul nsw i16 %X, C
          //  %Z = add nsw i16 %Y, %X
          // =>
          //  %Z = mul nsw i16 %X, C+1
          //
          // iff C+1 isn't INT_MIN
          const APInt *CInt;
          if (match(V, m_APInt(CInt))) {
            if (!CInt->isMinSignedValue())
              BO->setHasNoSignedWrap(HasNSW);
          }

          // nuw can be propagated with any constant or nuw value.
          BO->setHasNoUnsignedWrap(HasNUW);
        }
      }
    }
  }
  return SimplifiedInst;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  {
    // Factorization.
    Value *A, *B, *C, *D;
    Instruction::BinaryOps LHSOpcode, RHSOpcode;
    if (Op0)
      LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
    if (Op1)
      RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);

    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    if (Op0 && Op1 && LHSOpcode == RHSOpcode)
      if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))
        return V;

    // The instruction has the form "(A op' B) op (C)".  Try to factorize a
    // common term.
    if (Op0)
      if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
        if (Value *V = tryFactorization(I, LHSOpcode, A, B, RHS, Ident))
          return V;

    // The instruction has the form "(B) op (C op' D)".  Try to factorize a
    // common term.
    if (Op1)
      if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
        if (Value *V = tryFactorization(I, RHSOpcode, LHS, Ident, C, D))
          return V;
  }

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;
  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = SimplifyBinOp(Opcode, B, E, FMF, Q);
    False = SimplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = SimplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = SimplifyBinOp(Opcode, C, RHS, FMF, Q);
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = SimplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = SimplifyBinOp(Opcode, LHS, F, FMF, Q);
  }

  if (!True || !False)
    return nullptr;

  Value *SI = Builder.CreateSelect(Cond, True, False);
  SI->takeName(&I);
  return SI;
}

/// Freely adapt every user of V as-if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I) {
  for (User *U : I->users()) {
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br:
      cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
      break;
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants by applying the binary operation to
/// the constant with the two possible values of the extended boolean (0 or -1).
Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
  // TODO: Handle non-commutative binop (constant is operand 0).
  // TODO: Handle zext.
  // TODO: Peek through 'not' of cast.
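  // For example (hypothetical IR):
  //   %s = sext i1 %b to i32
  //   %r = mul i32 %s, 42
  // becomes
  //   %r = select i1 %b, i32 -42, i32 0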
  Value *BO0 = BO.getOperand(0);
  Value *BO1 = BO.getOperand(1);
  Value *X;
  Constant *C;
  if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
      !X->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
  Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
  Constant *Zero = ConstantInt::getNullValue(BO.getType());
  Constant *TVal = ConstantExpr::get(BO.getOpcode(), Ones, C);
  Constant *FVal = ConstantExpr::get(BO.getOpcode(), Zero, C);
  return SelectInst::Create(X, TVal, FVal);
}

static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner::BuilderTy &Builder) {
  if (auto *Cast = dyn_cast<CastInst>(&I))
    return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());

  if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
    assert(canConstantFoldCallTo(II, cast<Function>(II->getCalledOperand())) &&
           "Expected constant-foldable intrinsic");
    Intrinsic::ID IID = II->getIntrinsicID();
    if (II->arg_size() == 1)
      return Builder.CreateUnaryIntrinsic(IID, SO);

    // This works for real binary ops like min/max (where we always expect the
    // constant operand to be canonicalized as op1) and unary ops with a bonus
    // constant argument like ctlz/cttz.
    // TODO: Handle non-commutative binary intrinsics as below for binops.
    assert(II->arg_size() == 2 && "Expected binary intrinsic");
    assert(isa<Constant>(II->getArgOperand(1)) && "Expected constant operand");
    return Builder.CreateBinaryIntrinsic(IID, SO, II->getArgOperand(1));
  }

  assert(I.isBinaryOp() && "Unexpected opcode for select folding");

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (auto *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  Value *NewBO = Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), Op0,
                                     Op1, SO->getName() + ".op");
  if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
    NewBOI->copyIRFlags(&I);
  return NewBO;
}

Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op,
                                                SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse())
    return nullptr;

  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();
  if (!(isa<Constant>(TV) || isa<Constant>(FV)))
    return nullptr;

  // Bool selects with constant operands can be folded to logical ops.
  if (SI->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // If it's a bitcast involving vectors, make sure it has the same number of
  // elements on both sides.
  if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
    VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
    VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

    // Verify that either both or neither are vectors.
    if ((SrcTy == nullptr) != (DestTy == nullptr))
      return nullptr;

    // If vectors, verify that they have the same number of elements.
    if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
      return nullptr;
  }

  // Test if a CmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
    if (CI->hasOneUse()) {
      Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);

      // FIXME: This is a hack to avoid infinite looping with min/max patterns.
      //        We have to ensure that vector constants that only differ with
      //        undef elements are treated as equivalent.
      auto areLooselyEqual = [](Value *A, Value *B) {
        if (A == B)
          return true;

        // Test for vector constants.
        Constant *ConstA, *ConstB;
        if (!match(A, m_Constant(ConstA)) || !match(B, m_Constant(ConstB)))
          return false;

        // TODO: Deal with FP constants?
        if (!A->getType()->isIntOrIntVectorTy() || A->getType() != B->getType())
          return false;

        // Compare for equality including undefs as equal.
        auto *Cmp = ConstantExpr::getCompare(ICmpInst::ICMP_EQ, ConstA, ConstB);
        const APInt *C;
        return match(Cmp, m_APIntAllowUndef(C)) && C->isOne();
      };

      if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) ||
          (areLooselyEqual(FV, Op0) && areLooselyEqual(TV, Op1)))
        return nullptr;
    }
  }

  Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
  Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
  return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
}

static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
                                        InstCombiner::BuilderTy &Builder) {
  bool ConstIsRHS = isa<Constant>(I->getOperand(1));
  Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));

  if (auto *InC = dyn_cast<Constant>(InV)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I->getOpcode(), InC, C);
    return ConstantExpr::get(I->getOpcode(), C, InC);
  }

  Value *Op0 = InV, *Op1 = C;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phi.bo");
  auto *FPInst = dyn_cast<Instruction>(RI);
  if (FPInst && isa<FPMathOperator>(FPInst))
    FPInst->copyFastMathFlags(I);
  return RI;
}

Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return nullptr;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (User *U : PN->users()) {
      Instruction *UI = cast<Instruction>(U);
      if (UI != &I && !I.isIdenticalTo(UI))
        return nullptr;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = nullptr;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    // For non-freeze, require constant operand
    // For freeze, require non-undef, non-poison operand
    if (!isa<FreezeInst>(I) && match(InVal, m_ImmConstant()))
      continue;
    if (isa<FreezeInst>(I) && isGuaranteedNotToBeUndefOrPoison(InVal))
      continue;

    if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
    if (NonConstBB) return nullptr;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (isa<InvokeInst>(InVal))
      if (cast<Instruction>(InVal)->getParent() == NonConstBB)
        return nullptr;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (isPotentiallyReachable(I.getParent(), NonConstBB, nullptr, &DT, LI))
      return nullptr;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  // Also, make sure that the pred block is not dead code.
  if (NonConstBB != nullptr) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(NonConstBB))
      return nullptr;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder.SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = nullptr;
      // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
      // even if currently isNullValue gives false.
      Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
      // For vector constants, we cannot use isNullValue to fold into
      // FalseVInPred versus TrueVInPred. When we have individual nonzero
      // elements in the vector, we will incorrectly fold InC to
      // `TrueVInPred`.
      if (InC && isa<ConstantInt>(InC))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else {
        // Generate the select in the same block as PN's current incoming
        // block. Note: ThisBB need not be the NonConstBB because vector
        // constants which are constants by definition are handled here.
        // FIXME: This can lead to an increase in IR generation because we
        // might generate selects for vector constant phi operand, that could
        // not be folded to TrueVInPred or FalseVInPred as done for
        // ConstantInt. For non-vector phis, this transformation was always
        // profitable because the select would be generated exactly once in
        // the NonConstBB.
        Builder.SetInsertPoint(ThisBB->getTerminator());
        InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
                                   FalseVInPred, "phi.sel");
      }
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = nullptr;
      if (auto *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else
        InV = Builder.CreateCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                C, "phi.cmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i),
                                             Builder);
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (isa<FreezeInst>(&I)) {
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (NonConstBB == PN->getIncomingBlock(i))
        InV = Builder.CreateFreeze(PN->getIncomingValue(i), "phi.fr");
      else
        InV = PN->getIncomingValue(i);
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
                                 I.getType(), "phi.cast");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (User *U : make_early_inc_range(PN->users())) {
    Instruction *User = cast<Instruction>(U);
    if (User == &I) continue;
    replaceInstUsesWith(*User, NewPN);
    eraseInstFromFunction(*User);
  }
  return replaceInstUsesWith(I, NewPN);
}

Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
  // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
  // we are guarding against replicating the binop in >1 predecessor.
  // This could miss matching a phi with 2 constant incoming values.
  auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
  auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
  if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
      Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
    return nullptr;

  // TODO: Remove the restriction for binop being in the same block as the phis.
  if (BO.getParent() != Phi0->getParent() ||
      BO.getParent() != Phi1->getParent())
    return nullptr;

  // Match a pair of incoming constants for one of the predecessor blocks.
  BasicBlock *ConstBB, *OtherBB;
  Constant *C0, *C1;
  if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(0);
    OtherBB = Phi0->getIncomingBlock(1);
  } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
    ConstBB = Phi0->getIncomingBlock(1);
    OtherBB = Phi0->getIncomingBlock(0);
  } else {
    return nullptr;
  }
  if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
    return nullptr;

  // The block that we are hoisting to must reach here unconditionally.
  // Otherwise, we could be speculatively executing an expensive or
  // non-speculative op.
  auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
  if (!PredBlockBranch || PredBlockBranch->isConditional() ||
      !DT.isReachableFromEntry(OtherBB))
    return nullptr;

  // TODO: This check could be tightened to only apply to binops (div/rem) that
  //       are not safe to speculatively execute. But that could allow hoisting
  //       potentially expensive instructions (fdiv for example).
  for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
      return nullptr;

  // Make a new binop in the predecessor block with the non-constant incoming
  // values.
  Builder.SetInsertPoint(PredBlockBranch);
  Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
                                     Phi0->getIncomingValueForBlock(OtherBB),
                                     Phi1->getIncomingValueForBlock(OtherBB));
  if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
    NotFoldedNewBO->copyIRFlags(&BO);

  // Fold constants for the predecessor block with constant incoming values.
  Constant *NewC = ConstantExpr::get(BO.getOpcode(), C0, C1);

  // Replace the binop with a phi of the new values. The old phis are dead.
  PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
  NewPhi->addIncoming(NewBO, OtherBB);
  NewPhi->addIncoming(NewC, ConstBB);
  return NewPhi;
}

Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
  if (!isa<Constant>(I.getOperand(1)))
    return nullptr;

  if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
    if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
      return NewSel;
  } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
    if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
      return NewPhi;
  }
  return nullptr;
}

/// Given a pointer type and a constant offset, determine whether or not there
/// is a sequence of GEP indices into the pointed type that will land us at the
/// specified offset. If so, fill them into NewIndices and return the resultant
/// element type, otherwise return null.
static Type *findElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
                                 SmallVectorImpl<Value *> &NewIndices,
                                 const DataLayout &DL) {
  // Only used by visitGEPOfBitcast(), which is skipped for opaque pointers.
  Type *Ty = PtrTy->getNonOpaquePointerElementType();
  if (!Ty->isSized())
    return nullptr;

  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), IntOffset);
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(Ty, Offset);
  if (!Offset.isZero())
    return nullptr;

  for (const APInt &Index : Indices)
    NewIndices.push_back(ConstantInt::get(PtrTy->getContext(), Index));
  return Ty;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

/// Return a value X such that Val = X * Scale, or null if none.
/// If the multiplication is known not to overflow, then NoSignedWrap is set.
Value *InstCombinerImpl::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
  assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
  assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
         Scale.getBitWidth() && "Scale not compatible with value!");

  // If Val is zero or Scale is one then Val = Val * Scale.
  if (match(Val, m_Zero()) || Scale == 1) {
    NoSignedWrap = true;
    return Val;
  }

  // If Scale is zero then it does not divide Val.
  if (Scale.isMinValue())
    return nullptr;

  // Look through chains of multiplications, searching for a constant that is
  // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
  // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
  // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
  // down from Val:
  //
  //     Val = M1 * X          ||   Analysis starts here and works down
  //      M1 = M2 * Y          ||   Doesn't descend into terms with more
  //      M2 =  Z * 4          \/   than one use
  //
  // Then to modify a term at the bottom:
  //
  //     Val = M1 * X
  //      M1 =  Z * Y          ||   Replaced M2 with Z
  //
  // Then to work back up correcting nsw flags.

  // Op - the term we are currently analyzing.  Starts at Val then drills down.
  // Replaced with its descaled value before exiting from the drill down loop.
  Value *Op = Val;

  // Parent - initially null, but after drilling down notes where Op came from.
  // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
  // 0'th operand of Val.
  std::pair<Instruction *, unsigned> Parent;

  // Set if the transform requires a descaling at deeper levels that doesn't
  // overflow.
  bool RequireNoSignedWrap = false;

  // Log base 2 of the scale. Negative if not a power of 2.
  int32_t logScale = Scale.exactLogBase2();

  for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // If Op is a constant divisible by Scale then descale to the quotient.
      APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
      APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
      if (!Remainder.isMinValue())
        // Not divisible by Scale.
        return nullptr;
      // Replace with the quotient in the parent.
      Op = ConstantInt::get(CI->getType(), Quotient);
      NoSignedWrap = true;
      break;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getOpcode() == Instruction::Mul) {
        // Multiplication.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        // There are three cases for multiplication: multiplication by exactly
        // the scale, multiplication by a constant different to the scale, and
        // multiplication by something else.
        Value *LHS = BO->getOperand(0);
        Value *RHS = BO->getOperand(1);

        if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
          // Multiplication by a constant.
          if (CI->getValue() == Scale) {
            // Multiplication by exactly the scale, replace the multiplication
            // by its left-hand side in the parent.
            Op = LHS;
            break;
          }

          // Otherwise drill down into the constant.
          if (!Op->hasOneUse())
            return nullptr;

          Parent = std::make_pair(BO, 1);
          continue;
        }

        // Multiplication by something else. Drill down into the left-hand side
        // since that's where the reassociate pass puts the good stuff.
        if (!Op->hasOneUse())
          return nullptr;

        Parent = std::make_pair(BO, 0);
        continue;
      }

      if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
          isa<ConstantInt>(BO->getOperand(1))) {
        // Multiplication by a power of 2.
        NoSignedWrap = BO->hasNoSignedWrap();
        if (RequireNoSignedWrap && !NoSignedWrap)
          return nullptr;

        Value *LHS = BO->getOperand(0);
        int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
            getLimitedValue(Scale.getBitWidth());
        // Op = LHS << Amt.

        if (Amt == logScale) {
          // Multiplication by exactly the scale, replace the multiplication
          // by its left-hand side in the parent.
          Op = LHS;
          break;
        }
        if (Amt < logScale || !Op->hasOneUse())
          return nullptr;

        // Multiplication by more than the scale.  Reduce the multiplying
        // amount by the scale in the parent.
        Parent = std::make_pair(BO, 1);
        Op = ConstantInt::get(BO->getType(), Amt - logScale);
        break;
      }
    }

    if (!Op->hasOneUse())
      return nullptr;

    if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getOpcode() == Instruction::SExt) {
        // Op is sign-extended from a smaller type, descale in the smaller type.
        unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        APInt SmallScale = Scale.trunc(SmallSize);
        // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
        // descale Op as (sext Y) * Scale.  In order to have
        //   sext (Y * SmallScale) = (sext Y) * Scale
        // some conditions need to hold however: SmallScale must sign-extend to
        // Scale and the multiplication Y * SmallScale should not overflow.
        if (SmallScale.sext(Scale.getBitWidth()) != Scale)
          // SmallScale does not sign-extend to Scale.
          return nullptr;
        assert(SmallScale.exactLogBase2() == logScale);
        // Require that Y * SmallScale must not overflow.
        RequireNoSignedWrap = true;

        // Drill down through the cast.
        Parent = std::make_pair(Cast, 0);
        Scale = SmallScale;
        continue;
      }

      if (Cast->getOpcode() == Instruction::Trunc) {
        // Op is truncated from a larger type, descale in the larger type.
        // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
        //   trunc (Y * sext Scale) = (trunc Y) * Scale
        // always holds.  However (trunc Y) * Scale may overflow even if
        // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
        // from this point up in the expression (see later).
        if (RequireNoSignedWrap)
          return nullptr;

        // Drill down through the cast.
        unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
        Parent = std::make_pair(Cast, 0);
        Scale = Scale.sext(LargeSize);
        if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
          logScale = -1;
        assert(Scale.exactLogBase2() == logScale);
        continue;
      }
    }

    // Unsupported expression, bail out.
    return nullptr;
  }

  // If Op is zero then Val = Op * Scale.
  if (match(Op, m_Zero())) {
    NoSignedWrap = true;
    return Op;
  }

  // We know that we can successfully descale, so from here on we can safely
  // modify the IR.  Op holds the descaled version of the deepest term in the
  // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
  // not to overflow.

  if (!Parent.first)
    // The expression only had one term.
    return Op;

  // Rewrite the parent using the descaled version of its operand.
  assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
  assert(Op != Parent.first->getOperand(Parent.second) &&
         "Descaling was a no-op?");
  replaceOperand(*Parent.first, Parent.second, Op);
  Worklist.push(Parent.first);

  // Now work back up the expression correcting nsw flags.  The logic is based
  // on the following observation: if X * Y is known not to overflow as a signed
  // multiplication, and Y is replaced by a value Z with smaller absolute value,
  // then X * Z will not overflow as a signed multiplication either.  As we work
  // our way up, having NoSignedWrap 'true' means that the descaled value at the
  // current level has strictly smaller absolute value than the original.
  Instruction *Ancestor = Parent.first;
  do {
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
      // If the multiplication wasn't nsw then we can't say anything about the
      // value of the descaled multiplication, and we have to clear nsw flags
      // from this point on up.
      bool OpNoSignedWrap = BO->hasNoSignedWrap();
      NoSignedWrap &= OpNoSignedWrap;
      if (NoSignedWrap != OpNoSignedWrap) {
        BO->setHasNoSignedWrap(NoSignedWrap);
        Worklist.push(Ancestor);
      }
    } else if (Ancestor->getOpcode() == Instruction::Trunc) {
      // The fact that the descaled input to the trunc has smaller absolute
      // value than the original input doesn't tell us anything useful about
      // the absolute values of the truncations.
      NoSignedWrap = false;
    }
    assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
           "Failed to keep proper track of nsw flags while drilling down?");

    if (Ancestor == Val)
      // Got to the top, all done!
      return Val;

    // Move up one level in the expression.
1636 assert(Ancestor->hasOneUse() && "Drilled down when more than one use!"); 1637 Ancestor = Ancestor->user_back(); 1638 } while (true); 1639 } 1640 1641 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) { 1642 if (!isa<VectorType>(Inst.getType())) 1643 return nullptr; 1644 1645 BinaryOperator::BinaryOps Opcode = Inst.getOpcode(); 1646 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1); 1647 assert(cast<VectorType>(LHS->getType())->getElementCount() == 1648 cast<VectorType>(Inst.getType())->getElementCount()); 1649 assert(cast<VectorType>(RHS->getType())->getElementCount() == 1650 cast<VectorType>(Inst.getType())->getElementCount()); 1651 1652 // If both operands of the binop are vector concatenations, then perform the 1653 // narrow binop on each pair of the source operands followed by concatenation 1654 // of the results. 1655 Value *L0, *L1, *R0, *R1; 1656 ArrayRef<int> Mask; 1657 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) && 1658 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) && 1659 LHS->hasOneUse() && RHS->hasOneUse() && 1660 cast<ShuffleVectorInst>(LHS)->isConcat() && 1661 cast<ShuffleVectorInst>(RHS)->isConcat()) { 1662 // This transform does not have the speculative execution constraint as 1663 // below because the shuffle is a concatenation. The new binops are 1664 // operating on exactly the same elements as the existing binop. 1665 // TODO: We could ease the mask requirement to allow different undef lanes, 1666 // but that requires an analysis of the binop-with-undef output value. 1667 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0); 1668 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0)) 1669 BO->copyIRFlags(&Inst); 1670 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1); 1671 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1)) 1672 BO->copyIRFlags(&Inst); 1673 return new ShuffleVectorInst(NewBO0, NewBO1, Mask); 1674 } 1675 1676 // It may not be safe to reorder shuffles and things like div, urem, etc. 1677 // because we may trap when executing those ops on unknown vector elements. 1678 // See PR20059. 1679 if (!isSafeToSpeculativelyExecute(&Inst)) 1680 return nullptr; 1681 1682 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) { 1683 Value *XY = Builder.CreateBinOp(Opcode, X, Y); 1684 if (auto *BO = dyn_cast<BinaryOperator>(XY)) 1685 BO->copyIRFlags(&Inst); 1686 return new ShuffleVectorInst(XY, M); 1687 }; 1688 1689 // If both arguments of the binary operation are shuffles that use the same 1690 // mask and shuffle within a single vector, move the shuffle after the binop. 1691 Value *V1, *V2; 1692 if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) && 1693 match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) && 1694 V1->getType() == V2->getType() && 1695 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) { 1696 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask) 1697 return createBinOpShuffle(V1, V2, Mask); 1698 } 1699 1700 // If both arguments of a commutative binop are select-shuffles that use the 1701 // same mask with commuted operands, the shuffles are unnecessary. 1702 if (Inst.isCommutative() && 1703 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) && 1704 match(RHS, 1705 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) { 1706 auto *LShuf = cast<ShuffleVectorInst>(LHS); 1707 auto *RShuf = cast<ShuffleVectorInst>(RHS); 1708 // TODO: Allow shuffles that contain undefs in the mask? 
1709 // That is legal, but it reduces undef knowledge. 1710 // TODO: Allow arbitrary shuffles by shuffling after binop? 1711 // That might be legal, but we have to deal with poison. 1712 if (LShuf->isSelect() && 1713 !is_contained(LShuf->getShuffleMask(), UndefMaskElem) && 1714 RShuf->isSelect() && 1715 !is_contained(RShuf->getShuffleMask(), UndefMaskElem)) { 1716 // Example: 1717 // LHS = shuffle V1, V2, <0, 5, 6, 3> 1718 // RHS = shuffle V2, V1, <0, 5, 6, 3> 1719 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2 1720 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2); 1721 NewBO->copyIRFlags(&Inst); 1722 return NewBO; 1723 } 1724 } 1725 1726 // If one argument is a shuffle within one vector and the other is a constant, 1727 // try moving the shuffle after the binary operation. This canonicalization 1728 // intends to move shuffles closer to other shuffles and binops closer to 1729 // other binops, so they can be folded. It may also enable demanded elements 1730 // transforms. 1731 Constant *C; 1732 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType()); 1733 if (InstVTy && 1734 match(&Inst, 1735 m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))), 1736 m_ImmConstant(C))) && 1737 cast<FixedVectorType>(V1->getType())->getNumElements() <= 1738 InstVTy->getNumElements()) { 1739 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() && 1740 "Shuffle should not change scalar type"); 1741 1742 // Find constant NewC that has property: 1743 // shuffle(NewC, ShMask) = C 1744 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>) 1745 // reorder is not possible. A 1-to-1 mapping is not required. Example: 1746 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef> 1747 bool ConstOp1 = isa<Constant>(RHS); 1748 ArrayRef<int> ShMask = Mask; 1749 unsigned SrcVecNumElts = 1750 cast<FixedVectorType>(V1->getType())->getNumElements(); 1751 UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType()); 1752 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar); 1753 bool MayChange = true; 1754 unsigned NumElts = InstVTy->getNumElements(); 1755 for (unsigned I = 0; I < NumElts; ++I) { 1756 Constant *CElt = C->getAggregateElement(I); 1757 if (ShMask[I] >= 0) { 1758 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle"); 1759 Constant *NewCElt = NewVecC[ShMask[I]]; 1760 // Bail out if: 1761 // 1. The constant vector contains a constant expression. 1762 // 2. The shuffle needs an element of the constant vector that can't 1763 // be mapped to a new constant vector. 1764 // 3. This is a widening shuffle that copies elements of V1 into the 1765 // extended elements (extending with undef is allowed). 1766 if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) || 1767 I >= SrcVecNumElts) { 1768 MayChange = false; 1769 break; 1770 } 1771 NewVecC[ShMask[I]] = CElt; 1772 } 1773 // If this is a widening shuffle, we must be able to extend with undef 1774 // elements. If the original binop does not produce an undef in the high 1775 // lanes, then this transform is not safe. 1776 // Similarly for undef lanes due to the shuffle mask, we can only 1777 // transform binops that preserve undef. 1778 // TODO: We could shuffle those non-undef constant values into the 1779 // result by using a constant vector (rather than an undef vector) 1780 // as operand 1 of the new binop, but that might be too aggressive 1781 // for target-independent shuffle creation. 
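        // Illustrative example (hypothetical values): when widening a binop
        // with 'add', the extended lanes compute 'add undef, C', which folds
        // to undef, so the transform is allowed; with 'and' they would fold
        // to 0 (not undef), so MayChange is cleared below.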
1782 if (I >= SrcVecNumElts || ShMask[I] < 0) { 1783 Constant *MaybeUndef = 1784 ConstOp1 ? ConstantExpr::get(Opcode, UndefScalar, CElt) 1785 : ConstantExpr::get(Opcode, CElt, UndefScalar); 1786 if (!match(MaybeUndef, m_Undef())) { 1787 MayChange = false; 1788 break; 1789 } 1790 } 1791 } 1792 if (MayChange) { 1793 Constant *NewC = ConstantVector::get(NewVecC); 1794 // It may not be safe to execute a binop on a vector with undef elements 1795 // because the entire instruction can be folded to undef or create poison 1796 // that did not exist in the original code. 1797 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1)) 1798 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1); 1799 1800 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask) 1801 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask) 1802 Value *NewLHS = ConstOp1 ? V1 : NewC; 1803 Value *NewRHS = ConstOp1 ? NewC : V1; 1804 return createBinOpShuffle(NewLHS, NewRHS, Mask); 1805 } 1806 } 1807 1808 // Try to reassociate to sink a splat shuffle after a binary operation. 1809 if (Inst.isAssociative() && Inst.isCommutative()) { 1810 // Canonicalize shuffle operand as LHS. 1811 if (isa<ShuffleVectorInst>(RHS)) 1812 std::swap(LHS, RHS); 1813 1814 Value *X; 1815 ArrayRef<int> MaskC; 1816 int SplatIndex; 1817 Value *Y, *OtherOp; 1818 if (!match(LHS, 1819 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) || 1820 !match(MaskC, m_SplatOrUndefMask(SplatIndex)) || 1821 X->getType() != Inst.getType() || 1822 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp))))) 1823 return nullptr; 1824 1825 // FIXME: This may not be safe if the analysis allows undef elements. By 1826 // moving 'Y' before the splat shuffle, we are implicitly assuming 1827 // that it is not undef/poison at the splat index. 1828 if (isSplatValue(OtherOp, SplatIndex)) { 1829 std::swap(Y, OtherOp); 1830 } else if (!isSplatValue(Y, SplatIndex)) { 1831 return nullptr; 1832 } 1833 1834 // X and Y are splatted values, so perform the binary operation on those 1835 // values followed by a splat followed by the 2nd binary operation: 1836 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp 1837 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y); 1838 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex); 1839 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask); 1840 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp); 1841 1842 // Intersect FMF on both new binops. Other (poison-generating) flags are 1843 // dropped to be safe. 1844 if (isa<FPMathOperator>(R)) { 1845 R->copyFastMathFlags(&Inst); 1846 R->andIRFlags(RHS); 1847 } 1848 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO)) 1849 NewInstBO->copyIRFlags(R); 1850 return R; 1851 } 1852 1853 return nullptr; 1854 } 1855 1856 /// Try to narrow the width of a binop if at least 1 operand is an extend of 1857 /// of a value. This requires a potentially expensive known bits check to make 1858 /// sure the narrow op does not overflow. 1859 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) { 1860 // We need at least one extended operand. 1861 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1); 1862 1863 // If this is a sub, we swap the operands since we always want an extension 1864 // on the RHS. The LHS can be an extension or a constant. 
1865 if (BO.getOpcode() == Instruction::Sub) 1866 std::swap(Op0, Op1); 1867 1868 Value *X; 1869 bool IsSext = match(Op0, m_SExt(m_Value(X))); 1870 if (!IsSext && !match(Op0, m_ZExt(m_Value(X)))) 1871 return nullptr; 1872 1873 // If both operands are the same extension from the same source type and we 1874 // can eliminate at least one (hasOneUse), this might work. 1875 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt; 1876 Value *Y; 1877 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() && 1878 cast<Operator>(Op1)->getOpcode() == CastOpc && 1879 (Op0->hasOneUse() || Op1->hasOneUse()))) { 1880 // If that did not match, see if we have a suitable constant operand. 1881 // Truncating and extending must produce the same constant. 1882 Constant *WideC; 1883 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC))) 1884 return nullptr; 1885 Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType()); 1886 if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC) 1887 return nullptr; 1888 Y = NarrowC; 1889 } 1890 1891 // Swap back now that we found our operands. 1892 if (BO.getOpcode() == Instruction::Sub) 1893 std::swap(X, Y); 1894 1895 // Both operands have narrow versions. Last step: the math must not overflow 1896 // in the narrow width. 1897 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext)) 1898 return nullptr; 1899 1900 // bo (ext X), (ext Y) --> ext (bo X, Y) 1901 // bo (ext X), C --> ext (bo X, C') 1902 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow"); 1903 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) { 1904 if (IsSext) 1905 NewBinOp->setHasNoSignedWrap(); 1906 else 1907 NewBinOp->setHasNoUnsignedWrap(); 1908 } 1909 return CastInst::Create(CastOpc, NarrowBO, BO.getType()); 1910 } 1911 1912 static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) { 1913 // At least one GEP must be inbounds. 1914 if (!GEP1.isInBounds() && !GEP2.isInBounds()) 1915 return false; 1916 1917 return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) && 1918 (GEP2.isInBounds() || GEP2.hasAllZeroIndices()); 1919 } 1920 1921 /// Thread a GEP operation with constant indices through the constant true/false 1922 /// arms of a select. 1923 static Instruction *foldSelectGEP(GetElementPtrInst &GEP, 1924 InstCombiner::BuilderTy &Builder) { 1925 if (!GEP.hasAllConstantIndices()) 1926 return nullptr; 1927 1928 Instruction *Sel; 1929 Value *Cond; 1930 Constant *TrueC, *FalseC; 1931 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) || 1932 !match(Sel, 1933 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC)))) 1934 return nullptr; 1935 1936 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC' 1937 // Propagate 'inbounds' and metadata from existing instructions. 1938 // Note: using IRBuilder to create the constants for efficiency. 1939 SmallVector<Value *, 4> IndexC(GEP.indices()); 1940 bool IsInBounds = GEP.isInBounds(); 1941 Type *Ty = GEP.getSourceElementType(); 1942 Value *NewTrueC = IsInBounds ? Builder.CreateInBoundsGEP(Ty, TrueC, IndexC) 1943 : Builder.CreateGEP(Ty, TrueC, IndexC); 1944 Value *NewFalseC = IsInBounds ? 
Builder.CreateInBoundsGEP(Ty, FalseC, IndexC)
1945                                  : Builder.CreateGEP(Ty, FalseC, IndexC);
1946   return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
1947 }
1948 
1949 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
1950                                              GEPOperator *Src) {
1951   // Combine Indices - If the source pointer to this getelementptr instruction
1952   // is a getelementptr instruction with matching element type, combine the
1953   // indices of the two getelementptr instructions into a single instruction.
1954   if (Src->getResultElementType() != GEP.getSourceElementType())
1955     return nullptr;
1956 
1957   if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1958     return nullptr;
1959 
1960   if (Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
1961       Src->hasOneUse()) {
1962     Value *GO1 = GEP.getOperand(1);
1963     Value *SO1 = Src->getOperand(1);
1964 
1965     if (LI) {
1966       // Try to reassociate loop invariant GEP chains to enable LICM.
1967       if (Loop *L = LI->getLoopFor(GEP.getParent())) {
1968         // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is
1969         // invariant: this breaks the dependence between GEPs and allows LICM
1970         // to hoist the invariant part out of the loop.
1971         if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {
1972           // We have to be careful here.
1973           // We have something like:
1974           //  %src = getelementptr <ty>, <ty>* %base, <ty> %idx
1975           //  %gep = getelementptr <ty>, <ty>* %src, <ty> %idx2
1976           // If we just swap idx & idx2 then we could inadvertently
1977           // change %src from a vector to a scalar, or vice versa.
1978           // Cases:
1979           //  1) %base a scalar & idx a scalar & idx2 a vector
1980           //     => Swapping idx & idx2 turns %src into a vector type.
1981           //  2) %base a scalar & idx a vector & idx2 a scalar
1982           //     => Swapping idx & idx2 turns %src into a scalar type
1983           //  3) %base, %idx, and %idx2 are scalars
1984           //     => %src & %gep are scalars
1985           //     => swapping idx & idx2 is safe
1986           //  4) %base a vector
1987           //     => %src is a vector
1988           //     => swapping idx & idx2 is safe.
1989           auto *SO0 = Src->getOperand(0);
1990           auto *SO0Ty = SO0->getType();
1991           if (!isa<VectorType>(GEP.getType()) ||  // case 3
1992               isa<VectorType>(SO0Ty)) {           // case 4
1993             Src->setOperand(1, GO1);
1994             GEP.setOperand(1, SO1);
1995             return &GEP;
1996           } else {
1997             // Case 1 or 2
1998             //  -- have to recreate %src & %gep
1999             // put NewSrc at same location as %src
2000             Builder.SetInsertPoint(cast<Instruction>(Src));
2001             Value *NewSrc = Builder.CreateGEP(
2002                 GEP.getSourceElementType(), SO0, GO1, Src->getName());
2003             // Propagate 'inbounds' if the new source was not constant-folded.
2004             if (auto *NewSrcGEPI = dyn_cast<GetElementPtrInst>(NewSrc))
2005               NewSrcGEPI->setIsInBounds(Src->isInBounds());
2006             GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
2007                 GEP.getSourceElementType(), NewSrc, {SO1});
2008             NewGEP->setIsInBounds(GEP.isInBounds());
2009             return NewGEP;
2010           }
2011         }
2012       }
2013     }
2014   }
2015 
2016   // Note that if our source is a gep chain itself then we wait for that
2017   // chain to be resolved before we perform this transformation.  This
2018   // avoids us creating a TON of code in some cases.
2019   if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))
2020     if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
2021       return nullptr;   // Wait until our source is folded to completion.
2022 
2023   SmallVector<Value*, 8> Indices;
2024 
2025   // Find out whether the last index in the source GEP is a sequential idx.
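  // A sequential index is one that steps over an array, vector, or pointer
  // element rather than selecting a struct field, e.g. the %i in
  // 'getelementptr [10 x i32], [10 x i32]* %p, i64 0, i64 %i' (illustrative).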
2026 bool EndsWithSequential = false; 2027 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src); 2028 I != E; ++I) 2029 EndsWithSequential = I.isSequential(); 2030 2031 // Can we combine the two pointer arithmetics offsets? 2032 if (EndsWithSequential) { 2033 // Replace: gep (gep %P, long B), long A, ... 2034 // With: T = long A+B; gep %P, T, ... 2035 Value *SO1 = Src->getOperand(Src->getNumOperands()-1); 2036 Value *GO1 = GEP.getOperand(1); 2037 2038 // If they aren't the same type, then the input hasn't been processed 2039 // by the loop above yet (which canonicalizes sequential index types to 2040 // intptr_t). Just avoid transforming this until the input has been 2041 // normalized. 2042 if (SO1->getType() != GO1->getType()) 2043 return nullptr; 2044 2045 Value *Sum = 2046 SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP)); 2047 // Only do the combine when we are sure the cost after the 2048 // merge is never more than that before the merge. 2049 if (Sum == nullptr) 2050 return nullptr; 2051 2052 // Update the GEP in place if possible. 2053 if (Src->getNumOperands() == 2) { 2054 GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))); 2055 replaceOperand(GEP, 0, Src->getOperand(0)); 2056 replaceOperand(GEP, 1, Sum); 2057 return &GEP; 2058 } 2059 Indices.append(Src->op_begin()+1, Src->op_end()-1); 2060 Indices.push_back(Sum); 2061 Indices.append(GEP.op_begin()+2, GEP.op_end()); 2062 } else if (isa<Constant>(*GEP.idx_begin()) && 2063 cast<Constant>(*GEP.idx_begin())->isNullValue() && 2064 Src->getNumOperands() != 1) { 2065 // Otherwise we can do the fold if the first index of the GEP is a zero 2066 Indices.append(Src->op_begin()+1, Src->op_end()); 2067 Indices.append(GEP.idx_begin()+1, GEP.idx_end()); 2068 } 2069 2070 if (!Indices.empty()) 2071 return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)) 2072 ? GetElementPtrInst::CreateInBounds( 2073 Src->getSourceElementType(), Src->getOperand(0), Indices, 2074 GEP.getName()) 2075 : GetElementPtrInst::Create(Src->getSourceElementType(), 2076 Src->getOperand(0), Indices, 2077 GEP.getName()); 2078 2079 return nullptr; 2080 } 2081 2082 // Note that we may have also stripped an address space cast in between. 2083 Instruction *InstCombinerImpl::visitGEPOfBitcast(BitCastInst *BCI, 2084 GetElementPtrInst &GEP) { 2085 // With opaque pointers, there is no pointer element type we can use to 2086 // adjust the GEP type. 
2087   PointerType *SrcType = cast<PointerType>(BCI->getSrcTy());
2088   if (SrcType->isOpaque())
2089     return nullptr;
2090 
2091   Type *GEPEltType = GEP.getSourceElementType();
2092   Type *SrcEltType = SrcType->getNonOpaquePointerElementType();
2093   Value *SrcOp = BCI->getOperand(0);
2094 
2095   // GEP directly using the source operand if this GEP is accessing an element
2096   // of a bitcasted pointer to vector or array of the same dimensions:
2097   // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z
2098   // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
2099   auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
2100                                         const DataLayout &DL) {
2101     auto *VecVTy = cast<FixedVectorType>(VecTy);
2102     return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
2103            ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
2104            DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
2105   };
2106   if (GEP.getNumOperands() == 3 &&
2107       ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
2108         areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
2109        (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
2110         areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {
2111 
2112     // Create a new GEP here, as using `setOperand()` followed by
2113     // `setSourceElementType()` won't actually update the type of the
2114     // existing GEP Value, which causes issues if this Value is accessed when
2115     // constructing an AddrSpaceCastInst.
2116     SmallVector<Value *, 8> Indices(GEP.indices());
2117     Value *NGEP = GEP.isInBounds()
2118                       ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, Indices)
2119                       : Builder.CreateGEP(SrcEltType, SrcOp, Indices);
2120     NGEP->takeName(&GEP);
2121 
2122     // Preserve GEP address space to satisfy users
2123     if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
2124       return new AddrSpaceCastInst(NGEP, GEP.getType());
2125 
2126     return replaceInstUsesWith(GEP, NGEP);
2127   }
2128 
2129   // See if we can simplify:
2130   //   X = bitcast A* to B*
2131   //   Y = gep X, <...constant indices...>
2132   // into a gep of the original struct. This is important for SROA and alias
2133   // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
2134   unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
2135   APInt Offset(OffsetBits, 0);
2136 
2137   // If the bitcast argument is an allocation, the bitcast is a conversion to
2138   // the actual type of the allocation. Removing such bitcasts results in
2139   // GEPs with an i8* base and pure byte offsets, which means the GEP is not
2140   // aware of the struct or array hierarchy.
2141   // By avoiding such GEPs, phi translation and MemoryDependencyAnalysis have
2142   // a better chance to succeed.
2143   if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&
2144       !isAllocationFn(SrcOp, &TLI)) {
2145     // If this GEP instruction doesn't move the pointer, just replace the GEP
2146     // with a bitcast of the real input to the dest type.
2147     if (!Offset) {
2148       // If the bitcast is of an allocation, and the allocation will be
2149       // converted to match the type of the cast, don't touch this.
2150       if (isa<AllocaInst>(SrcOp)) {
2151         // See if the bitcast simplifies; if so, don't nuke this GEP yet.
2152 if (Instruction *I = visitBitCast(*BCI)) { 2153 if (I != BCI) { 2154 I->takeName(BCI); 2155 BCI->getParent()->getInstList().insert(BCI->getIterator(), I); 2156 replaceInstUsesWith(*BCI, I); 2157 } 2158 return &GEP; 2159 } 2160 } 2161 2162 if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace()) 2163 return new AddrSpaceCastInst(SrcOp, GEP.getType()); 2164 return new BitCastInst(SrcOp, GEP.getType()); 2165 } 2166 2167 // Otherwise, if the offset is non-zero, we need to find out if there is a 2168 // field at Offset in 'A's type. If so, we can pull the cast through the 2169 // GEP. 2170 SmallVector<Value*, 8> NewIndices; 2171 if (findElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices, DL)) { 2172 Value *NGEP = 2173 GEP.isInBounds() 2174 ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, NewIndices) 2175 : Builder.CreateGEP(SrcEltType, SrcOp, NewIndices); 2176 2177 if (NGEP->getType() == GEP.getType()) 2178 return replaceInstUsesWith(GEP, NGEP); 2179 NGEP->takeName(&GEP); 2180 2181 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace()) 2182 return new AddrSpaceCastInst(NGEP, GEP.getType()); 2183 return new BitCastInst(NGEP, GEP.getType()); 2184 } 2185 } 2186 2187 return nullptr; 2188 } 2189 2190 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) { 2191 Value *PtrOp = GEP.getOperand(0); 2192 SmallVector<Value *, 8> Indices(GEP.indices()); 2193 Type *GEPType = GEP.getType(); 2194 Type *GEPEltType = GEP.getSourceElementType(); 2195 bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType); 2196 if (Value *V = SimplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(), 2197 SQ.getWithInstruction(&GEP))) 2198 return replaceInstUsesWith(GEP, V); 2199 2200 // For vector geps, use the generic demanded vector support. 2201 // Skip if GEP return type is scalable. The number of elements is unknown at 2202 // compile-time. 2203 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) { 2204 auto VWidth = GEPFVTy->getNumElements(); 2205 APInt UndefElts(VWidth, 0); 2206 APInt AllOnesEltMask(APInt::getAllOnes(VWidth)); 2207 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask, 2208 UndefElts)) { 2209 if (V != &GEP) 2210 return replaceInstUsesWith(GEP, V); 2211 return &GEP; 2212 } 2213 2214 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if 2215 // possible (decide on canonical form for pointer broadcast), 3) exploit 2216 // undef elements to decrease demanded bits 2217 } 2218 2219 // Eliminate unneeded casts for indices, and replace indices which displace 2220 // by multiples of a zero size type with zero. 2221 bool MadeChange = false; 2222 2223 // Index width may not be the same width as pointer width. 2224 // Data layout chooses the right type based on supported integer types. 2225 Type *NewScalarIndexTy = 2226 DL.getIndexType(GEP.getPointerOperandType()->getScalarType()); 2227 2228 gep_type_iterator GTI = gep_type_begin(GEP); 2229 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; 2230 ++I, ++GTI) { 2231 // Skip indices into struct types. 2232 if (GTI.isStruct()) 2233 continue; 2234 2235 Type *IndexTy = (*I)->getType(); 2236 Type *NewIndexType = 2237 IndexTy->isVectorTy() 2238 ? VectorType::get(NewScalarIndexTy, 2239 cast<VectorType>(IndexTy)->getElementCount()) 2240 : NewScalarIndexTy; 2241 2242 // If the element type has zero size then any index over it is equivalent 2243 // to an index of zero, so replace it with zero if it is not zero already. 
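      // For example, any index over a zero-sized type such as an empty struct
      // '{}' or a zero-length array '[0 x i32]' can be replaced with 0.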
2244 Type *EltTy = GTI.getIndexedType(); 2245 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero()) 2246 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) { 2247 *I = Constant::getNullValue(NewIndexType); 2248 MadeChange = true; 2249 } 2250 2251 if (IndexTy != NewIndexType) { 2252 // If we are using a wider index than needed for this platform, shrink 2253 // it to what we need. If narrower, sign-extend it to what we need. 2254 // This explicit cast can make subsequent optimizations more obvious. 2255 *I = Builder.CreateIntCast(*I, NewIndexType, true); 2256 MadeChange = true; 2257 } 2258 } 2259 if (MadeChange) 2260 return &GEP; 2261 2262 // Check to see if the inputs to the PHI node are getelementptr instructions. 2263 if (auto *PN = dyn_cast<PHINode>(PtrOp)) { 2264 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0)); 2265 if (!Op1) 2266 return nullptr; 2267 2268 // Don't fold a GEP into itself through a PHI node. This can only happen 2269 // through the back-edge of a loop. Folding a GEP into itself means that 2270 // the value of the previous iteration needs to be stored in the meantime, 2271 // thus requiring an additional register variable to be live, but not 2272 // actually achieving anything (the GEP still needs to be executed once per 2273 // loop iteration). 2274 if (Op1 == &GEP) 2275 return nullptr; 2276 2277 int DI = -1; 2278 2279 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) { 2280 auto *Op2 = dyn_cast<GetElementPtrInst>(*I); 2281 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() || 2282 Op1->getSourceElementType() != Op2->getSourceElementType()) 2283 return nullptr; 2284 2285 // As for Op1 above, don't try to fold a GEP into itself. 2286 if (Op2 == &GEP) 2287 return nullptr; 2288 2289 // Keep track of the type as we walk the GEP. 2290 Type *CurTy = nullptr; 2291 2292 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) { 2293 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType()) 2294 return nullptr; 2295 2296 if (Op1->getOperand(J) != Op2->getOperand(J)) { 2297 if (DI == -1) { 2298 // We have not seen any differences yet in the GEPs feeding the 2299 // PHI yet, so we record this one if it is allowed to be a 2300 // variable. 2301 2302 // The first two arguments can vary for any GEP, the rest have to be 2303 // static for struct slots 2304 if (J > 1) { 2305 assert(CurTy && "No current type?"); 2306 if (CurTy->isStructTy()) 2307 return nullptr; 2308 } 2309 2310 DI = J; 2311 } else { 2312 // The GEP is different by more than one input. While this could be 2313 // extended to support GEPs that vary by more than one variable it 2314 // doesn't make sense since it greatly increases the complexity and 2315 // would result in an R+R+R addressing mode which no backend 2316 // directly supports and would need to be broken into several 2317 // simpler instructions anyway. 2318 return nullptr; 2319 } 2320 } 2321 2322 // Sink down a layer of the type for the next iteration. 2323 if (J > 0) { 2324 if (J == 1) { 2325 CurTy = Op1->getSourceElementType(); 2326 } else { 2327 CurTy = 2328 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J)); 2329 } 2330 } 2331 } 2332 } 2333 2334 // If not all GEPs are identical we'll have to create a new PHI node. 2335 // Check that the old PHI node has only one use so that it will get 2336 // removed. 
2337 if (DI != -1 && !PN->hasOneUse()) 2338 return nullptr; 2339 2340 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone()); 2341 if (DI == -1) { 2342 // All the GEPs feeding the PHI are identical. Clone one down into our 2343 // BB so that it can be merged with the current GEP. 2344 } else { 2345 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP 2346 // into the current block so it can be merged, and create a new PHI to 2347 // set that index. 2348 PHINode *NewPN; 2349 { 2350 IRBuilderBase::InsertPointGuard Guard(Builder); 2351 Builder.SetInsertPoint(PN); 2352 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(), 2353 PN->getNumOperands()); 2354 } 2355 2356 for (auto &I : PN->operands()) 2357 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI), 2358 PN->getIncomingBlock(I)); 2359 2360 NewGEP->setOperand(DI, NewPN); 2361 } 2362 2363 GEP.getParent()->getInstList().insert( 2364 GEP.getParent()->getFirstInsertionPt(), NewGEP); 2365 replaceOperand(GEP, 0, NewGEP); 2366 PtrOp = NewGEP; 2367 } 2368 2369 if (auto *Src = dyn_cast<GEPOperator>(PtrOp)) 2370 if (Instruction *I = visitGEPOfGEP(GEP, Src)) 2371 return I; 2372 2373 // Skip if GEP source element type is scalable. The type alloc size is unknown 2374 // at compile-time. 2375 if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) { 2376 unsigned AS = GEP.getPointerAddressSpace(); 2377 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == 2378 DL.getIndexSizeInBits(AS)) { 2379 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2380 2381 bool Matched = false; 2382 uint64_t C; 2383 Value *V = nullptr; 2384 if (TyAllocSize == 1) { 2385 V = GEP.getOperand(1); 2386 Matched = true; 2387 } else if (match(GEP.getOperand(1), 2388 m_AShr(m_Value(V), m_ConstantInt(C)))) { 2389 if (TyAllocSize == 1ULL << C) 2390 Matched = true; 2391 } else if (match(GEP.getOperand(1), 2392 m_SDiv(m_Value(V), m_ConstantInt(C)))) { 2393 if (TyAllocSize == C) 2394 Matched = true; 2395 } 2396 2397 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y), but 2398 // only if both point to the same underlying object (otherwise provenance 2399 // is not necessarily retained). 2400 Value *Y; 2401 Value *X = GEP.getOperand(0); 2402 if (Matched && 2403 match(V, m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) && 2404 getUnderlyingObject(X) == getUnderlyingObject(Y)) 2405 return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType); 2406 } 2407 } 2408 2409 // We do not handle pointer-vector geps here. 2410 if (GEPType->isVectorTy()) 2411 return nullptr; 2412 2413 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0). 2414 Value *StrippedPtr = PtrOp->stripPointerCasts(); 2415 PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType()); 2416 2417 // TODO: The basic approach of these folds is not compatible with opaque 2418 // pointers, because we can't use bitcasts as a hint for a desirable GEP 2419 // type. Instead, we should perform canonicalization directly on the GEP 2420 // type. For now, skip these. 2421 if (StrippedPtr != PtrOp && !StrippedPtrTy->isOpaque()) { 2422 bool HasZeroPointerIndex = false; 2423 Type *StrippedPtrEltTy = StrippedPtrTy->getNonOpaquePointerElementType(); 2424 2425 if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1))) 2426 HasZeroPointerIndex = C->isZero(); 2427 2428 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... 2429 // into : GEP [10 x i8]* X, i32 0, ... 2430 // 2431 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ... 
2432 // into : GEP i8* X, ... 2433 // 2434 // This occurs when the program declares an array extern like "int X[];" 2435 if (HasZeroPointerIndex) { 2436 if (auto *CATy = dyn_cast<ArrayType>(GEPEltType)) { 2437 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ? 2438 if (CATy->getElementType() == StrippedPtrEltTy) { 2439 // -> GEP i8* X, ... 2440 SmallVector<Value *, 8> Idx(drop_begin(GEP.indices())); 2441 GetElementPtrInst *Res = GetElementPtrInst::Create( 2442 StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName()); 2443 Res->setIsInBounds(GEP.isInBounds()); 2444 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) 2445 return Res; 2446 // Insert Res, and create an addrspacecast. 2447 // e.g., 2448 // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ... 2449 // -> 2450 // %0 = GEP i8 addrspace(1)* X, ... 2451 // addrspacecast i8 addrspace(1)* %0 to i8* 2452 return new AddrSpaceCastInst(Builder.Insert(Res), GEPType); 2453 } 2454 2455 if (auto *XATy = dyn_cast<ArrayType>(StrippedPtrEltTy)) { 2456 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ? 2457 if (CATy->getElementType() == XATy->getElementType()) { 2458 // -> GEP [10 x i8]* X, i32 0, ... 2459 // At this point, we know that the cast source type is a pointer 2460 // to an array of the same type as the destination pointer 2461 // array. Because the array type is never stepped over (there 2462 // is a leading zero) we can fold the cast into this GEP. 2463 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) { 2464 GEP.setSourceElementType(XATy); 2465 return replaceOperand(GEP, 0, StrippedPtr); 2466 } 2467 // Cannot replace the base pointer directly because StrippedPtr's 2468 // address space is different. Instead, create a new GEP followed by 2469 // an addrspacecast. 2470 // e.g., 2471 // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*), 2472 // i32 0, ... 2473 // -> 2474 // %0 = GEP [10 x i8] addrspace(1)* X, ... 2475 // addrspacecast i8 addrspace(1)* %0 to i8* 2476 SmallVector<Value *, 8> Idx(GEP.indices()); 2477 Value *NewGEP = 2478 GEP.isInBounds() 2479 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2480 Idx, GEP.getName()) 2481 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2482 GEP.getName()); 2483 return new AddrSpaceCastInst(NewGEP, GEPType); 2484 } 2485 } 2486 } 2487 } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) { 2488 // Skip if GEP source element type is scalable. The type alloc size is 2489 // unknown at compile-time. 2490 // Transform things like: %t = getelementptr i32* 2491 // bitcast ([2 x i32]* %str to i32*), i32 %V into: %t1 = getelementptr [2 2492 // x i32]* %str, i32 0, i32 %V; bitcast 2493 if (StrippedPtrEltTy->isArrayTy() && 2494 DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) == 2495 DL.getTypeAllocSize(GEPEltType)) { 2496 Type *IdxType = DL.getIndexType(GEPType); 2497 Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) }; 2498 Value *NewGEP = 2499 GEP.isInBounds() 2500 ? 
Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2501 GEP.getName()) 2502 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx, 2503 GEP.getName()); 2504 2505 // V and GEP are both pointer types --> BitCast 2506 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, GEPType); 2507 } 2508 2509 // Transform things like: 2510 // %V = mul i64 %N, 4 2511 // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V 2512 // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast 2513 if (GEPEltType->isSized() && StrippedPtrEltTy->isSized()) { 2514 // Check that changing the type amounts to dividing the index by a scale 2515 // factor. 2516 uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2517 uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedSize(); 2518 if (ResSize && SrcSize % ResSize == 0) { 2519 Value *Idx = GEP.getOperand(1); 2520 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 2521 uint64_t Scale = SrcSize / ResSize; 2522 2523 // Earlier transforms ensure that the index has the right type 2524 // according to Data Layout, which considerably simplifies the 2525 // logic by eliminating implicit casts. 2526 assert(Idx->getType() == DL.getIndexType(GEPType) && 2527 "Index type does not match the Data Layout preferences"); 2528 2529 bool NSW; 2530 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 2531 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 2532 // If the multiplication NewIdx * Scale may overflow then the new 2533 // GEP may not be "inbounds". 2534 Value *NewGEP = 2535 GEP.isInBounds() && NSW 2536 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2537 NewIdx, GEP.getName()) 2538 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, NewIdx, 2539 GEP.getName()); 2540 2541 // The NewGEP must be pointer typed, so must the old one -> BitCast 2542 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 2543 GEPType); 2544 } 2545 } 2546 } 2547 2548 // Similarly, transform things like: 2549 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp 2550 // (where tmp = 8*tmp2) into: 2551 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast 2552 if (GEPEltType->isSized() && StrippedPtrEltTy->isSized() && 2553 StrippedPtrEltTy->isArrayTy()) { 2554 // Check that changing to the array element type amounts to dividing the 2555 // index by a scale factor. 2556 uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); 2557 uint64_t ArrayEltSize = 2558 DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) 2559 .getFixedSize(); 2560 if (ResSize && ArrayEltSize % ResSize == 0) { 2561 Value *Idx = GEP.getOperand(1); 2562 unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); 2563 uint64_t Scale = ArrayEltSize / ResSize; 2564 2565 // Earlier transforms ensure that the index has the right type 2566 // according to the Data Layout, which considerably simplifies 2567 // the logic by eliminating implicit casts. 2568 assert(Idx->getType() == DL.getIndexType(GEPType) && 2569 "Index type does not match the Data Layout preferences"); 2570 2571 bool NSW; 2572 if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { 2573 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. 2574 // If the multiplication NewIdx * Scale may overflow then the new 2575 // GEP may not be "inbounds". 2576 Type *IndTy = DL.getIndexType(GEPType); 2577 Value *Off[2] = {Constant::getNullValue(IndTy), NewIdx}; 2578 2579 Value *NewGEP = 2580 GEP.isInBounds() && NSW 2581 ? 
Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, 2582 Off, GEP.getName()) 2583 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Off, 2584 GEP.getName()); 2585 // The NewGEP must be pointer typed, so must the old one -> BitCast 2586 return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, 2587 GEPType); 2588 } 2589 } 2590 } 2591 } 2592 } 2593 2594 // addrspacecast between types is canonicalized as a bitcast, then an 2595 // addrspacecast. To take advantage of the below bitcast + struct GEP, look 2596 // through the addrspacecast. 2597 Value *ASCStrippedPtrOp = PtrOp; 2598 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) { 2599 // X = bitcast A addrspace(1)* to B addrspace(1)* 2600 // Y = addrspacecast A addrspace(1)* to B addrspace(2)* 2601 // Z = gep Y, <...constant indices...> 2602 // Into an addrspacecasted GEP of the struct. 2603 if (auto *BC = dyn_cast<BitCastInst>(ASC->getOperand(0))) 2604 ASCStrippedPtrOp = BC; 2605 } 2606 2607 if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp)) 2608 if (Instruction *I = visitGEPOfBitcast(BCI, GEP)) 2609 return I; 2610 2611 if (!GEP.isInBounds()) { 2612 unsigned IdxWidth = 2613 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace()); 2614 APInt BasePtrOffset(IdxWidth, 0); 2615 Value *UnderlyingPtrOp = 2616 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, 2617 BasePtrOffset); 2618 if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) { 2619 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) && 2620 BasePtrOffset.isNonNegative()) { 2621 APInt AllocSize( 2622 IdxWidth, 2623 DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize()); 2624 if (BasePtrOffset.ule(AllocSize)) { 2625 return GetElementPtrInst::CreateInBounds( 2626 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName()); 2627 } 2628 } 2629 } 2630 } 2631 2632 if (Instruction *R = foldSelectGEP(GEP, Builder)) 2633 return R; 2634 2635 return nullptr; 2636 } 2637 2638 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, 2639 Instruction *AI) { 2640 if (isa<ConstantPointerNull>(V)) 2641 return true; 2642 if (auto *LI = dyn_cast<LoadInst>(V)) 2643 return isa<GlobalVariable>(LI->getPointerOperand()); 2644 // Two distinct allocations will never be equal. 2645 return isAllocLikeFn(V, &TLI) && V != AI; 2646 } 2647 2648 /// Given a call CB which uses an address UsedV, return true if we can prove the 2649 /// call's only possible effect is storing to V. 2650 static bool isRemovableWrite(CallBase &CB, Value *UsedV, 2651 const TargetLibraryInfo &TLI) { 2652 if (!CB.use_empty()) 2653 // TODO: add recursion if returned attribute is present 2654 return false; 2655 2656 if (CB.isTerminator()) 2657 // TODO: remove implementation restriction 2658 return false; 2659 2660 if (!CB.willReturn() || !CB.doesNotThrow()) 2661 return false; 2662 2663 // If the only possible side effect of the call is writing to the alloca, 2664 // and the result isn't used, we can safely remove any reads implied by the 2665 // call including those which might read the alloca itself. 
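  // For instance (illustrative), a memset-like libcall whose destination is
  // the allocation being removed and whose result is unused qualifies here.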
2666 Optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI); 2667 return Dest && Dest->Ptr == UsedV; 2668 } 2669 2670 static bool isAllocSiteRemovable(Instruction *AI, 2671 SmallVectorImpl<WeakTrackingVH> &Users, 2672 const TargetLibraryInfo &TLI) { 2673 SmallVector<Instruction*, 4> Worklist; 2674 const Optional<StringRef> Family = getAllocationFamily(AI, &TLI); 2675 Worklist.push_back(AI); 2676 2677 do { 2678 Instruction *PI = Worklist.pop_back_val(); 2679 for (User *U : PI->users()) { 2680 Instruction *I = cast<Instruction>(U); 2681 switch (I->getOpcode()) { 2682 default: 2683 // Give up the moment we see something we can't handle. 2684 return false; 2685 2686 case Instruction::AddrSpaceCast: 2687 case Instruction::BitCast: 2688 case Instruction::GetElementPtr: 2689 Users.emplace_back(I); 2690 Worklist.push_back(I); 2691 continue; 2692 2693 case Instruction::ICmp: { 2694 ICmpInst *ICI = cast<ICmpInst>(I); 2695 // We can fold eq/ne comparisons with null to false/true, respectively. 2696 // We also fold comparisons in some conditions provided the alloc has 2697 // not escaped (see isNeverEqualToUnescapedAlloc). 2698 if (!ICI->isEquality()) 2699 return false; 2700 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0; 2701 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI)) 2702 return false; 2703 Users.emplace_back(I); 2704 continue; 2705 } 2706 2707 case Instruction::Call: 2708 // Ignore no-op and store intrinsics. 2709 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2710 switch (II->getIntrinsicID()) { 2711 default: 2712 return false; 2713 2714 case Intrinsic::memmove: 2715 case Intrinsic::memcpy: 2716 case Intrinsic::memset: { 2717 MemIntrinsic *MI = cast<MemIntrinsic>(II); 2718 if (MI->isVolatile() || MI->getRawDest() != PI) 2719 return false; 2720 LLVM_FALLTHROUGH; 2721 } 2722 case Intrinsic::assume: 2723 case Intrinsic::invariant_start: 2724 case Intrinsic::invariant_end: 2725 case Intrinsic::lifetime_start: 2726 case Intrinsic::lifetime_end: 2727 case Intrinsic::objectsize: 2728 Users.emplace_back(I); 2729 continue; 2730 case Intrinsic::launder_invariant_group: 2731 case Intrinsic::strip_invariant_group: 2732 Users.emplace_back(I); 2733 Worklist.push_back(I); 2734 continue; 2735 } 2736 } 2737 2738 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) { 2739 Users.emplace_back(I); 2740 continue; 2741 } 2742 2743 if (isFreeCall(I, &TLI) && getAllocationFamily(I, &TLI) == Family) { 2744 assert(Family); 2745 Users.emplace_back(I); 2746 continue; 2747 } 2748 2749 if (isReallocLikeFn(I, &TLI) && 2750 getAllocationFamily(I, &TLI) == Family) { 2751 assert(Family); 2752 Users.emplace_back(I); 2753 Worklist.push_back(I); 2754 continue; 2755 } 2756 2757 return false; 2758 2759 case Instruction::Store: { 2760 StoreInst *SI = cast<StoreInst>(I); 2761 if (SI->isVolatile() || SI->getPointerOperand() != PI) 2762 return false; 2763 Users.emplace_back(I); 2764 continue; 2765 } 2766 } 2767 llvm_unreachable("missing a return?"); 2768 } 2769 } while (!Worklist.empty()); 2770 return true; 2771 } 2772 2773 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) { 2774 assert(isa<AllocaInst>(MI) || isAllocRemovable(&cast<CallBase>(MI), &TLI)); 2775 2776 // If we have a malloc call which is only used in any amount of comparisons to 2777 // null and free calls, delete the calls and replace the comparisons with true 2778 // or false as appropriate. 
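  // Illustrative IR (hypothetical example):
  //   %p = call i8* @malloc(i64 4)
  //   %c = icmp eq i8* %p, null
  //   call void @free(i8* %p)
  // Here %c can be folded to false and both calls deleted.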
2779 2780 // This is based on the principle that we can substitute our own allocation 2781 // function (which will never return null) rather than knowledge of the 2782 // specific function being called. In some sense this can change the permitted 2783 // outputs of a program (when we convert a malloc to an alloca, the fact that 2784 // the allocation is now on the stack is potentially visible, for example), 2785 // but we believe in a permissible manner. 2786 SmallVector<WeakTrackingVH, 64> Users; 2787 2788 // If we are removing an alloca with a dbg.declare, insert dbg.value calls 2789 // before each store. 2790 SmallVector<DbgVariableIntrinsic *, 8> DVIs; 2791 std::unique_ptr<DIBuilder> DIB; 2792 if (isa<AllocaInst>(MI)) { 2793 findDbgUsers(DVIs, &MI); 2794 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false)); 2795 } 2796 2797 if (isAllocSiteRemovable(&MI, Users, TLI)) { 2798 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2799 // Lowering all @llvm.objectsize calls first because they may 2800 // use a bitcast/GEP of the alloca we are removing. 2801 if (!Users[i]) 2802 continue; 2803 2804 Instruction *I = cast<Instruction>(&*Users[i]); 2805 2806 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 2807 if (II->getIntrinsicID() == Intrinsic::objectsize) { 2808 Value *Result = 2809 lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true); 2810 replaceInstUsesWith(*I, Result); 2811 eraseInstFromFunction(*I); 2812 Users[i] = nullptr; // Skip examining in the next loop. 2813 } 2814 } 2815 } 2816 for (unsigned i = 0, e = Users.size(); i != e; ++i) { 2817 if (!Users[i]) 2818 continue; 2819 2820 Instruction *I = cast<Instruction>(&*Users[i]); 2821 2822 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) { 2823 replaceInstUsesWith(*C, 2824 ConstantInt::get(Type::getInt1Ty(C->getContext()), 2825 C->isFalseWhenEqual())); 2826 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 2827 for (auto *DVI : DVIs) 2828 if (DVI->isAddressOfVariable()) 2829 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB); 2830 } else { 2831 // Casts, GEP, or anything else: we're about to delete this instruction, 2832 // so it can not have any valid uses. 2833 replaceInstUsesWith(*I, PoisonValue::get(I->getType())); 2834 } 2835 eraseInstFromFunction(*I); 2836 } 2837 2838 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) { 2839 // Replace invoke with a NOP intrinsic to maintain the original CFG 2840 Module *M = II->getModule(); 2841 Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing); 2842 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), 2843 None, "", II->getParent()); 2844 } 2845 2846 // Remove debug intrinsics which describe the value contained within the 2847 // alloca. In addition to removing dbg.{declare,addr} which simply point to 2848 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.: 2849 // 2850 // ``` 2851 // define void @foo(i32 %0) { 2852 // %a = alloca i32 ; Deleted. 2853 // store i32 %0, i32* %a 2854 // dbg.value(i32 %0, "arg0") ; Not deleted. 2855 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted. 2856 // call void @trivially_inlinable_no_op(i32* %a) 2857 // ret void 2858 // } 2859 // ``` 2860 // 2861 // This may not be required if we stop describing the contents of allocas 2862 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in 2863 // the LowerDbgDeclare utility. 2864 // 2865 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the 2866 // "arg0" dbg.value may be stale after the call. 
However, failing to remove
2867     // the DW_OP_deref dbg.value causes large gaps in location coverage.
2868     for (auto *DVI : DVIs)
2869       if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
2870         DVI->eraseFromParent();
2871 
2872     return eraseInstFromFunction(MI);
2873   }
2874   return nullptr;
2875 }
2876 
2877 /// Move the call to free before a NULL test.
2878 ///
2879 /// Check if this free is accessed after its argument has been tested
2880 /// against NULL (property 0).
2881 /// If yes, it is legal to move this call into its predecessor block.
2882 ///
2883 /// The move is performed only if the block containing the call to free
2884 /// will be removed, i.e.:
2885 /// 1. it has only one predecessor P, and P has two successors
2886 /// 2. it contains the call, noops, and an unconditional branch
2887 /// 3. its successor is the same as its predecessor's successor
2888 ///
2889 /// Profitability is not a concern here; this function should be called
2890 /// only if the caller knows this transformation would be profitable
2891 /// (e.g., for code size).
2892 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
2893                                                 const DataLayout &DL) {
2894   Value *Op = FI.getArgOperand(0);
2895   BasicBlock *FreeInstrBB = FI.getParent();
2896   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2897 
2898   // Validate part of constraint #1: Only one predecessor
2899   // FIXME: We could handle more than one predecessor, but in that case we
2900   // would duplicate the call to free in each predecessor and it may
2901   // not be profitable even for code size.
2902   if (!PredBB)
2903     return nullptr;
2904 
2905   // Validate constraint #2: Does this block contain only the call to
2906   // free, noops, and an unconditional branch?
2907   BasicBlock *SuccBB;
2908   Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
2909   if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
2910     return nullptr;
2911 
2912   // If there are only 2 instructions in the block, at this point they must
2913   // be the call to free and the unconditional branch.
2914   // If there are more than 2 instructions, check that they are noops,
2915   // i.e., they won't hurt the performance of the generated code.
2916   if (FreeInstrBB->size() != 2) {
2917     for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
2918       if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2919         continue;
2920       auto *Cast = dyn_cast<CastInst>(&Inst);
2921       if (!Cast || !Cast->isNoopCast(DL))
2922         return nullptr;
2923     }
2924   }
2925   // Validate the rest of constraint #1 by matching on the pred branch.
2926   Instruction *TI = PredBB->getTerminator();
2927   BasicBlock *TrueBB, *FalseBB;
2928   ICmpInst::Predicate Pred;
2929   if (!match(TI, m_Br(m_ICmp(Pred,
2930                              m_CombineOr(m_Specific(Op),
2931                                          m_Specific(Op->stripPointerCasts())),
2932                              m_Zero()),
2933                       TrueBB, FalseBB)))
2934     return nullptr;
2935   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2936     return nullptr;
2937 
2938   // Validate constraint #3: Ensure the null case just falls through.
2939   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2940     return nullptr;
2941   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2942          "Broken CFG: missing edge from predecessor to successor");
2943 
2944   // At this point, we know that everything in FreeInstrBB can be moved
2945   // before TI.
2946 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) { 2947 if (&Instr == FreeInstrBBTerminator) 2948 break; 2949 Instr.moveBefore(TI); 2950 } 2951 assert(FreeInstrBB->size() == 1 && 2952 "Only the branch instruction should remain"); 2953 2954 // Now that we've moved the call to free before the NULL check, we have to 2955 // remove any attributes on its parameter that imply it's non-null, because 2956 // those attributes might have only been valid because of the NULL check, and 2957 // we can get miscompiles if we keep them. This is conservative if non-null is 2958 // also implied by something other than the NULL check, but it's guaranteed to 2959 // be correct, and the conservativeness won't matter in practice, since the 2960 // attributes are irrelevant for the call to free itself and the pointer 2961 // shouldn't be used after the call. 2962 AttributeList Attrs = FI.getAttributes(); 2963 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull); 2964 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable); 2965 if (Dereferenceable.isValid()) { 2966 uint64_t Bytes = Dereferenceable.getDereferenceableBytes(); 2967 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, 2968 Attribute::Dereferenceable); 2969 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes); 2970 } 2971 FI.setAttributes(Attrs); 2972 2973 return &FI; 2974 } 2975 2976 Instruction *InstCombinerImpl::visitFree(CallInst &FI) { 2977 Value *Op = FI.getArgOperand(0); 2978 2979 // free undef -> unreachable. 2980 if (isa<UndefValue>(Op)) { 2981 // Leave a marker since we can't modify the CFG here. 2982 CreateNonTerminatorUnreachable(&FI); 2983 return eraseInstFromFunction(FI); 2984 } 2985 2986 // If we have 'free null' delete the instruction. This can happen in stl code 2987 // when lots of inlining happens. 2988 if (isa<ConstantPointerNull>(Op)) 2989 return eraseInstFromFunction(FI); 2990 2991 // If we had free(realloc(...)) with no intervening uses, then eliminate the 2992 // realloc() entirely. 2993 if (CallInst *CI = dyn_cast<CallInst>(Op)) { 2994 if (CI->hasOneUse() && isReallocLikeFn(CI, &TLI)) { 2995 return eraseInstFromFunction( 2996 *replaceInstUsesWith(*CI, CI->getOperand(0))); 2997 } 2998 } 2999 3000 // If we optimize for code size, try to move the call to free before the null 3001 // test so that simplify cfg can remove the empty block and dead code 3002 // elimination the branch. I.e., helps to turn something like: 3003 // if (foo) free(foo); 3004 // into 3005 // free(foo); 3006 // 3007 // Note that we can only do this for 'free' and not for any flavor of 3008 // 'operator delete'; there is no 'operator delete' symbol for which we are 3009 // permitted to invent a call, even if we're passing in a null pointer. 
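  // Illustrative sketch (not from the original source) of the CFG shape that
  // tryToMoveFreeBeforeNullTest handles; block and value names here are
  // hypothetical:
  //
  //   pred:
  //     %isnull = icmp eq i8* %foo, null
  //     br i1 %isnull, label %cont, label %dofree
  //   dofree:                      ; single predecessor, only the free + br
  //     call void @free(i8* %foo)
  //     br label %cont
  //   cont:
  //     ...
  //
  // After the transform, the call is hoisted before the branch in %pred and
  // %dofree holds only the unconditional branch, which SimplifyCFG and dead
  // code elimination can then remove.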
3010 if (MinimizeSize) { 3011 LibFunc Func; 3012 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free) 3013 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL)) 3014 return I; 3015 } 3016 3017 return nullptr; 3018 } 3019 3020 static bool isMustTailCall(Value *V) { 3021 if (auto *CI = dyn_cast<CallInst>(V)) 3022 return CI->isMustTailCall(); 3023 return false; 3024 } 3025 3026 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) { 3027 if (RI.getNumOperands() == 0) // ret void 3028 return nullptr; 3029 3030 Value *ResultOp = RI.getOperand(0); 3031 Type *VTy = ResultOp->getType(); 3032 if (!VTy->isIntegerTy() || isa<Constant>(ResultOp)) 3033 return nullptr; 3034 3035 // Don't replace result of musttail calls. 3036 if (isMustTailCall(ResultOp)) 3037 return nullptr; 3038 3039 // There might be assume intrinsics dominating this return that completely 3040 // determine the value. If so, constant fold it. 3041 KnownBits Known = computeKnownBits(ResultOp, 0, &RI); 3042 if (Known.isConstant()) 3043 return replaceOperand(RI, 0, 3044 Constant::getIntegerValue(VTy, Known.getConstant())); 3045 3046 return nullptr; 3047 } 3048 3049 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()! 3050 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) { 3051 // Try to remove the previous instruction if it must lead to unreachable. 3052 // This includes instructions like stores and "llvm.assume" that may not get 3053 // removed by simple dead code elimination. 3054 while (Instruction *Prev = I.getPrevNonDebugInstruction()) { 3055 // While we theoretically can erase EH, that would result in a block that 3056 // used to start with an EH no longer starting with EH, which is invalid. 3057 // To make it valid, we'd need to fixup predecessors to no longer refer to 3058 // this block, but that changes CFG, which is not allowed in InstCombine. 3059 if (Prev->isEHPad()) 3060 return nullptr; // Can not drop any more instructions. We're done here. 3061 3062 if (!isGuaranteedToTransferExecutionToSuccessor(Prev)) 3063 return nullptr; // Can not drop any more instructions. We're done here. 3064 // Otherwise, this instruction can be freely erased, 3065 // even if it is not side-effect free. 3066 3067 // A value may still have uses before we process it here (for example, in 3068 // another unreachable block), so convert those to poison. 3069 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType())); 3070 eraseInstFromFunction(*Prev); 3071 } 3072 assert(I.getParent()->sizeWithoutDebug() == 1 && "The block is now empty."); 3073 // FIXME: recurse into unconditional predecessors? 3074 return nullptr; 3075 } 3076 3077 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) { 3078 assert(BI.isUnconditional() && "Only for unconditional branches."); 3079 3080 // If this store is the second-to-last instruction in the basic block 3081 // (excluding debug info and bitcasts of pointers) and if the block ends with 3082 // an unconditional branch, try to move the store to the successor block. 
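  // A minimal sketch of the case this enables (illustrative, assuming the
  // usual if/else diamond; names are hypothetical):
  //
  //   then:                            else:
  //     store i32 1, i32* %p             store i32 2, i32* %p
  //     br label %merge                  br label %merge
  //
  // mergeStoreIntoSuccessor() can replace both stores with a phi plus a
  // single store at the top of %merge:
  //
  //   merge:
  //     %v = phi i32 [ 1, %then ], [ 2, %else ]
  //     store i32 %v, i32* %p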
3083 3084 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) { 3085 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) { 3086 return BBI->isDebugOrPseudoInst() || 3087 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()); 3088 }; 3089 3090 BasicBlock::iterator FirstInstr = BBI->getParent()->begin(); 3091 do { 3092 if (BBI != FirstInstr) 3093 --BBI; 3094 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI)); 3095 3096 return dyn_cast<StoreInst>(BBI); 3097 }; 3098 3099 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI))) 3100 if (mergeStoreIntoSuccessor(*SI)) 3101 return &BI; 3102 3103 return nullptr; 3104 } 3105 3106 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) { 3107 if (BI.isUnconditional()) 3108 return visitUnconditionalBranchInst(BI); 3109 3110 // Change br (not X), label True, label False to: br X, label False, True 3111 Value *X = nullptr; 3112 if (match(&BI, m_Br(m_Not(m_Value(X)), m_BasicBlock(), m_BasicBlock())) && 3113 !isa<Constant>(X)) { 3114 // Swap Destinations and condition... 3115 BI.swapSuccessors(); 3116 return replaceOperand(BI, 0, X); 3117 } 3118 3119 // If the condition is irrelevant, remove the use so that other 3120 // transforms on the condition become more effective. 3121 if (!isa<ConstantInt>(BI.getCondition()) && 3122 BI.getSuccessor(0) == BI.getSuccessor(1)) 3123 return replaceOperand( 3124 BI, 0, ConstantInt::getFalse(BI.getCondition()->getType())); 3125 3126 // Canonicalize, for example, fcmp_one -> fcmp_oeq. 3127 CmpInst::Predicate Pred; 3128 if (match(&BI, m_Br(m_OneUse(m_FCmp(Pred, m_Value(), m_Value())), 3129 m_BasicBlock(), m_BasicBlock())) && 3130 !isCanonicalPredicate(Pred)) { 3131 // Swap destinations and condition. 3132 CmpInst *Cond = cast<CmpInst>(BI.getCondition()); 3133 Cond->setPredicate(CmpInst::getInversePredicate(Pred)); 3134 BI.swapSuccessors(); 3135 Worklist.push(Cond); 3136 return &BI; 3137 } 3138 3139 return nullptr; 3140 } 3141 3142 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) { 3143 Value *Cond = SI.getCondition(); 3144 Value *Op0; 3145 ConstantInt *AddRHS; 3146 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) { 3147 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'. 3148 for (auto Case : SI.cases()) { 3149 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS); 3150 assert(isa<ConstantInt>(NewCase) && 3151 "Result of expression should be constant"); 3152 Case.setValue(cast<ConstantInt>(NewCase)); 3153 } 3154 return replaceOperand(SI, 0, Op0); 3155 } 3156 3157 KnownBits Known = computeKnownBits(Cond, 0, &SI); 3158 unsigned LeadingKnownZeros = Known.countMinLeadingZeros(); 3159 unsigned LeadingKnownOnes = Known.countMinLeadingOnes(); 3160 3161 // Compute the number of leading bits we can ignore. 3162 // TODO: A better way to determine this would use ComputeNumSignBits(). 3163 for (auto &C : SI.cases()) { 3164 LeadingKnownZeros = std::min( 3165 LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros()); 3166 LeadingKnownOnes = std::min( 3167 LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes()); 3168 } 3169 3170 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes); 3171 3172 // Shrink the condition operand if the new type is smaller than the old type. 3173 // But do not shrink to a non-standard type, because backend can't generate 3174 // good code for that yet. 3175 // TODO: We can make it aggressive again after fixing PR39569. 
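  // For example (illustrative only): if the known bits show that only the low
  // 32 bits of a 64-bit condition can vary across the cases, something like
  //
  //   switch i64 %x, label %def [ i64 1, label %a
  //                               i64 2, label %b ]
  //
  // can be rewritten to switch on `trunc i64 %x to i32`, with the case values
  // truncated to match, provided shouldChangeType() accepts the new width.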
3176 if (NewWidth > 0 && NewWidth < Known.getBitWidth() && 3177 shouldChangeType(Known.getBitWidth(), NewWidth)) { 3178 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth); 3179 Builder.SetInsertPoint(&SI); 3180 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc"); 3181 3182 for (auto Case : SI.cases()) { 3183 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth); 3184 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase)); 3185 } 3186 return replaceOperand(SI, 0, NewCond); 3187 } 3188 3189 return nullptr; 3190 } 3191 3192 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) { 3193 Value *Agg = EV.getAggregateOperand(); 3194 3195 if (!EV.hasIndices()) 3196 return replaceInstUsesWith(EV, Agg); 3197 3198 if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(), 3199 SQ.getWithInstruction(&EV))) 3200 return replaceInstUsesWith(EV, V); 3201 3202 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 3203 // We're extracting from an insertvalue instruction, compare the indices 3204 const unsigned *exti, *exte, *insi, *inse; 3205 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 3206 exte = EV.idx_end(), inse = IV->idx_end(); 3207 exti != exte && insi != inse; 3208 ++exti, ++insi) { 3209 if (*insi != *exti) 3210 // The insert and extract both reference distinctly different elements. 3211 // This means the extract is not influenced by the insert, and we can 3212 // replace the aggregate operand of the extract with the aggregate 3213 // operand of the insert. i.e., replace 3214 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 3215 // %E = extractvalue { i32, { i32 } } %I, 0 3216 // with 3217 // %E = extractvalue { i32, { i32 } } %A, 0 3218 return ExtractValueInst::Create(IV->getAggregateOperand(), 3219 EV.getIndices()); 3220 } 3221 if (exti == exte && insi == inse) 3222 // Both iterators are at the end: Index lists are identical. Replace 3223 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 3224 // %C = extractvalue { i32, { i32 } } %B, 1, 0 3225 // with "i32 42" 3226 return replaceInstUsesWith(EV, IV->getInsertedValueOperand()); 3227 if (exti == exte) { 3228 // The extract list is a prefix of the insert list. i.e. replace 3229 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 3230 // %E = extractvalue { i32, { i32 } } %I, 1 3231 // with 3232 // %X = extractvalue { i32, { i32 } } %A, 1 3233 // %E = insertvalue { i32 } %X, i32 42, 0 3234 // by switching the order of the insert and extract (though the 3235 // insertvalue should be left in, since it may have other uses). 3236 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(), 3237 EV.getIndices()); 3238 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 3239 makeArrayRef(insi, inse)); 3240 } 3241 if (insi == inse) 3242 // The insert list is a prefix of the extract list 3243 // We can simply remove the common indices from the extract and make it 3244 // operate on the inserted value instead of the insertvalue result. 
3245 // i.e., replace 3246 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 3247 // %E = extractvalue { i32, { i32 } } %I, 1, 0 3248 // with 3249 // %E extractvalue { i32 } { i32 42 }, 0 3250 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 3251 makeArrayRef(exti, exte)); 3252 } 3253 if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) { 3254 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X 3255 Intrinsic::ID OvID = WO->getIntrinsicID(); 3256 if (*EV.idx_begin() == 0 && 3257 (OvID == Intrinsic::smul_with_overflow || 3258 OvID == Intrinsic::umul_with_overflow) && 3259 match(WO->getArgOperand(1), m_AllOnes())) { 3260 return BinaryOperator::CreateNeg(WO->getArgOperand(0)); 3261 } 3262 3263 // We're extracting from an overflow intrinsic, see if we're the only user, 3264 // which allows us to simplify multiple result intrinsics to simpler 3265 // things that just get one value. 3266 if (WO->hasOneUse()) { 3267 // Check if we're grabbing only the result of a 'with overflow' intrinsic 3268 // and replace it with a traditional binary instruction. 3269 if (*EV.idx_begin() == 0) { 3270 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 3271 Value *LHS = WO->getLHS(), *RHS = WO->getRHS(); 3272 // Replace the old instruction's uses with poison. 3273 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType())); 3274 eraseInstFromFunction(*WO); 3275 return BinaryOperator::Create(BinOp, LHS, RHS); 3276 } 3277 3278 assert(*EV.idx_begin() == 1 && 3279 "unexpected extract index for overflow inst"); 3280 3281 // If only the overflow result is used, and the right hand side is a 3282 // constant (or constant splat), we can remove the intrinsic by directly 3283 // checking for overflow. 3284 const APInt *C; 3285 if (match(WO->getRHS(), m_APInt(C))) { 3286 // Compute the no-wrap range for LHS given RHS=C, then construct an 3287 // equivalent icmp, potentially using an offset. 3288 ConstantRange NWR = 3289 ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C, 3290 WO->getNoWrapKind()); 3291 3292 CmpInst::Predicate Pred; 3293 APInt NewRHSC, Offset; 3294 NWR.getEquivalentICmp(Pred, NewRHSC, Offset); 3295 auto *OpTy = WO->getRHS()->getType(); 3296 auto *NewLHS = WO->getLHS(); 3297 if (Offset != 0) 3298 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset)); 3299 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS, 3300 ConstantInt::get(OpTy, NewRHSC)); 3301 } 3302 } 3303 } 3304 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 3305 // If the (non-volatile) load only has one use, we can rewrite this to a 3306 // load from a GEP. This reduces the size of the load. If a load is used 3307 // only by extractvalue instructions then this either must have been 3308 // optimized before, or it is a struct with padding, in which case we 3309 // don't want to do the transformation as it loses padding knowledge. 3310 if (L->isSimple() && L->hasOneUse()) { 3311 // extractvalue has integer indices, getelementptr has Value*s. Convert. 3312 SmallVector<Value*, 4> Indices; 3313 // Prefix an i32 0 since we need the first element. 3314 Indices.push_back(Builder.getInt32(0)); 3315 for (unsigned Idx : EV.indices()) 3316 Indices.push_back(Builder.getInt32(Idx)); 3317 3318 // We need to insert these at the location of the old load, not at that of 3319 // the extractvalue. 
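      // A sketch of the overall rewrite (illustrative, hypothetical types):
      //   %agg = load { i32, i32 }, { i32, i32 }* %p
      //   %x   = extractvalue { i32, i32 } %agg, 1
      // becomes
      //   %gep = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p,
      //                                  i32 0, i32 1
      //   %x   = load i32, i32* %gep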
3320 Builder.SetInsertPoint(L); 3321 Value *GEP = Builder.CreateInBoundsGEP(L->getType(), 3322 L->getPointerOperand(), Indices); 3323 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP); 3324 // Whatever aliasing information we had for the orignal load must also 3325 // hold for the smaller load, so propagate the annotations. 3326 NL->setAAMetadata(L->getAAMetadata()); 3327 // Returning the load directly will cause the main loop to insert it in 3328 // the wrong spot, so use replaceInstUsesWith(). 3329 return replaceInstUsesWith(EV, NL); 3330 } 3331 // We could simplify extracts from other values. Note that nested extracts may 3332 // already be simplified implicitly by the above: extract (extract (insert) ) 3333 // will be translated into extract ( insert ( extract ) ) first and then just 3334 // the value inserted, if appropriate. Similarly for extracts from single-use 3335 // loads: extract (extract (load)) will be translated to extract (load (gep)) 3336 // and if again single-use then via load (gep (gep)) to load (gep). 3337 // However, double extracts from e.g. function arguments or return values 3338 // aren't handled yet. 3339 return nullptr; 3340 } 3341 3342 /// Return 'true' if the given typeinfo will match anything. 3343 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) { 3344 switch (Personality) { 3345 case EHPersonality::GNU_C: 3346 case EHPersonality::GNU_C_SjLj: 3347 case EHPersonality::Rust: 3348 // The GCC C EH and Rust personality only exists to support cleanups, so 3349 // it's not clear what the semantics of catch clauses are. 3350 return false; 3351 case EHPersonality::Unknown: 3352 return false; 3353 case EHPersonality::GNU_Ada: 3354 // While __gnat_all_others_value will match any Ada exception, it doesn't 3355 // match foreign exceptions (or didn't, before gcc-4.7). 3356 return false; 3357 case EHPersonality::GNU_CXX: 3358 case EHPersonality::GNU_CXX_SjLj: 3359 case EHPersonality::GNU_ObjC: 3360 case EHPersonality::MSVC_X86SEH: 3361 case EHPersonality::MSVC_TableSEH: 3362 case EHPersonality::MSVC_CXX: 3363 case EHPersonality::CoreCLR: 3364 case EHPersonality::Wasm_CXX: 3365 case EHPersonality::XL_CXX: 3366 return TypeInfo->isNullValue(); 3367 } 3368 llvm_unreachable("invalid enum"); 3369 } 3370 3371 static bool shorter_filter(const Value *LHS, const Value *RHS) { 3372 return 3373 cast<ArrayType>(LHS->getType())->getNumElements() 3374 < 3375 cast<ArrayType>(RHS->getType())->getNumElements(); 3376 } 3377 3378 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) { 3379 // The logic here should be correct for any real-world personality function. 3380 // However if that turns out not to be true, the offending logic can always 3381 // be conditioned on the personality function, like the catch-all logic is. 3382 EHPersonality Personality = 3383 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn()); 3384 3385 // Simplify the list of clauses, eg by removing repeated catch clauses 3386 // (these are often created by inlining). 3387 bool MakeNewInstruction = false; // If true, recreate using the following: 3388 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction; 3389 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup. 3390 3391 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already. 3392 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) { 3393 bool isLastClause = i + 1 == e; 3394 if (LI.isCatch(i)) { 3395 // A catch clause. 
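      // Illustrative example (typeinfo names hypothetical): after inlining, a
      // clause list such as
      //   landingpad { i8*, i32 } catch i8* @_ZTIi catch i8* @_ZTIi
      // carries a redundant second catch; AlreadyCaught keeps the first copy
      // and the duplicate is dropped below.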
3396 Constant *CatchClause = LI.getClause(i); 3397 Constant *TypeInfo = CatchClause->stripPointerCasts(); 3398 3399 // If we already saw this clause, there is no point in having a second 3400 // copy of it. 3401 if (AlreadyCaught.insert(TypeInfo).second) { 3402 // This catch clause was not already seen. 3403 NewClauses.push_back(CatchClause); 3404 } else { 3405 // Repeated catch clause - drop the redundant copy. 3406 MakeNewInstruction = true; 3407 } 3408 3409 // If this is a catch-all then there is no point in keeping any following 3410 // clauses or marking the landingpad as having a cleanup. 3411 if (isCatchAll(Personality, TypeInfo)) { 3412 if (!isLastClause) 3413 MakeNewInstruction = true; 3414 CleanupFlag = false; 3415 break; 3416 } 3417 } else { 3418 // A filter clause. If any of the filter elements were already caught 3419 // then they can be dropped from the filter. It is tempting to try to 3420 // exploit the filter further by saying that any typeinfo that does not 3421 // occur in the filter can't be caught later (and thus can be dropped). 3422 // However this would be wrong, since typeinfos can match without being 3423 // equal (for example if one represents a C++ class, and the other some 3424 // class derived from it). 3425 assert(LI.isFilter(i) && "Unsupported landingpad clause!"); 3426 Constant *FilterClause = LI.getClause(i); 3427 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType()); 3428 unsigned NumTypeInfos = FilterType->getNumElements(); 3429 3430 // An empty filter catches everything, so there is no point in keeping any 3431 // following clauses or marking the landingpad as having a cleanup. By 3432 // dealing with this case here the following code is made a bit simpler. 3433 if (!NumTypeInfos) { 3434 NewClauses.push_back(FilterClause); 3435 if (!isLastClause) 3436 MakeNewInstruction = true; 3437 CleanupFlag = false; 3438 break; 3439 } 3440 3441 bool MakeNewFilter = false; // If true, make a new filter. 3442 SmallVector<Constant *, 16> NewFilterElts; // New elements. 3443 if (isa<ConstantAggregateZero>(FilterClause)) { 3444 // Not an empty filter - it contains at least one null typeinfo. 3445 assert(NumTypeInfos > 0 && "Should have handled empty filter already!"); 3446 Constant *TypeInfo = 3447 Constant::getNullValue(FilterType->getElementType()); 3448 // If this typeinfo is a catch-all then the filter can never match. 3449 if (isCatchAll(Personality, TypeInfo)) { 3450 // Throw the filter away. 3451 MakeNewInstruction = true; 3452 continue; 3453 } 3454 3455 // There is no point in having multiple copies of this typeinfo, so 3456 // discard all but the first copy if there is more than one. 3457 NewFilterElts.push_back(TypeInfo); 3458 if (NumTypeInfos > 1) 3459 MakeNewFilter = true; 3460 } else { 3461 ConstantArray *Filter = cast<ConstantArray>(FilterClause); 3462 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements. 3463 NewFilterElts.reserve(NumTypeInfos); 3464 3465 // Remove any filter elements that were already caught or that already 3466 // occurred in the filter. While there, see if any of the elements are 3467 // catch-alls. If so, the filter can be discarded. 3468 bool SawCatchAll = false; 3469 for (unsigned j = 0; j != NumTypeInfos; ++j) { 3470 Constant *Elt = Filter->getOperand(j); 3471 Constant *TypeInfo = Elt->stripPointerCasts(); 3472 if (isCatchAll(Personality, TypeInfo)) { 3473 // This element is a catch-all. Bail out, noting this fact. 
3474 SawCatchAll = true; 3475 break; 3476 } 3477 3478 // Even if we've seen a type in a catch clause, we don't want to 3479 // remove it from the filter. An unexpected type handler may be 3480 // set up for a call site which throws an exception of the same 3481 // type caught. In order for the exception thrown by the unexpected 3482 // handler to propagate correctly, the filter must be correctly 3483 // described for the call site. 3484 // 3485 // Example: 3486 // 3487 // void unexpected() { throw 1;} 3488 // void foo() throw (int) { 3489 // std::set_unexpected(unexpected); 3490 // try { 3491 // throw 2.0; 3492 // } catch (int i) {} 3493 // } 3494 3495 // There is no point in having multiple copies of the same typeinfo in 3496 // a filter, so only add it if we didn't already. 3497 if (SeenInFilter.insert(TypeInfo).second) 3498 NewFilterElts.push_back(cast<Constant>(Elt)); 3499 } 3500 // A filter containing a catch-all cannot match anything by definition. 3501 if (SawCatchAll) { 3502 // Throw the filter away. 3503 MakeNewInstruction = true; 3504 continue; 3505 } 3506 3507 // If we dropped something from the filter, make a new one. 3508 if (NewFilterElts.size() < NumTypeInfos) 3509 MakeNewFilter = true; 3510 } 3511 if (MakeNewFilter) { 3512 FilterType = ArrayType::get(FilterType->getElementType(), 3513 NewFilterElts.size()); 3514 FilterClause = ConstantArray::get(FilterType, NewFilterElts); 3515 MakeNewInstruction = true; 3516 } 3517 3518 NewClauses.push_back(FilterClause); 3519 3520 // If the new filter is empty then it will catch everything so there is 3521 // no point in keeping any following clauses or marking the landingpad 3522 // as having a cleanup. The case of the original filter being empty was 3523 // already handled above. 3524 if (MakeNewFilter && !NewFilterElts.size()) { 3525 assert(MakeNewInstruction && "New filter but not a new instruction!"); 3526 CleanupFlag = false; 3527 break; 3528 } 3529 } 3530 } 3531 3532 // If several filters occur in a row then reorder them so that the shortest 3533 // filters come first (those with the smallest number of elements). This is 3534 // advantageous because shorter filters are more likely to match, speeding up 3535 // unwinding, but mostly because it increases the effectiveness of the other 3536 // filter optimizations below. 3537 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) { 3538 unsigned j; 3539 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters. 3540 for (j = i; j != e; ++j) 3541 if (!isa<ArrayType>(NewClauses[j]->getType())) 3542 break; 3543 3544 // Check whether the filters are already sorted by length. We need to know 3545 // if sorting them is actually going to do anything so that we only make a 3546 // new landingpad instruction if it does. 3547 for (unsigned k = i; k + 1 < j; ++k) 3548 if (shorter_filter(NewClauses[k+1], NewClauses[k])) { 3549 // Not sorted, so sort the filters now. Doing an unstable sort would be 3550 // correct too but reordering filters pointlessly might confuse users. 3551 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j, 3552 shorter_filter); 3553 MakeNewInstruction = true; 3554 break; 3555 } 3556 3557 // Look for the next batch of filters. 3558 i = j + 1; 3559 } 3560 3561 // If typeinfos matched if and only if equal, then the elements of a filter L 3562 // that occurs later than a filter F could be replaced by the intersection of 3563 // the elements of F and L. 
In reality two typeinfos can match without being 3564 // equal (for example if one represents a C++ class, and the other some class 3565 // derived from it) so it would be wrong to perform this transform in general. 3566 // However the transform is correct and useful if F is a subset of L. In that 3567 // case L can be replaced by F, and thus removed altogether since repeating a 3568 // filter is pointless. So here we look at all pairs of filters F and L where 3569 // L follows F in the list of clauses, and remove L if every element of F is 3570 // an element of L. This can occur when inlining C++ functions with exception 3571 // specifications. 3572 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) { 3573 // Examine each filter in turn. 3574 Value *Filter = NewClauses[i]; 3575 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType()); 3576 if (!FTy) 3577 // Not a filter - skip it. 3578 continue; 3579 unsigned FElts = FTy->getNumElements(); 3580 // Examine each filter following this one. Doing this backwards means that 3581 // we don't have to worry about filters disappearing under us when removed. 3582 for (unsigned j = NewClauses.size() - 1; j != i; --j) { 3583 Value *LFilter = NewClauses[j]; 3584 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType()); 3585 if (!LTy) 3586 // Not a filter - skip it. 3587 continue; 3588 // If Filter is a subset of LFilter, i.e. every element of Filter is also 3589 // an element of LFilter, then discard LFilter. 3590 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j; 3591 // If Filter is empty then it is a subset of LFilter. 3592 if (!FElts) { 3593 // Discard LFilter. 3594 NewClauses.erase(J); 3595 MakeNewInstruction = true; 3596 // Move on to the next filter. 3597 continue; 3598 } 3599 unsigned LElts = LTy->getNumElements(); 3600 // If Filter is longer than LFilter then it cannot be a subset of it. 3601 if (FElts > LElts) 3602 // Move on to the next filter. 3603 continue; 3604 // At this point we know that LFilter has at least one element. 3605 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros. 3606 // Filter is a subset of LFilter iff Filter contains only zeros (as we 3607 // already know that Filter is not longer than LFilter). 3608 if (isa<ConstantAggregateZero>(Filter)) { 3609 assert(FElts <= LElts && "Should have handled this case earlier!"); 3610 // Discard LFilter. 3611 NewClauses.erase(J); 3612 MakeNewInstruction = true; 3613 } 3614 // Move on to the next filter. 3615 continue; 3616 } 3617 ConstantArray *LArray = cast<ConstantArray>(LFilter); 3618 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros. 3619 // Since Filter is non-empty and contains only zeros, it is a subset of 3620 // LFilter iff LFilter contains a zero. 3621 assert(FElts > 0 && "Should have eliminated the empty filter earlier!"); 3622 for (unsigned l = 0; l != LElts; ++l) 3623 if (LArray->getOperand(l)->isNullValue()) { 3624 // LFilter contains a zero - discard it. 3625 NewClauses.erase(J); 3626 MakeNewInstruction = true; 3627 break; 3628 } 3629 // Move on to the next filter. 3630 continue; 3631 } 3632 // At this point we know that both filters are ConstantArrays. Loop over 3633 // operands to see whether every element of Filter is also an element of 3634 // LFilter. Since filters tend to be short this is probably faster than 3635 // using a method that scales nicely. 
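      // Illustrative example (hypothetical typeinfos): with
      //   Filter  = [ i8* @_ZTIi ]
      //   LFilter = [ i8* @_ZTIi, i8* @_ZTId ]
      // every element of Filter also occurs in LFilter, so the later filter
      // LFilter is redundant and is erased from NewClauses below.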
      ConstantArray *FArray = cast<ConstantArray>(Filter);
      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {
        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
        AllFound = false;
        for (unsigned l = 0; l != LElts; ++l) {
          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
          if (LTypeInfo == FTypeInfo) {
            AllFound = true;
            break;
          }
        }
        if (!AllFound)
          break;
      }
      if (AllFound) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
      }
      // Move on to the next filter.
    }
  }

  // If we changed any of the clauses, replace the old landingpad instruction
  // with a new one.
  if (MakeNewInstruction) {
    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
                                                 NewClauses.size());
    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
      NLI->addClause(NewClauses[i]);
    // A landing pad with no clauses must have the cleanup flag set. It is
    // theoretically possible, though highly unlikely, that we eliminated all
    // clauses. If so, force the cleanup flag to true.
    if (NewClauses.empty())
      CleanupFlag = true;
    NLI->setCleanup(CleanupFlag);
    return NLI;
  }

  // Even if none of the clauses changed, we may nonetheless have understood
  // that the cleanup flag is pointless. Clear it if so.
  if (LI.isCleanup() != CleanupFlag) {
    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
    LI.setCleanup(CleanupFlag);
    return &LI;
  }

  return nullptr;
}

Value *
InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
  // Try to push freeze through instructions that propagate but don't produce
  // poison as far as possible. If an operand of the freeze satisfies three
  // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
  // but one of its operands are guaranteed non-poison, then push the freeze
  // through to the one operand that is not guaranteed non-poison. The actual
  // transform is as follows.
  //   Op1 = ...                        ; Op1 can be poison
  //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and Op1 is its
  //                                    ; only maybe-poison operand
  //   ... = Freeze(Op0)
  // =>
  //   Op1 = ...
  //   Op1.fr = Freeze(Op1)
  //   ... = Inst(Op1.fr, NonPoisonOps...)
  auto *OrigOp = OrigFI.getOperand(0);
  auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);

  // While we could change the other users of OrigOp to use freeze(OrigOp), that
  // potentially reduces their optimization potential, so let's only do this iff
  // the OrigOp is only used by the freeze.
  if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
    return nullptr;

  // We can't push the freeze through an instruction which can itself create
  // poison. If the only source of new poison is flags, we can simply
  // strip them (since we know the only use is the freeze and nothing can
  // benefit from them).
  if (canCreateUndefOrPoison(cast<Operator>(OrigOp), /*ConsiderFlags*/ false))
    return nullptr;

  // If operand is guaranteed not to be poison, there is no need to add freeze
  // to the operand. So we first find the operand that is not guaranteed to be
  // poison.
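  // Concrete sketch of the rewrite (illustrative, hypothetical values):
  //   %a = add i32 %x, 7        ; %x may be poison, the constant is not
  //   %f = freeze i32 %a
  // becomes
  //   %x.fr = freeze i32 %x
  //   %a    = add i32 %x.fr, 7
  // and all uses of %f are replaced by %a by the caller (visitFreeze).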
3722 Use *MaybePoisonOperand = nullptr; 3723 for (Use &U : OrigOpInst->operands()) { 3724 if (isGuaranteedNotToBeUndefOrPoison(U.get())) 3725 continue; 3726 if (!MaybePoisonOperand) 3727 MaybePoisonOperand = &U; 3728 else 3729 return nullptr; 3730 } 3731 3732 OrigOpInst->dropPoisonGeneratingFlags(); 3733 3734 // If all operands are guaranteed to be non-poison, we can drop freeze. 3735 if (!MaybePoisonOperand) 3736 return OrigOp; 3737 3738 auto *FrozenMaybePoisonOperand = new FreezeInst( 3739 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr"); 3740 3741 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand); 3742 FrozenMaybePoisonOperand->insertBefore(OrigOpInst); 3743 return OrigOp; 3744 } 3745 3746 bool InstCombinerImpl::freezeDominatedUses(FreezeInst &FI) { 3747 Value *Op = FI.getOperand(0); 3748 3749 if (isa<Constant>(Op)) 3750 return false; 3751 3752 bool Changed = false; 3753 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool { 3754 bool Dominates = DT.dominates(&FI, U); 3755 Changed |= Dominates; 3756 return Dominates; 3757 }); 3758 3759 return Changed; 3760 } 3761 3762 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { 3763 Value *Op0 = I.getOperand(0); 3764 3765 if (Value *V = SimplifyFreezeInst(Op0, SQ.getWithInstruction(&I))) 3766 return replaceInstUsesWith(I, V); 3767 3768 // freeze (phi const, x) --> phi const, (freeze x) 3769 if (auto *PN = dyn_cast<PHINode>(Op0)) { 3770 if (Instruction *NV = foldOpIntoPhi(I, PN)) 3771 return NV; 3772 } 3773 3774 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I)) 3775 return replaceInstUsesWith(I, NI); 3776 3777 if (match(Op0, m_Undef())) { 3778 // If I is freeze(undef), see its uses and fold it to the best constant. 3779 // - or: pick -1 3780 // - select's condition: pick the value that leads to choosing a constant 3781 // - other ops: pick 0 3782 Constant *BestValue = nullptr; 3783 Constant *NullValue = Constant::getNullValue(I.getType()); 3784 for (const auto *U : I.users()) { 3785 Constant *C = NullValue; 3786 3787 if (match(U, m_Or(m_Value(), m_Value()))) 3788 C = Constant::getAllOnesValue(I.getType()); 3789 else if (const auto *SI = dyn_cast<SelectInst>(U)) { 3790 if (SI->getCondition() == &I) { 3791 APInt CondVal(1, isa<Constant>(SI->getFalseValue()) ? 0 : 1); 3792 C = Constant::getIntegerValue(I.getType(), CondVal); 3793 } 3794 } 3795 3796 if (!BestValue) 3797 BestValue = C; 3798 else if (BestValue != C) 3799 BestValue = NullValue; 3800 } 3801 3802 return replaceInstUsesWith(I, BestValue); 3803 } 3804 3805 // Replace all dominated uses of Op to freeze(Op). 3806 if (freezeDominatedUses(I)) 3807 return &I; 3808 3809 return nullptr; 3810 } 3811 3812 /// Check for case where the call writes to an otherwise dead alloca. This 3813 /// shows up for unused out-params in idiomatic C/C++ code. Note that this 3814 /// helper *only* analyzes the write; doesn't check any other legality aspect. 3815 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) { 3816 auto *CB = dyn_cast<CallBase>(I); 3817 if (!CB) 3818 // TODO: handle e.g. store to alloca here - only worth doing if we extend 3819 // to allow reload along used path as described below. Otherwise, this 3820 // is simply a store to a dead allocation which will be removed. 3821 return false; 3822 Optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI); 3823 if (!Dest) 3824 return false; 3825 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr)); 3826 if (!AI) 3827 // TODO: allow malloc? 
3828 return false; 3829 // TODO: allow memory access dominated by move point? Note that since AI 3830 // could have a reference to itself captured by the call, we would need to 3831 // account for cycles in doing so. 3832 SmallVector<const User *> AllocaUsers; 3833 SmallPtrSet<const User *, 4> Visited; 3834 auto pushUsers = [&](const Instruction &I) { 3835 for (const User *U : I.users()) { 3836 if (Visited.insert(U).second) 3837 AllocaUsers.push_back(U); 3838 } 3839 }; 3840 pushUsers(*AI); 3841 while (!AllocaUsers.empty()) { 3842 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val()); 3843 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) || 3844 isa<AddrSpaceCastInst>(UserI)) { 3845 pushUsers(*UserI); 3846 continue; 3847 } 3848 if (UserI == CB) 3849 continue; 3850 // TODO: support lifetime.start/end here 3851 return false; 3852 } 3853 return true; 3854 } 3855 3856 /// Try to move the specified instruction from its current block into the 3857 /// beginning of DestBlock, which can only happen if it's safe to move the 3858 /// instruction past all of the instructions between it and the end of its 3859 /// block. 3860 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock, 3861 TargetLibraryInfo &TLI) { 3862 assert(I->getUniqueUndroppableUser() && "Invariants didn't hold!"); 3863 BasicBlock *SrcBlock = I->getParent(); 3864 3865 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 3866 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() || 3867 I->isTerminator()) 3868 return false; 3869 3870 // Do not sink static or dynamic alloca instructions. Static allocas must 3871 // remain in the entry block, and dynamic allocas must not be sunk in between 3872 // a stacksave / stackrestore pair, which would incorrectly shorten its 3873 // lifetime. 3874 if (isa<AllocaInst>(I)) 3875 return false; 3876 3877 // Do not sink into catchswitch blocks. 3878 if (isa<CatchSwitchInst>(DestBlock->getTerminator())) 3879 return false; 3880 3881 // Do not sink convergent call instructions. 3882 if (auto *CI = dyn_cast<CallInst>(I)) { 3883 if (CI->isConvergent()) 3884 return false; 3885 } 3886 3887 // Unless we can prove that the memory write isn't visibile except on the 3888 // path we're sinking to, we must bail. 3889 if (I->mayWriteToMemory()) { 3890 if (!SoleWriteToDeadLocal(I, TLI)) 3891 return false; 3892 } 3893 3894 // We can only sink load instructions if there is nothing between the load and 3895 // the end of block that could change the value. 3896 if (I->mayReadFromMemory()) { 3897 // We don't want to do any sophisticated alias analysis, so we only check 3898 // the instructions after I in I's parent block if we try to sink to its 3899 // successor block. 3900 if (DestBlock->getUniquePredecessor() != I->getParent()) 3901 return false; 3902 for (BasicBlock::iterator Scan = std::next(I->getIterator()), 3903 E = I->getParent()->end(); 3904 Scan != E; ++Scan) 3905 if (Scan->mayWriteToMemory()) 3906 return false; 3907 } 3908 3909 I->dropDroppableUses([DestBlock](const Use *U) { 3910 if (auto *I = dyn_cast<Instruction>(U->getUser())) 3911 return I->getParent() != DestBlock; 3912 return true; 3913 }); 3914 /// FIXME: We could remove droppable uses that are not dominated by 3915 /// the new position. 3916 3917 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); 3918 I->moveBefore(&*InsertPos); 3919 ++NumSunkInst; 3920 3921 // Also sink all related debug uses from the source basic block. 
Otherwise we 3922 // get debug use before the def. Attempt to salvage debug uses first, to 3923 // maximise the range variables have location for. If we cannot salvage, then 3924 // mark the location undef: we know it was supposed to receive a new location 3925 // here, but that computation has been sunk. 3926 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers; 3927 findDbgUsers(DbgUsers, I); 3928 // Process the sinking DbgUsers in reverse order, as we only want to clone the 3929 // last appearing debug intrinsic for each given variable. 3930 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink; 3931 for (DbgVariableIntrinsic *DVI : DbgUsers) 3932 if (DVI->getParent() == SrcBlock) 3933 DbgUsersToSink.push_back(DVI); 3934 llvm::sort(DbgUsersToSink, 3935 [](auto *A, auto *B) { return B->comesBefore(A); }); 3936 3937 SmallVector<DbgVariableIntrinsic *, 2> DIIClones; 3938 SmallSet<DebugVariable, 4> SunkVariables; 3939 for (auto User : DbgUsersToSink) { 3940 // A dbg.declare instruction should not be cloned, since there can only be 3941 // one per variable fragment. It should be left in the original place 3942 // because the sunk instruction is not an alloca (otherwise we could not be 3943 // here). 3944 if (isa<DbgDeclareInst>(User)) 3945 continue; 3946 3947 DebugVariable DbgUserVariable = 3948 DebugVariable(User->getVariable(), User->getExpression(), 3949 User->getDebugLoc()->getInlinedAt()); 3950 3951 if (!SunkVariables.insert(DbgUserVariable).second) 3952 continue; 3953 3954 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone())); 3955 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I)) 3956 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0)); 3957 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n'); 3958 } 3959 3960 // Perform salvaging without the clones, then sink the clones. 3961 if (!DIIClones.empty()) { 3962 salvageDebugInfoForDbgValues(*I, DbgUsers); 3963 // The clones are in reverse order of original appearance, reverse again to 3964 // maintain the original order. 3965 for (auto &DIIClone : llvm::reverse(DIIClones)) { 3966 DIIClone->insertBefore(&*InsertPos); 3967 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n'); 3968 } 3969 } 3970 3971 return true; 3972 } 3973 3974 bool InstCombinerImpl::run() { 3975 while (!Worklist.isEmpty()) { 3976 // Walk deferred instructions in reverse order, and push them to the 3977 // worklist, which means they'll end up popped from the worklist in-order. 3978 while (Instruction *I = Worklist.popDeferred()) { 3979 // Check to see if we can DCE the instruction. We do this already here to 3980 // reduce the number of uses and thus allow other folds to trigger. 3981 // Note that eraseInstFromFunction() may push additional instructions on 3982 // the deferred worklist, so this will DCE whole instruction chains. 3983 if (isInstructionTriviallyDead(I, &TLI)) { 3984 eraseInstFromFunction(*I); 3985 ++NumDeadInst; 3986 continue; 3987 } 3988 3989 Worklist.push(I); 3990 } 3991 3992 Instruction *I = Worklist.removeOne(); 3993 if (I == nullptr) continue; // skip null values. 3994 3995 // Check to see if we can DCE the instruction. 3996 if (isInstructionTriviallyDead(I, &TLI)) { 3997 eraseInstFromFunction(*I); 3998 ++NumDeadInst; 3999 continue; 4000 } 4001 4002 if (!DebugCounter::shouldExecute(VisitCounter)) 4003 continue; 4004 4005 // Instruction isn't dead, see if we can constant propagate it. 
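    // For instance (illustrative): an instruction such as `%r = add i32 3, 4`
    // constant-folds to 7, all uses of %r are rewritten to 7, and the now-dead
    // add is erased.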
4006 if (!I->use_empty() && 4007 (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) { 4008 if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) { 4009 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I 4010 << '\n'); 4011 4012 // Add operands to the worklist. 4013 replaceInstUsesWith(*I, C); 4014 ++NumConstProp; 4015 if (isInstructionTriviallyDead(I, &TLI)) 4016 eraseInstFromFunction(*I); 4017 MadeIRChange = true; 4018 continue; 4019 } 4020 } 4021 4022 // See if we can trivially sink this instruction to its user if we can 4023 // prove that the successor is not executed more frequently than our block. 4024 // Return the UserBlock if successful. 4025 auto getOptionalSinkBlockForInst = 4026 [this](Instruction *I) -> Optional<BasicBlock *> { 4027 if (!EnableCodeSinking) 4028 return None; 4029 auto *UserInst = cast_or_null<Instruction>(I->getUniqueUndroppableUser()); 4030 if (!UserInst) 4031 return None; 4032 4033 BasicBlock *BB = I->getParent(); 4034 BasicBlock *UserParent = nullptr; 4035 4036 // Special handling for Phi nodes - get the block the use occurs in. 4037 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) { 4038 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 4039 if (PN->getIncomingValue(i) == I) { 4040 // Bail out if we have uses in different blocks. We don't do any 4041 // sophisticated analysis (i.e finding NearestCommonDominator of these 4042 // use blocks). 4043 if (UserParent && UserParent != PN->getIncomingBlock(i)) 4044 return None; 4045 UserParent = PN->getIncomingBlock(i); 4046 } 4047 } 4048 assert(UserParent && "expected to find user block!"); 4049 } else 4050 UserParent = UserInst->getParent(); 4051 4052 // Try sinking to another block. If that block is unreachable, then do 4053 // not bother. SimplifyCFG should handle it. 4054 if (UserParent == BB || !DT.isReachableFromEntry(UserParent)) 4055 return None; 4056 4057 auto *Term = UserParent->getTerminator(); 4058 // See if the user is one of our successors that has only one 4059 // predecessor, so that we don't have to split the critical edge. 4060 // Another option where we can sink is a block that ends with a 4061 // terminator that does not pass control to other block (such as 4062 // return or unreachable or resume). In this case: 4063 // - I dominates the User (by SSA form); 4064 // - the User will be executed at most once. 4065 // So sinking I down to User is always profitable or neutral. 4066 if (UserParent->getUniquePredecessor() == BB || succ_empty(Term)) { 4067 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?"); 4068 return UserParent; 4069 } 4070 return None; 4071 }; 4072 4073 auto OptBB = getOptionalSinkBlockForInst(I); 4074 if (OptBB) { 4075 auto *UserParent = *OptBB; 4076 // Okay, the CFG is simple enough, try to sink this instruction. 4077 if (TryToSinkInstruction(I, UserParent, TLI)) { 4078 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n'); 4079 MadeIRChange = true; 4080 // We'll add uses of the sunk instruction below, but since 4081 // sinking can expose opportunities for it's *operands* add 4082 // them to the worklist 4083 for (Use &U : I->operands()) 4084 if (Instruction *OpI = dyn_cast<Instruction>(U.get())) 4085 Worklist.push(OpI); 4086 } 4087 } 4088 4089 // Now that we have an instruction, try combining it to simplify it. 
4090 Builder.SetInsertPoint(I); 4091 Builder.CollectMetadataToCopy( 4092 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation}); 4093 4094 #ifndef NDEBUG 4095 std::string OrigI; 4096 #endif 4097 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); 4098 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n'); 4099 4100 if (Instruction *Result = visit(*I)) { 4101 ++NumCombined; 4102 // Should we replace the old instruction with a new one? 4103 if (Result != I) { 4104 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n' 4105 << " New = " << *Result << '\n'); 4106 4107 Result->copyMetadata(*I, 4108 {LLVMContext::MD_dbg, LLVMContext::MD_annotation}); 4109 // Everything uses the new instruction now. 4110 I->replaceAllUsesWith(Result); 4111 4112 // Move the name to the new instruction first. 4113 Result->takeName(I); 4114 4115 // Insert the new instruction into the basic block... 4116 BasicBlock *InstParent = I->getParent(); 4117 BasicBlock::iterator InsertPos = I->getIterator(); 4118 4119 // Are we replace a PHI with something that isn't a PHI, or vice versa? 4120 if (isa<PHINode>(Result) != isa<PHINode>(I)) { 4121 // We need to fix up the insertion point. 4122 if (isa<PHINode>(I)) // PHI -> Non-PHI 4123 InsertPos = InstParent->getFirstInsertionPt(); 4124 else // Non-PHI -> PHI 4125 InsertPos = InstParent->getFirstNonPHI()->getIterator(); 4126 } 4127 4128 InstParent->getInstList().insert(InsertPos, Result); 4129 4130 // Push the new instruction and any users onto the worklist. 4131 Worklist.pushUsersToWorkList(*Result); 4132 Worklist.push(Result); 4133 4134 eraseInstFromFunction(*I); 4135 } else { 4136 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n' 4137 << " New = " << *I << '\n'); 4138 4139 // If the instruction was modified, it's possible that it is now dead. 4140 // if so, remove it. 4141 if (isInstructionTriviallyDead(I, &TLI)) { 4142 eraseInstFromFunction(*I); 4143 } else { 4144 Worklist.pushUsersToWorkList(*I); 4145 Worklist.push(I); 4146 } 4147 } 4148 MadeIRChange = true; 4149 } 4150 } 4151 4152 Worklist.zap(); 4153 return MadeIRChange; 4154 } 4155 4156 // Track the scopes used by !alias.scope and !noalias. In a function, a 4157 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used 4158 // by both sets. If not, the declaration of the scope can be safely omitted. 4159 // The MDNode of the scope can be omitted as well for the instructions that are 4160 // part of this function. We do not do that at this point, as this might become 4161 // too time consuming to do. 4162 class AliasScopeTracker { 4163 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists; 4164 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists; 4165 4166 public: 4167 void analyse(Instruction *I) { 4168 // This seems to be faster than checking 'mayReadOrWriteMemory()'. 
4169 if (!I->hasMetadataOtherThanDebugLoc()) 4170 return; 4171 4172 auto Track = [](Metadata *ScopeList, auto &Container) { 4173 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList); 4174 if (!MDScopeList || !Container.insert(MDScopeList).second) 4175 return; 4176 for (auto &MDOperand : MDScopeList->operands()) 4177 if (auto *MDScope = dyn_cast<MDNode>(MDOperand)) 4178 Container.insert(MDScope); 4179 }; 4180 4181 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists); 4182 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists); 4183 } 4184 4185 bool isNoAliasScopeDeclDead(Instruction *Inst) { 4186 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst); 4187 if (!Decl) 4188 return false; 4189 4190 assert(Decl->use_empty() && 4191 "llvm.experimental.noalias.scope.decl in use ?"); 4192 const MDNode *MDSL = Decl->getScopeList(); 4193 assert(MDSL->getNumOperands() == 1 && 4194 "llvm.experimental.noalias.scope should refer to a single scope"); 4195 auto &MDOperand = MDSL->getOperand(0); 4196 if (auto *MD = dyn_cast<MDNode>(MDOperand)) 4197 return !UsedAliasScopesAndLists.contains(MD) || 4198 !UsedNoAliasScopesAndLists.contains(MD); 4199 4200 // Not an MDNode ? throw away. 4201 return true; 4202 } 4203 }; 4204 4205 /// Populate the IC worklist from a function, by walking it in depth-first 4206 /// order and adding all reachable code to the worklist. 4207 /// 4208 /// This has a couple of tricks to make the code faster and more powerful. In 4209 /// particular, we constant fold and DCE instructions as we go, to avoid adding 4210 /// them to the worklist (this significantly speeds up instcombine on code where 4211 /// many instructions are dead or constant). Additionally, if we find a branch 4212 /// whose condition is a known constant, we only visit the reachable successors. 4213 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL, 4214 const TargetLibraryInfo *TLI, 4215 InstructionWorklist &ICWorklist) { 4216 bool MadeIRChange = false; 4217 SmallPtrSet<BasicBlock *, 32> Visited; 4218 SmallVector<BasicBlock*, 256> Worklist; 4219 Worklist.push_back(&F.front()); 4220 4221 SmallVector<Instruction *, 128> InstrsForInstructionWorklist; 4222 DenseMap<Constant *, Constant *> FoldedConstants; 4223 AliasScopeTracker SeenAliasScopes; 4224 4225 do { 4226 BasicBlock *BB = Worklist.pop_back_val(); 4227 4228 // We have now visited this block! If we've already been here, ignore it. 4229 if (!Visited.insert(BB).second) 4230 continue; 4231 4232 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) { 4233 // ConstantProp instruction if trivially constant. 4234 if (!Inst.use_empty() && 4235 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0)))) 4236 if (Constant *C = ConstantFoldInstruction(&Inst, DL, TLI)) { 4237 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst 4238 << '\n'); 4239 Inst.replaceAllUsesWith(C); 4240 ++NumConstProp; 4241 if (isInstructionTriviallyDead(&Inst, TLI)) 4242 Inst.eraseFromParent(); 4243 MadeIRChange = true; 4244 continue; 4245 } 4246 4247 // See if we can constant fold its operands. 
4248 for (Use &U : Inst.operands()) { 4249 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U)) 4250 continue; 4251 4252 auto *C = cast<Constant>(U); 4253 Constant *&FoldRes = FoldedConstants[C]; 4254 if (!FoldRes) 4255 FoldRes = ConstantFoldConstant(C, DL, TLI); 4256 4257 if (FoldRes != C) { 4258 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst 4259 << "\n Old = " << *C 4260 << "\n New = " << *FoldRes << '\n'); 4261 U = FoldRes; 4262 MadeIRChange = true; 4263 } 4264 } 4265 4266 // Skip processing debug and pseudo intrinsics in InstCombine. Processing 4267 // these call instructions consumes non-trivial amount of time and 4268 // provides no value for the optimization. 4269 if (!Inst.isDebugOrPseudoInst()) { 4270 InstrsForInstructionWorklist.push_back(&Inst); 4271 SeenAliasScopes.analyse(&Inst); 4272 } 4273 } 4274 4275 // Recursively visit successors. If this is a branch or switch on a 4276 // constant, only visit the reachable successor. 4277 Instruction *TI = BB->getTerminator(); 4278 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 4279 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 4280 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 4281 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 4282 Worklist.push_back(ReachableBB); 4283 continue; 4284 } 4285 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 4286 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 4287 Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor()); 4288 continue; 4289 } 4290 } 4291 4292 append_range(Worklist, successors(TI)); 4293 } while (!Worklist.empty()); 4294 4295 // Remove instructions inside unreachable blocks. This prevents the 4296 // instcombine code from having to deal with some bad special cases, and 4297 // reduces use counts of instructions. 4298 for (BasicBlock &BB : F) { 4299 if (Visited.count(&BB)) 4300 continue; 4301 4302 unsigned NumDeadInstInBB; 4303 unsigned NumDeadDbgInstInBB; 4304 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) = 4305 removeAllNonTerminatorAndEHPadInstructions(&BB); 4306 4307 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0; 4308 NumDeadInst += NumDeadInstInBB; 4309 } 4310 4311 // Once we've found all of the instructions to add to instcombine's worklist, 4312 // add them in reverse order. This way instcombine will visit from the top 4313 // of the function down. This jives well with the way that it adds all uses 4314 // of instructions to the worklist after doing a transformation, thus avoiding 4315 // some N^2 behavior in pathological cases. 4316 ICWorklist.reserve(InstrsForInstructionWorklist.size()); 4317 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) { 4318 // DCE instruction if trivially dead. As we iterate in reverse program 4319 // order here, we will clean up whole chains of dead instructions. 
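    // For example (illustrative): if `%b = add i32 %a, 1` has no users it is
    // erased here, which may in turn leave `%a` unused so that it is erased
    // when the reverse walk reaches it.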
4320 if (isInstructionTriviallyDead(Inst, TLI) || 4321 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) { 4322 ++NumDeadInst; 4323 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n'); 4324 salvageDebugInfo(*Inst); 4325 Inst->eraseFromParent(); 4326 MadeIRChange = true; 4327 continue; 4328 } 4329 4330 ICWorklist.push(Inst); 4331 } 4332 4333 return MadeIRChange; 4334 } 4335 4336 static bool combineInstructionsOverFunction( 4337 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, 4338 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, 4339 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, 4340 ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) { 4341 auto &DL = F.getParent()->getDataLayout(); 4342 MaxIterations = std::min(MaxIterations, LimitMaxIterations.getValue()); 4343 4344 /// Builder - This is an IRBuilder that automatically inserts new 4345 /// instructions into the worklist when they are created. 4346 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder( 4347 F.getContext(), TargetFolder(DL), 4348 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) { 4349 Worklist.add(I); 4350 if (auto *Assume = dyn_cast<AssumeInst>(I)) 4351 AC.registerAssumption(Assume); 4352 })); 4353 4354 // Lower dbg.declare intrinsics otherwise their value may be clobbered 4355 // by instcombiner. 4356 bool MadeIRChange = false; 4357 if (ShouldLowerDbgDeclare) 4358 MadeIRChange = LowerDbgDeclare(F); 4359 4360 // Iterate while there is work to do. 4361 unsigned Iteration = 0; 4362 while (true) { 4363 ++NumWorklistIterations; 4364 ++Iteration; 4365 4366 if (Iteration > InfiniteLoopDetectionThreshold) { 4367 report_fatal_error( 4368 "Instruction Combining seems stuck in an infinite loop after " + 4369 Twine(InfiniteLoopDetectionThreshold) + " iterations."); 4370 } 4371 4372 if (Iteration > MaxIterations) { 4373 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations 4374 << " on " << F.getName() 4375 << " reached; stopping before reaching a fixpoint\n"); 4376 break; 4377 } 4378 4379 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 4380 << F.getName() << "\n"); 4381 4382 MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist); 4383 4384 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT, 4385 ORE, BFI, PSI, DL, LI); 4386 IC.MaxArraySizeForCombine = MaxArraySize; 4387 4388 if (!IC.run()) 4389 break; 4390 4391 MadeIRChange = true; 4392 } 4393 4394 return MadeIRChange; 4395 } 4396 4397 InstCombinePass::InstCombinePass() : MaxIterations(LimitMaxIterations) {} 4398 4399 InstCombinePass::InstCombinePass(unsigned MaxIterations) 4400 : MaxIterations(MaxIterations) {} 4401 4402 PreservedAnalyses InstCombinePass::run(Function &F, 4403 FunctionAnalysisManager &AM) { 4404 auto &AC = AM.getResult<AssumptionAnalysis>(F); 4405 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 4406 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 4407 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 4408 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 4409 4410 auto *LI = AM.getCachedResult<LoopAnalysis>(F); 4411 4412 auto *AA = &AM.getResult<AAManager>(F); 4413 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 4414 ProfileSummaryInfo *PSI = 4415 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 4416 auto *BFI = (PSI && PSI->hasProfileSummary()) ? 
InstCombinePass::InstCombinePass() : MaxIterations(LimitMaxIterations) {}

InstCombinePass::InstCombinePass(unsigned MaxIterations)
    : MaxIterations(MaxIterations) {}

PreservedAnalyses InstCombinePass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);

  auto *LI = AM.getCachedResult<LoopAnalysis>(F);

  auto *AA = &AM.getResult<AAManager>(F);
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;

  if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
                                       BFI, PSI, MaxIterations, LI))
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();

  // The CFG is left intact, so mark all CFG analyses as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}
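
// A minimal scheduling sketch for the new pass manager (illustrative only;
// it assumes a FunctionAnalysisManager `FAM` with the standard analyses
// already registered, e.g. through PassBuilder::registerFunctionAnalyses):
//
//   FunctionPassManager FPM;
//   FPM.addPass(InstCombinePass());
//   PreservedAnalyses PA = FPM.run(F, FAM);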
void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
}

bool InstructionCombiningPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Required analyses.
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  // Optional analyses.
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  BlockFrequencyInfo *BFI =
      (PSI && PSI->hasProfileSummary()) ?
      &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
      nullptr;

  return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
                                         BFI, PSI, MaxIterations, LI);
}

char InstructionCombiningPass::ID = 0;

InstructionCombiningPass::InstructionCombiningPass()
    : FunctionPass(ID), MaxIterations(InstCombineDefaultMaxIterations) {
  initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
}

InstructionCombiningPass::InstructionCombiningPass(unsigned MaxIterations)
    : FunctionPass(ID), MaxIterations(MaxIterations) {
  initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
                      "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
                    "Combine redundant instructions", false, false)

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstructionCombiningPassPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstructionCombiningPassPass(*unwrap(R));
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstructionCombiningPass();
}

FunctionPass *llvm::createInstructionCombiningPass(unsigned MaxIterations) {
  return new InstructionCombiningPass(MaxIterations);
}

void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createInstructionCombiningPass());
}
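
// A minimal usage sketch for the C API binding above (illustrative only;
// it assumes an existing LLVMModuleRef `Mod` and LLVMValueRef `Fn`):
//
//   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(Mod);
//   LLVMAddInstructionCombiningPass(FPM);
//   LLVMInitializeFunctionPassManager(FPM);
//   LLVMRunFunctionPassManager(FPM, Fn);
//   LLVMFinalizeFunctionPassManager(FPM);
//   LLVMDisposePassManager(FPM);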