//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note
    // that the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);
    rememberInstruction(Ret);
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I, Instruction *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on i8* null with the integral value as index. Note that
  // it is safe to use GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      auto *GEP = Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
      return Builder.CreateBitCast(GEP, Ty);
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
              SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
              SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
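  // For example, a ptrtoint of a null pointer constant folds to the integer
  // constant 0 via ConstantExpr::getCast; no cast instruction is emitted.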
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, &*Builder.GetInsertPoint());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction that has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
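  // The requested no-wrap flags are transferred onto the new instruction
  // below; e.g. expanding an add with both flags yields (names illustrative)
  //   %tmp = add nuw nsw i64 %LHS, %RHS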
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getAPInt().srem(FC->getAPInt())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->operands());
          NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
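  // For example, [x, 2, {0,+,1}<L>] is grouped into the non-addrec prefix
  // [x, 2], which getAddExpr re-simplifies, and the addrec suffix
  // [{0,+,1}<L>], which is re-appended below.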
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone.
/// The LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntIdxTy = DL.getIndexType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of
            // operands we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled =
        ScaledOps.empty()
            ? Constant::getNullValue(Ty)
            : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
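      // For example, with %S = type { i32, i32 } and a constant offset of 5,
      // getElementContainingOffset picks field 1 (at offset 4), and the
      // leftover byte offset 1 is pushed back onto Ops[0].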
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      // FIXME: Handle VectorType.
      // E.g., if ElTy is a scalable vector, then ElSize is not a compile-time
      // constant and therefore cannot be factored out. The generated IR is
      // less ideal: the base 'V' is cast to i8* and an ugly getelementptr is
      // emitted over that.
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
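    // The result is a plain byte-addressed GEP, e.g. (names illustrative):
    //   %uglygep = getelementptr i8, i8* %base, i64 %offset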
    return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked
    // inbounds, because ScalarEvolution may have changed the address
    // arithmetic to compute a value which is beyond the end of the
    // allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies
  // on to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
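      // Gather the remaining operands associated with this loop first, so
      // that, e.g., (%p + a + b) becomes one GEP with index (a + b) rather
      // than two chained GEPs.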
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is
    // included into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1ull << 63 at most to not deal with unsigned overflow.
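    // For example, with Exponent == 11 (binary 1011) the loop below computes
    // X^2, X^4 and X^8 by repeated squaring and multiplies X^1, X^2 and X^8
    // into the result, producing X^11 with 5 multiplies instead of 10.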
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc., and multiply the
    // ones that appear in the binary representation of Exponent into the
    // result.
    Value *P = expandCodeForImpl(I->second, Ty, false);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl will produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
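/// For example, with Rest = 0 and Base = {(4 + %p),+,8}, this leaves
/// Base = %p and Rest = {4,+,8}.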
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->operands());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of
/// the simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is
      // already handled, or some number of address-size elements (ugly).
      // Ugly geps have 2 operands. i1* is used by the expander to represent
      // an address-size element.
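      // An ugly GEP looks like (names illustrative)
      //   %uglygep = getelementptr i8, i8* %base, i64 %n
      // i.e. exactly a base operand plus a single address-size index.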
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the
/// instruction 'I' is being moved. If this fixup is not done and 'I' is
/// moved to a different block, the inconsistent insert point (with a
/// mismatched Instruction and Block) can lead to an instruction being
/// inserted in a block other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to
/// make it available to other uses in this loop. Recursively hoist any
/// operands, until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated
/// by LSR. We don't care if the phi was actually expanded in this pass, as
/// long as it is in a low-cost form, for example, no implied multiplication.
/// This should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
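/// For an integer IV this emits, e.g. (IV name illustrative),
///   %x.iv.next = add i64 %x.iv, %step
/// while a pointer IV gets a GEP of the step instead.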
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType())
      IncV = Builder.CreateBitCast(IncV, PN->getType());
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
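  // The check below verifies Start(Requested) + (-Requested) == Phi: negating
  // {S,+,X} gives {-S,+,-X}, and adding S back leaves {0,+,-X}, which is what
  // a reusable phi must compute if Requested is to be rebuilt as S - Phi.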
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
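      // In LSR mode the increment must be one of the cheap patterns LSR
      // itself emits; otherwise it must be a plain chain of increments
      // leading back to the phi.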
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expandCodeForImpl(Normalized->getStart(), ExpandTy,
                        L->getLoopPreheader()->getTerminator(), false);

  // StartV must be inserted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
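  // The header's first insertion point dominates every block in the loop, so
  // a step expanded there is available at each predecessor's increment.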
  Value *StepV = expandCodeForImpl(
      Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert
    // the instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the
  // caller can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to
  // truncate it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeForImpl(
            Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop.
  // Apply truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType())
      Result = Builder.CreateTrunc(Result, TruncTy);

    // Invert the result.
    if (InvertStep)
      Result = Builder.CreateSub(
          expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
  }

  // Re-apply any non-loop-dominating scale.
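  // The addrec was expanded above with a unit step; e.g. {0,+,%s} was
  // expanded as {0,+,1} and is multiplied by %s here.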
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeForImpl(PostLoopScale, IntTy, false));
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(
          Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  // In canonical mode we compute the addrec as an expression of a canonical IV
  // using evaluateAtIteration and expand the resulting SCEV expression. This
  // way we avoid introducing new IVs to carry on the computation of the addrec
  // throughout the loop.
  //
  // For nested addrecs evaluateAtIteration might need a canonical IV of a
  // type wider than the addrec itself. Emitting a canonical IV of the
  // proper type might produce non-legal types, for example expanding an i64
  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
  // back to non-canonical mode for nested addrecs.
  if (!CanonicalMode || (S->getNumOperands() > 2))
    return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
    V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                          &*NewInsertPt, false);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->operands());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    // Dig into the expression to find the pointer base for a GEP.
    const SCEV *ExposedRest = Rest;
    ExposePointerBase(Base, ExposedRest, SE);
    // If we found a pointer, expand the AddRec with a GEP.
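    // Editor's illustration (not from the original source): for a pointer
    // base, the branch below prefers a GEP over integer round-tripping, e.g.
    // for {%base,+,4}<%L> with i8* %base it emits roughly
    //   %scevgep = getelementptr i8, i8* %base, i64 %idx
    // instead of a ptrtoint/add/inttoptr sequence.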
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
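  // Editor's illustration (not from the original source): for a higher-degree
  // addrec such as {0,+,2,+,1} with canonical IV i, evaluateAtIteration below
  // yields a closed form built from binomial coefficients, roughly
  // 2*BC(i,1) + BC(i,2), which the SCEV folders simplify before expansion.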
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
  Value *V =
      expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
  return Builder.CreatePtrToInt(V, S->getType());
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateTrunc(V, Ty);
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateZExt(V, Ty);
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateSExt(V, Ty);
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
                                       Instruction *IP, bool Root) {
  setInsertPoint(IP);
  Value *V = expandCodeForImpl(SH, Ty, Root);
  return V;
}

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);

  if (PreserveLCSSA) {
    if (auto *Inst = dyn_cast<Instruction>(V)) {
      // Create a temporary instruction at the current insertion point, so we
      // can hand it off to the helper to create LCSSA PHIs if required for the
      // new use.
      // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
      // would accept an insertion point and return an LCSSA phi for that
      // insertion point, so there is no need to insert & remove the temporary
      // instruction.
      Instruction *Tmp;
      if (Inst->getType()->isIntegerTy())
        Tmp =
            cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
      else {
        assert(Inst->getType()->isPointerTy());
        Tmp = cast<Instruction>(
            Builder.CreateGEP(Inst, Builder.getInt32(1), "tmp.lcssa.user"));
      }
      V = fixupLCSSAFormFor(Tmp, 0);

      // Clean up temporary instruction.
      InsertedValues.erase(Tmp);
      InsertedPostIncValues.erase(Tmp);
      Tmp->eraseFromParent();
    }
  }

  InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode, and the SCEV contains any
  // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the InsertPt.
      // InsertPt should be inside the Value's parent loop so as not to break
      // the LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = nullptr;
        if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
            S->getType() == V->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}

// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse a Value from ExprValueMap, and only when
// that fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();

  // We can move the insertion point only if there are no div or rem
  // operations; otherwise we risk moving it past a check for a zero
  // denominator.
  auto SafeToHoist = [](const SCEV *S) {
    return !SCEVExprContains(S, [](const SCEV *S) {
      if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
        if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
          // Division by non-zero constants can be hoisted.
          return SC->getValue()->isZero();
        // All other divisions should not be moved as they may be
        // divisions by zero and should be kept within the
        // conditions of the surrounding loops that guard their
        // execution (see PR35406).
        return true;
      }
      return false;
    });
  };
  if (SafeToHoist(S)) {
    for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
         L = L->getParentLoop()) {
      if (SE.isLoopInvariant(S, L)) {
        if (!L) break;
        if (BasicBlock *Preheader = L->getLoopPreheader())
          InsertPt = Preheader->getTerminator();
        else
          // LSR sets the insertion point for AddRec start/step values to the
          // block start to simplify value reuse, even though it's an invalid
          // position. SCEVExpander must correct for this in all cases.
          InsertPt = &*L->getHeader()->getFirstInsertionPt();
      } else {
        // If the SCEV is computable at this level, insert it into the header
        // after the PHIs (and after any other instructions that we've inserted
        // there) so that it is guaranteed to dominate any user inside the loop.
        if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
          InsertPt = &*L->getHeader()->getFirstInsertionPt();

        while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
               (isInsertedInstruction(InsertPt) ||
                isa<DbgInfoIntrinsic>(InsertPt))) {
          InsertPt = &*std::next(InsertPt->getIterator());
        }
        break;
      }
    }
  }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
  Value *V = VO.first;

  if (!V)
    V = visit(S);
  else if (VO.second) {
    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
      Type *Ety = Vty->getPointerElementType();
      int64_t Offset = VO.second->getSExtValue();
      int64_t ESize = SE.getTypeSizeInBits(Ety);
      if ((Offset * 8) % ESize == 0) {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
        V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
      } else {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      }
    } else {
      V = Builder.CreateSub(V, VO.second);
    }
  }
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  auto DoInsert = [this](Value *V) {
    if (!PostIncLoops.empty())
      InsertedPostIncValues.insert(V);
    else
      InsertedValues.insert(V);
  };
  DoInsert(I);

  if (!PreserveLCSSA)
    return;

  if (auto *Inst = dyn_cast<Instruction>(I)) {
    // A new instruction has been added, which might introduce new uses outside
    // a defining loop. Fix LCSSA for each operand of the new instruction, if
    // required.
    for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
         OpIdx++)
      fixupLCSSAFormFor(Inst, OpIdx);
  }
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of decreasing width.
  SmallVector<PHINode*, 8> Phis;
  for (PHINode &PN : L->getHeader()->phis())
    Phis.push_back(&PN);

  if (TTI)
    llvm::sort(Phis, [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
             LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
            SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
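    // Editor's illustration (not from the original source): if two header
    // phis both compute {0,+,1}<%L>, they map to the same SCEV key, so the
    // second one found is replaced by the first (through a trunc/bitcast
    // when only the widths differ), as done below.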
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Original iv: "
                                      << *OrigPhiRef << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}

Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
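  // Editor's illustration (not from the original source): for an exit such as
  //   br i1 (icmp slt i64 %iv.next, %n), label %body, label %exit
  // the loop below can return %iv.next (or %n) for S when its SCEV equals S
  // and it dominates the requested insertion point.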
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    m_BasicBlock(), m_BasicBlock())))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic which is used for reusing a previous Value in
  // ExprValueMap.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return None;
}

template<typename T> static int costAndCollectOperands(
  const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
  TargetTransformInfo::TargetCostKind CostKind,
  SmallVectorImpl<SCEVOperand> &Worklist) {

  const T *S = cast<T>(WorkItem.S);
  int Cost = 0;
  // Object to help map SCEV operands to expanded IR instructions.
  struct OperationIndices {
    OperationIndices(unsigned Opc, size_t min, size_t max) :
      Opcode(Opc), MinIdx(min), MaxIdx(max) { }
    unsigned Opcode;
    size_t MinIdx;
    size_t MaxIdx;
  };

  // Collect the operations of all the instructions that will be needed to
  // expand the SCEVExpr. This is so that when we come to cost the operands,
  // we know what the generated user(s) will be.
  SmallVector<OperationIndices, 2> Operations;

  auto CastCost = [&](unsigned Opcode) {
    Operations.emplace_back(Opcode, 0, 0);
    return TTI.getCastInstrCost(Opcode, S->getType(),
                                S->getOperand(0)->getType(),
                                TTI::CastContextHint::None, CostKind);
  };

  auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
                       unsigned MinIdx = 0, unsigned MaxIdx = 1) {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    return NumRequired *
           TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
  };

  auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired,
                        unsigned MinIdx, unsigned MaxIdx) {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    Type *OpType = S->getOperand(0)->getType();
    return NumRequired * TTI.getCmpSelInstrCost(
                             Opcode, OpType, CmpInst::makeCmpResultType(OpType),
                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
  };

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
  case scConstant:
    return 0;
  case scPtrToInt:
    Cost = CastCost(Instruction::PtrToInt);
    break;
  case scTruncate:
    Cost = CastCost(Instruction::Trunc);
    break;
  case scZeroExtend:
    Cost = CastCost(Instruction::ZExt);
    break;
  case scSignExtend:
    Cost = CastCost(Instruction::SExt);
    break;
  case scUDivExpr: {
    unsigned Opcode = Instruction::UDiv;
    if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
      if (SC->getAPInt().isPowerOf2())
        Opcode = Instruction::LShr;
    Cost = ArithCost(Opcode, 1);
    break;
  }
  case scAddExpr:
    Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
    break;
  case scMulExpr:
    // TODO: this is a very pessimistic cost model for Mul, because the
    // expander actually uses the binary exponentiation ("Bin Pow") algorithm;
    // see SCEVExpander::visitMulExpr() and ExpandOpBinPowN().
    Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
    break;
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
    Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
    break;
  }
  case scAddRecExpr: {
    // In this polynomial, we may have some zero operands, and we shouldn't
    // really charge for those. So how many non-zero coefficients are there?
    int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
                     return !Op->isZero();
                   });

    assert(NumTerms >= 1 && "Polynomial should have at least one term.");
    assert(!(*std::prev(S->operands().end()))->isZero() &&
           "Last operand should not be zero");

    // Ignoring the constant term (operand 0), how many of the coefficients
    // are u> 1?
    int NumNonZeroDegreeNonOneTerms =
      llvm::count_if(S->operands(), [](const SCEV *Op) {
                       auto *SConst = dyn_cast<SCEVConstant>(Op);
                       return !SConst || SConst->getAPInt().ugt(1);
                     });

    // Much like with the normal add expr, the polynomial will require
    // one less addition than the number of its terms.
    int AddCost = ArithCost(Instruction::Add, NumTerms - 1,
                            /*MinIdx*/1, /*MaxIdx*/1);
    // Here, *each* one of those will require a multiplication.
    int MulCost = ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
    Cost = AddCost + MulCost;

    // What is the degree of this polynomial?
    int PolyDegree = S->getNumOperands() - 1;
    assert(PolyDegree >= 1 && "Should be at least affine.");

    // The final term will be:
    //   Op_{PolyDegree} * x ^ {PolyDegree}
    // Where  x ^ {PolyDegree}  will again require PolyDegree-1 mul operations.
    // Note that  x ^ {PolyDegree} = x * x ^ {PolyDegree-1}  so charging for
    // x ^ {PolyDegree} will give us  x ^ {2} .. x ^ {PolyDegree-1}  for free.
    // FIXME: this is conservatively correct, but might be overly pessimistic.
    Cost += MulCost * (PolyDegree - 1);
    break;
  }
  }

  for (auto &CostOp : Operations) {
    for (auto SCEVOp : enumerate(S->operands())) {
      // Clamp the index to account for multiple IR operations being chained.
      size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
      size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
      Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
    }
  }
  return Cost;
}

bool SCEVExpander::isHighCostExpansionHelper(
    const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
    int &BudgetRemaining, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<const SCEV *> &Processed,
    SmallVectorImpl<SCEVOperand> &Worklist) {
  if (BudgetRemaining < 0)
    return true; // Already ran out of budget; give up.

  const SCEV *S = WorkItem.S;
  // Was the cost of expansion of this expression already accounted for?
  if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
    return false; // We have already accounted for this expression.

  // If we can find an existing value for this SCEV available at the point "At"
  // then consider the expression cheap.
  if (getRelatedExistingExpansion(S, &At, L))
    return false; // Consider the expression to be free.
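  // Editor's illustration (not from the original source): the cases below
  // subtract each subexpression's TTI cost from BudgetRemaining. E.g. with
  // the default budget of 4 (SCEVCheapExpansionBudget), an operation costed
  // at 4 leaves 0; once a further operand drives the budget negative, the
  // next entry into this function reports the expansion as high-cost.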
  TargetTransformInfo::TargetCostKind CostKind =
      L->getHeader()->getParent()->hasMinSize()
          ? TargetTransformInfo::TCK_CodeSize
          : TargetTransformInfo::TCK_RecipThroughput;

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
    // Assume to be zero-cost.
    return false;
  case scConstant: {
    // Only evaluate the costs of constants when optimizing for size.
    if (CostKind != TargetTransformInfo::TCK_CodeSize)
      return false;
    const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
    Type *Ty = S->getType();
    BudgetRemaining -= TTI.getIntImmCostInst(
        WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
    return BudgetRemaining < 0;
  }
  case scTruncate:
  case scPtrToInt:
  case scZeroExtend:
  case scSignExtend: {
    int Cost =
        costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
    BudgetRemaining -= Cost;
    return false; // Will answer upon next entry into this function.
  }
  case scUDivExpr: {
    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, we need to account for its cost.

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to lookup 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
      return false; // Consider it to be free.

    int Cost =
        costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
    // Need to count the cost of this UDiv.
    BudgetRemaining -= Cost;
    return false; // Will answer upon next entry into this function.
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
           "Nary expr should have more than 1 operand.");
    // The simple nary expr will require one less op (or pair of ops)
    // than the number of its terms.
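    // Editor's illustration (not from the original source): (a + b + c)
    // expands to two adds, and umax(a, b, c) to two icmp/select pairs,
    // matching the NumOperands - 1 counts used by costAndCollectOperands().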
    int Cost =
        costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
    BudgetRemaining -= Cost;
    return BudgetRemaining < 0;
  }
  case scAddRecExpr: {
    assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
           "Polynomial should be at least linear");
    BudgetRemaining -= costAndCollectOperands<SCEVAddRecExpr>(
        WorkItem, TTI, CostKind, Worklist);
    return BudgetRemaining < 0;
  }
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  assert(IP);
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}

Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 =
      expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
  Value *Expr1 =
      expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}

Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  Type *ARTy = AR->getType();
  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(ARTy);

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
  Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;

  Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
  Value *NegStepValue =
      expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
  Value *StartValue = expandCodeForImpl(Start, ARExpandTy, Loc, false);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));

  Builder.SetInsertPoint(Loc);
  // Compute |Step|
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);

  // Get the backedge-taken count and truncate or extend it to the AR type.
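  // Editor's sketch of the check built below (not from the original source):
  // for {%start,+,%step} with backedge-taken count %n, compute
  //   {%mul, %ovf} = umul.with.overflow(|%step|, %n)
  // and flag wrap when %start + %mul < %start (for %step >= 0), when
  // %start - %mul > %start (for %step < 0), or when %ovf is set.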
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");

  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = nullptr, *Sub = nullptr;
  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
    const SCEV *MulS = SE.getSCEV(MulV);
    const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
    Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
                                ARPtrTy);
    Sub = Builder.CreateBitCast(
        expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
  } else {
    Add = Builder.CreateAdd(StartValue, MulV);
    Sub = Builder.CreateSub(StartValue, MulV);
  }

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);

  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  return Builder.CreateOr(EndCheck, OfMul);
}

Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}

Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);

  // Loop over all checks in this set.
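  // Editor's note (not from the original source): the union check is just the
  // OR of its members' checks, so if any member predicate's runtime check
  // fails (evaluates to true), the union's check fails as well.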
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}

Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
  assert(PreserveLCSSA);
  SmallVector<Instruction *, 1> ToUpdate;

  auto *OpV = User->getOperand(OpIdx);
  auto *OpI = dyn_cast<Instruction>(OpV);
  if (!OpI)
    return OpV;

  Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
  Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
  if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
    return OpV;

  ToUpdate.push_back(OpI);
  SmallVector<PHINode *, 16> PHIsToRemove;
  formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
  for (PHINode *PN : PHIsToRemove) {
    if (!PN->use_empty())
      continue;
    InsertedValues.erase(PN);
    InsertedPostIncValues.erase(PN);
    PN->eraseFromParent();
  }

  return User->getOperand(OpIdx);
}

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check
// is only needed when the expression includes some subexpression that is not
// IV derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in
// a perfectly reduced form, which can't be guaranteed.
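//
// Editor's illustration (not from the original source): under these rules,
// (%a /u %b) is unsafe unless %b is a non-zero constant, and a non-affine
// recurrence is unsafe when its step does not dominate the loop header.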
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}

bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
                      ScalarEvolution &SE) {
  if (!isSafeToExpand(S, SE))
    return false;
  // We have to prove that the expanded site of S dominates InsertionPoint.
  // This is easy when not in the same block, but hard when S is an instruction
  // to be expanded somewhere inside the same block as our insertion point.
  // What we really need here is something analogous to an OrderedBasicBlock,
  // but for the moment, we paper over the problem by handling two common and
  // cheap-to-check cases.
  if (SE.properlyDominates(S, InsertionPoint->getParent()))
    return true;
  if (SE.dominates(S, InsertionPoint->getParent())) {
    if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
      return true;
    if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
      for (const Value *V : InsertionPoint->operand_values())
        if (V == U->getValue())
          return true;
  }
  return false;
}

SCEVExpanderCleaner::~SCEVExpanderCleaner() {
  // Result is used, nothing to remove.
  if (ResultUsed)
    return;

  auto InsertedInstructions = Expander.getAllInsertedInstructions();
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
                                            InsertedInstructions.end());
  (void)InsertedSet;
#endif
  // Remove sets with value handles.
  Expander.clear();

  // Sort so that earlier instructions do not dominate later instructions.
  stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
    return DT.dominates(B, A);
  });
  // Remove all inserted instructions.
  for (Instruction *I : InsertedInstructions) {

#ifndef NDEBUG
    assert(all_of(I->users(),
                  [&InsertedSet](Value *U) {
                    return InsertedSet.contains(cast<Instruction>(U));
                  }) &&
           "removed instruction should only be used by instructions inserted "
           "during expansion");
#endif
    assert(!I->getType()->isVoidTy() &&
           "inserted instruction should have a non-void type");
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }
}
}