//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(),
                           C->getValue()->getValue().sdiv(
                               FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
            SE.getAddExpr(Remainder,
                          SE.getConstant(C->getValue()->getValue().srem(
                              FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
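    // Editor's illustrative note (hypothetical values, not from the original
    // source): e.g. factoring (6 * %x) by a Factor of 2 rewrites S below to
    // (3 * %x), since only the constant operand is divided.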
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(
            C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
          Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(OriginalElTy, Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
      RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
        PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                             getRelevantLoop(D->getRHS()),
                             *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
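      // Editor's illustrative note (hypothetical values): if Sum is an i8*
      // value %p and the remaining same-loop operands are %n and 4, the GEP
      // formed below indexes %p by the expanded sum of %n and 4 rather than
      // going through ptrtoint/add/inttoptr arithmetic.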
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ++I) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
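        // Editor's illustrative note: e.g. a power-of-two operand of 8 turns
        // the multiply into a shift by 3, since logBase2(8) == 3.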
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
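  // Editor's illustrative note (value names are hypothetical): the cases
  // below accept increments such as
  //   %iv.next = add i64 %iv, %step
  //   %iv.next = getelementptr i8, i8* %iv, i64 %step
  // as well as bitcasts appearing in the chain back to the IV phi.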
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
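///
/// Illustrative sketch (editor's note; value names are hypothetical): for an
/// integer-typed phi %x.iv this emits
///   %x.iv.next = add <ty> %x.iv, %step    (or a sub when useSubtract is set)
/// while for a pointer-typed phi the increment is materialized as a GEP via
/// expandAddToGEP, followed by a bitcast back to the phi's type if needed.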
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
           Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
           Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                           Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
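  // Editor's illustrative note: e.g. if Requested is {6,+,-1}<L> and the
  // available Phi is {0,+,1}<L>, then Start(Requested) + (-Requested) is
  // 6 + {-6,+,1} = {0,+,1}, which equals Phi, so the requested value can be
  // recovered as 6 - Phi.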
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV = IVIncInsertLoop &&
        SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
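  // Editor's illustrative note (block names are hypothetical): for an addrec
  // {Start,+,Step}<L> the code below builds a phi of the shape
  //   %<IVName>.iv = phi ExpandTy [ StartV, %preheader ],
  //                               [ %<IVName>.iv.next, %latch ]
  // with the per-backedge increment emitted by expandIVInc.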
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
                             IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
        cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
                                                    nullptr, Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
                                          TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        BuilderType::InsertPointGuard Guard(Builder);
        StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        std::next(BasicBlock::iterator(cast<Instruction>(V)));
    BuilderType::InsertPointGuard Guard(Builder);
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                      NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
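    // Editor's illustrative note (block names are hypothetical): the canonical
    // IV built below has the shape
    //   %indvar = phi <Ty> [ 0, %preheader ], [ %indvar.next, %latch ]
    //   %indvar.next = add <Ty> %indvar, 1
    // with a zero incoming value for every predecessor outside the loop.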
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) // {0,+,F} --> i*F
    return
        expand(SE.getTruncateOrNoop(
            SE.getMulExpr(SE.getUnknown(CanonicalIV),
                          SE.getNoopOrAnyExtend(S->getOperand(1),
                                                CanonicalIV->getType())),
            Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
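  // A hedged illustration of this function's contract (not additional
  // behavior): a caller that already knows the desired IR type can write
  //
  //   Value *Len = Expander.expandCodeFor(BECount, IntPtrTy, InsertPt);
  //
  // where "Expander", "BECount", "IntPtrTy", and "InsertPt" are hypothetical
  // names. The requested type may only differ from the SCEV's type by a
  // no-op cast of the same bit width, as the assert below enforces.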
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = std::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
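  // A hedged usage sketch (hypothetical caller and type, not code in this
  // file):
  //
  //   PHINode *IndVar =
  //       Expander.getOrInsertCanonicalInductionVariable(L, Int64Ty);
  //
  // Expanding {0,+,1}<L> below reaches the affine special case in
  // visitAddRecExpr, which (in canonical mode) returns the loop's canonical
  // IV phi, creating it first if the loop does not already have one.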
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
                                           L->getHeader()->begin()));

  return V;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits() <
             LHS->getType()->getPrimitiveSizeInBits();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
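      // As a concrete (hedged) illustration of congruence: two header phis
      //
      //   %i = phi i64 [ 0, %ph ], [ %i.next, %latch ]
      //   %j = phi i64 [ 0, %ph ], [ %j.next, %latch ]
      //
      // that both compute {0,+,1}<%loop> map to the same SCEV, so whichever
      // one is kept as OrigPhiRef replaces the other below. The value names
      // are hypothetical; only the SCEV equality matters.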
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = nullptr;
          if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
            IP = PN->getParent()->getFirstInsertionPt();
          else
            IP = OrigInc->getNextNode();

          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.emplace_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}

bool SCEVExpander::isHighCostExpansionHelper(
    const SCEV *S, Loop *L, SmallPtrSetImpl<const SCEV *> &Processed) {

  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(), L,
                                     Processed);
  case scZeroExtend:
    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                                     L, Processed);
  case scSignExtend:
    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
                                     L, Processed);
  }

  if (!Processed.insert(S).second)
    return false;

  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
    // If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs
    // in the user code since it can be lowered into a right shift.
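    // For example (a hedged illustration): %n /u 8 at a width that is legal
    // for the target lowers to "lshr i64 %n, 3" and is treated as cheap,
    // while the same power-of-two divide at an illegal width (say i128 on a
    // 64-bit target) is reported as a high-cost expansion below.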
    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
      if (SC->getValue()->getValue().isPowerOf2()) {
        const DataLayout &DL =
            L->getHeader()->getParent()->getParent()->getDataLayout();
        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
        return DL.isIllegalInteger(Width);
      }

    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
    BasicBlock *ExitingBB = L->getExitingBlock();
    if (!ExitingBB)
      return true;

    BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!ExitingBI || !ExitingBI->isConditional())
      return true;

    ICmpInst *OrigCond = dyn_cast<ICmpInst>(ExitingBI->getCondition());
    if (!OrigCond)
      return true;

    const SCEV *RHS = SE.getSCEV(OrigCond->getOperand(1));
    RHS = SE.getMinusSCEV(RHS, SE.getConstant(RHS->getType(), 1));
    if (RHS != S) {
      const SCEV *LHS = SE.getSCEV(OrigCond->getOperand(0));
      LHS = SE.getMinusSCEV(LHS, SE.getConstant(LHS->getType(), 1));
      if (LHS != S)
        return true;
    }
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // Recurse past nary expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      if (isHighCostExpansionHelper(*I, L, Processed))
        return true;
    }
  }

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
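//
// For example (hedged, illustrative SCEVs only): (%a /u %b) is rejected
// because %b is not known to be nonzero, while (%a /u 4) is accepted; a
// non-affine recurrence is rejected when its step does not dominate the loop
// header, whereas affine recurrences are handled specially by the expander.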
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}
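
// A hedged end-to-end usage sketch (hypothetical client code, not part of
// this file): a pass that already has a configured SCEVExpander and wants to
// materialize a loop's trip count might do
//
//   const SCEV *BECount = SE.getBackedgeTakenCount(L);
//   if (isSafeToExpand(BECount, SE)) {
//     Value *V = Expander.expandCodeFor(BECount, BECount->getType(),
//                                       Preheader->getTerminator());
//     ...
//   }
//
// "Expander", "L", and "Preheader" are placeholder names; the calls
// themselves match the interfaces defined above and in ScalarEvolution.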