//=== ScalarEvolutionExpanderTest.cpp - ScalarEvolutionExpander unit tests ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

using namespace PatternMatch;

// We use this fixture to ensure that we clean up ScalarEvolution before
// deleting the analyses it depends on.
class ScalarEvolutionExpanderTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionExpanderTest() : M("", Context), TLII(), TLI(TLII) {}

  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }
};

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instruction!");
}

TEST_F(ScalarEvolutionExpanderTest, ExpandPtrTypeSCEV) {
  // This tests the fix for PR30213. It exercises the branch in SCEV expansion
  // where the value in a ValueOffsetPair is a pointer and the offset is not
  // divisible by the element type size of the value.
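  // The expansion below starts from %bitcast2, whose SCEV differs from the
  // requested expression by -1 byte; since one byte is not a multiple of the
  // i32 element size, the expander has to route the GEP through an i8 pointer
  // (the "uglygep" checked for at the end of this test).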
  auto *I8Ty = Type::getInt8Ty(Context);
  auto *I8PtrTy = Type::getInt8PtrTy(Context);
  auto *I32Ty = Type::getInt32Ty(Context);
  auto *I32PtrTy = Type::getInt32PtrTy(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  ReturnInst::Create(Context, nullptr, ExitBB);

  // loop:                             ; preds = %loop, %entry
  //   %alloca = alloca i32
  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
  //   %bitcast1 = bitcast i32* %gep0 to i8*
  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
  //   %gep2 = getelementptr i8, i8* undef, i32 1
  //   %cmp = icmp ult i8* undef, %bitcast1
  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
  //   %bitcast2 = bitcast i8* %select to i32*
  //   br i1 undef, label %loop, label %exit

  const DataLayout &DL = F->getParent()->getDataLayout();
  BranchInst *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  AllocaInst *Alloca =
      new AllocaInst(I32Ty, DL.getAllocaAddrSpace(), "alloca", Br);
  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
  GetElementPtrInst *Gep0 =
      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
  CastInst *CastA =
      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
  GetElementPtrInst *Gep1 =
      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
  CastInst *CastB =
      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);

  ScalarEvolution SE = buildSE(*F);
  auto *S = SE.getSCEV(CastB);
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Value *V =
      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);

  // Expect the expansion code to contain:
  //   %0 = bitcast i32* %bitcast2 to i8*
  //   %uglygep = getelementptr i8, i8* %0, i64 -1
  //   %1 = bitcast i8* %uglygep to i32*
  EXPECT_TRUE(isa<BitCastInst>(V));
  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
}
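
// Non-integral pointers (declared via "ni:<AS>" in the datalayout) have no
// stable integer representation, so the expander must materialize pointer
// arithmetic on them with GEPs instead of ptrtoint/inttoptr round-trips.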
// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr
// instructions.
TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
   *  %add = add i64 %phi, 1
   *  br i1 undef, label %L, label %post
   * post:
   *  %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
   *  #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
   *  ret void
   *
   * We will create the appropriate SCEV expression for %gep and expand it,
   * then check that no inttoptr/ptrtoint instructions got inserted.
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int1 = Type::getInt1Ty(Context);
  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Value *GepBase =
      Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  auto *AddRec =
      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
                       LI->getLoopFor(L), SCEV::FlagNUW);

  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
  Exp.disableCanonicalMode();
  Exp.expandCodeFor(AddRec, T_pint64, Ret);

  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
  // The verifier will check this.
  EXPECT_FALSE(verifyFunction(*F, &errs()));
}
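
// An addrec describes a value computed by a loop, so it can only be expanded
// at points dominated by the loop's header; above the loop no induction value
// exists yet.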
// Check that we can correctly identify the points at which the SCEV of the
// AddRec can be expanded.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderIsSafeToExpandAt) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
   *  %add = add i64 %phi, 1
   *  %cond = icmp slt i64 %add, 1000
   *  br i1 %cond, label %L, label %post
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  Builder.CreateCondBr(Cond, L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Phi);
  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  EXPECT_TRUE(AR->isAffine());
  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));

  EXPECT_TRUE(LI->getLoopFor(L)->isLCSSAForm(*DT));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Exp.expandCodeFor(SE.getSCEV(Add), nullptr, Ret);
  EXPECT_TRUE(LI->getLoopFor(L)->isLCSSAForm(*DT));
}
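
// The expander may reuse an existing instruction that computes the same SCEV
// instead of emitting new IR. The next two tests check that such reuse does
// not pick up poison-generating wrap flags that the SCEV being expanded does
// not itself guarantee.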
// Check that the SCEV expander does not reuse an add with the nuw flag
// for the expansion.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s = add nuw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the SCEV expander does not reuse an add with the nsw flag
// for the expansion.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s = add nsw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}
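
// ScalarEvolution folds %s1 and %s2 in the tests below to the same
// SCEVAddExpr, so its value cache records whichever instruction was queried
// first; the expander must still reject a cached value whose wrap flags are
// stronger than what the SCEV guarantees.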
// Check that the expander does not reuse a cached SCEV -> V mapping when V
// differs from the SCEV in the NUW flag.
TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nuw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVCacheNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it to the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expansion should be based on %s1, not %s2, even though
  // %s2 entered the cache first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the expander does not reuse a cached SCEV -> V mapping when V
// differs from the SCEV in the NSW flag.
TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nsw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVCacheNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it to the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expansion should be based on %s1, not %s2, even though
  // %s2 entered the cache first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}
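
// A canonical induction variable is a loop-header phi that starts at 0 and
// increments by exactly 1 on each backedge, i.e. the addrec {0,+,1}. In
// CanonicalMode the expander rewrites addrecs in terms of such an IV,
// inserting one if the loop lacks an IV of sufficient width.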
TEST_F(ScalarEvolutionExpanderTest, SCEVExpandInsertCanonicalIV) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV. SCEVExpander will insert one.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto *Loop = LI.getLoopFor(I.getParent());
              EXPECT_FALSE(Loop->getCanonicalInductionVariable());

              auto *AR = GetAddRec(SE, Loop);
              unsigned ExpectedCanonicalIVWidth =
                  SE.getTypeSizeInBits(AR->getType());

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);
              PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
              EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
            });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  // SCEVExpander will insert a canonical IV of a wider type to expand the
  // addrec.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEV *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking for the new canonical indvar.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == CanonicalIV)
          continue;
        // We expect that the only PHI added is the new canonical IV.
        EXPECT_FALSE(NewCanonicalIV);
        NewCanonicalIV = PN;
      }

      // Check that NewCanonicalIV is a canonical IV, i.e. {0,+,1}.
      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
      EXPECT_TRUE(isa<ConstantInt>(Start));
      EXPECT_TRUE(cast<ConstantInt>(Start)->isZero());
      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
      EXPECT_TRUE(isa<BinaryOperator>(Next));
      auto *NextBinOp = cast<BinaryOperator>(Next);
      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
      auto *Step = NextBinOp->getOperand(1);
      EXPECT_TRUE(isa<ConstantInt>(Step));
      EXPECT_TRUE(cast<ConstantInt>(Step)->isOne());

      unsigned NewCanonicalIVBitWidth =
          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };
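
  // In the two scenarios above the expander is expected to insert a fresh
  // canonical IV of the addrec's width. The new phi should have the shape
  // (names are illustrative; the expander picks its own):
  //   %indvar = phi i16 [ 0, %entry ], [ %indvar.next, %loop ]
  //   %indvar.next = add i16 %indvar, 1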

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  // To expand the addrec SCEVExpander should use the existing canonical IV.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);

              // Loop over all of the PHI nodes, checking whether a new
              // canonical indvar was introduced.
              PHINode *NewCanonicalIV = nullptr;
              for (BasicBlock::iterator i = LoopHeaderBB->begin();
                   isa<PHINode>(i); ++i) {
                PHINode *PN = cast<PHINode>(i);
                if (PN == &I || PN == &CanonicalIV)
                  continue;
                NewCanonicalIV = PN;
              }
              EXPECT_FALSE(NewCanonicalIV);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1}.
  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
  };
  TestNoCanonicalIV(GetAR2);
  TestNarrowCanonicalIV(GetAR2);
  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
}
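
// In the cases below, SCEV represents the mask (x & -32768, x & -128) as a
// multiply that carries the nsw flag; the expander emits that multiply as a
// shl and must not transfer nsw onto the shift.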
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderShlNSW) {

  auto checkOneCase = [this](std::string &&str) {
    LLVMContext C;
    SMDiagnostic Err;
    std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    Function *F = M->getFunction("f");
    ASSERT_NE(F, nullptr) << "Could not find function 'f'";

    BasicBlock &Entry = F->getEntryBlock();
    LoadInst *Load = cast<LoadInst>(&Entry.front());
    BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());

    ScalarEvolution SE = buildSE(*F);
    const SCEV *AndSCEV = SE.getSCEV(And);
    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());

    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
    auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
    EXPECT_EQ(I->getOpcode(), Instruction::Shl);
    EXPECT_FALSE(I->hasNoSignedWrap());
  };

  checkOneCase("define void @f(i16* %arrayidx) { "
               "  %1 = load i16, i16* %arrayidx "
               "  %2 = and i16 %1, -32768 "
               "  ret void "
               "} ");

  checkOneCase("define void @f(i8* %arrayidx) { "
               "  %1 = load i8, i8* %arrayidx "
               "  %2 = and i8 %1, -128 "
               "  ret void "
               "} ");
}
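
// Notation: a nested (non-affine) addrec such as {5,+,1,+,1}<%loop> starts
// at 5 with a step that is itself the addrec {1,+,1}, so its value at
// iteration n is 5 + n + n*(n-1)/2.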
// Test expansion of nested addrecs in CanonicalMode.
// Expanding nested addrecs in canonical mode requires a canonical IV of a
// type wider than the type of the addrec itself. Currently, SCEVExpander
// just falls back to literal mode for nested addrecs.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(*M, "test",
                  [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
                    auto &I = GetInstByName(F, "i");
                    auto *Loop = LI.getLoopFor(I.getParent());
                    EXPECT_FALSE(Loop->getCanonicalInductionVariable());

                    auto *AR = GetAddRec(SE, Loop);
                    EXPECT_FALSE(AR->isAffine());

                    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
                    auto *InsertAt = I.getNextNode();
                    Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
                    auto *ExpandedAR = SE.getSCEV(V);
                    // Check that the expansion happened literally.
                    EXPECT_EQ(AR, ExpandedAR);
                  });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };
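
  // Even with a canonical IV of matching width available, a non-affine
  // addrec is still expected to be expanded literally, per the fallback
  // described in the comment at the top of this test.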

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_FALSE(AR->isAffine());
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
              auto *ExpandedAR = SE.getSCEV(V);
              // Check that the expansion happened literally.
              EXPECT_EQ(AR, ExpandedAR);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1,+,1}.
  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR3);
  TestNarrowCanonicalIV(GetAR3);
  TestMatchingCanonicalIV(GetAR3, ARBitWidth);

  // Expand {5,+,1,+,1,+,1}.
  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR4);
  TestNarrowCanonicalIV(GetAR4);
  TestMatchingCanonicalIV(GetAR4, ARBitWidth);

  // Expand {5,+,1,+,1,+,1,+,1}.
  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR5);
  TestNarrowCanonicalIV(GetAR5);
  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
}

TEST_F(ScalarEvolutionExpanderTest, ExpandNonIntegralPtrWithNullBase) {
  LLVMContext C;
  SMDiagnostic Err;

  std::unique_ptr<Module> M =
      parseAssemblyString("target datalayout = "
                          "\"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:"
                          "128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2\""
                          "define float addrspace(1)* @test(i64 %offset) { "
                          "  %ptr = getelementptr inbounds float, float "
                          "addrspace(1)* null, i64 %offset"
                          "  ret float addrspace(1)* %ptr"
                          "}",
                          Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto &I = GetInstByName(F, "ptr");
    auto PtrPlus1 =
        SE.getAddExpr(SE.getSCEV(&I), SE.getConstant(I.getType(), 1));
    SCEVExpander Exp(SE, M->getDataLayout(), "expander");

    Value *V = Exp.expandCodeFor(PtrPlus1, I.getType(), &I);
    I.replaceAllUsesWith(V);

    // Check that the expander created:
    // define float addrspace(1)* @test(i64 %offset) {
    //   %scevgep = getelementptr float, float addrspace(1)* null, i64 %offset
    //   %scevgep1 = bitcast float addrspace(1)* %scevgep to i8 addrspace(1)*
    //   %uglygep = getelementptr i8, i8 addrspace(1)* %scevgep1, i64 1
    //   %uglygep2 = bitcast i8 addrspace(1)* %uglygep to float addrspace(1)*
    //   %ptr = getelementptr inbounds float, float addrspace(1)* null,
    //          i64 %offset
    //   ret float addrspace(1)* %uglygep2
    // }

    auto *Cast = dyn_cast<BitCastInst>(V);
    EXPECT_TRUE(Cast);
    EXPECT_EQ(Cast->getType(), I.getType());
    auto *GEP = dyn_cast<GetElementPtrInst>(Cast->getOperand(0));
    EXPECT_TRUE(GEP);
    EXPECT_TRUE(match(GEP->getOperand(1), m_SpecificInt(1)));
    auto *Cast1 = dyn_cast<BitCastInst>(GEP->getPointerOperand());
    EXPECT_TRUE(Cast1);
    auto *GEP1 = dyn_cast<GetElementPtrInst>(Cast1->getOperand(0));
    EXPECT_TRUE(GEP1);
    EXPECT_TRUE(cast<Constant>(GEP1->getPointerOperand())->isNullValue());
    EXPECT_EQ(GEP1->getOperand(1), &*F.arg_begin());
    EXPECT_EQ(cast<PointerType>(GEP1->getPointerOperand()->getType())
                  ->getAddressSpace(),
              cast<PointerType>(I.getType())->getAddressSpace());
    EXPECT_FALSE(verifyFunction(F, &errs()));
  });
}

} // end namespace llvm