1 //===- ValueTrackingTest.cpp - ValueTracking tests ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "llvm/Analysis/ValueTracking.h" 10 #include "llvm/Analysis/AssumptionCache.h" 11 #include "llvm/AsmParser/Parser.h" 12 #include "llvm/IR/ConstantRange.h" 13 #include "llvm/IR/Dominators.h" 14 #include "llvm/IR/Function.h" 15 #include "llvm/IR/InstIterator.h" 16 #include "llvm/IR/Instructions.h" 17 #include "llvm/IR/LLVMContext.h" 18 #include "llvm/IR/Module.h" 19 #include "llvm/Support/ErrorHandling.h" 20 #include "llvm/Support/KnownBits.h" 21 #include "llvm/Support/SourceMgr.h" 22 #include "llvm/Transforms/Utils/Local.h" 23 #include "gtest/gtest.h" 24 25 using namespace llvm; 26 27 namespace { 28 29 static Instruction *findInstructionByNameOrNull(Function *F, StringRef Name) { 30 for (Instruction &I : instructions(F)) 31 if (I.getName() == Name) 32 return &I; 33 34 return nullptr; 35 } 36 37 static Instruction &findInstructionByName(Function *F, StringRef Name) { 38 auto *I = findInstructionByNameOrNull(F, Name); 39 if (I) 40 return *I; 41 42 llvm_unreachable("Expected value not found"); 43 } 44 45 class ValueTrackingTest : public testing::Test { 46 protected: 47 std::unique_ptr<Module> parseModule(StringRef Assembly) { 48 SMDiagnostic Error; 49 std::unique_ptr<Module> M = parseAssemblyString(Assembly, Error, Context); 50 51 std::string errMsg; 52 raw_string_ostream os(errMsg); 53 Error.print("", os); 54 EXPECT_TRUE(M) << os.str(); 55 56 return M; 57 } 58 59 void parseAssembly(StringRef Assembly) { 60 M = parseModule(Assembly); 61 ASSERT_TRUE(M); 62 63 F = M->getFunction("test"); 64 ASSERT_TRUE(F) << "Test must have a function @test"; 65 if (!F) 66 return; 67 68 A = findInstructionByNameOrNull(F, "A"); 69 ASSERT_TRUE(A) << "@test must have an instruction %A"; 70 A2 = findInstructionByNameOrNull(F, "A2"); 71 A3 = findInstructionByNameOrNull(F, "A3"); 72 A4 = findInstructionByNameOrNull(F, "A4"); 73 74 CxtI = findInstructionByNameOrNull(F, "CxtI"); 75 CxtI2 = findInstructionByNameOrNull(F, "CxtI2"); 76 CxtI3 = findInstructionByNameOrNull(F, "CxtI3"); 77 } 78 79 LLVMContext Context; 80 std::unique_ptr<Module> M; 81 Function *F = nullptr; 82 Instruction *A = nullptr; 83 // Instructions (optional) 84 Instruction *A2 = nullptr, *A3 = nullptr, *A4 = nullptr; 85 86 // Context instructions (optional) 87 Instruction *CxtI = nullptr, *CxtI2 = nullptr, *CxtI3 = nullptr; 88 }; 89 90 class MatchSelectPatternTest : public ValueTrackingTest { 91 protected: 92 void expectPattern(const SelectPatternResult &P) { 93 Value *LHS, *RHS; 94 Instruction::CastOps CastOp; 95 SelectPatternResult R = matchSelectPattern(A, LHS, RHS, &CastOp); 96 EXPECT_EQ(P.Flavor, R.Flavor); 97 EXPECT_EQ(P.NaNBehavior, R.NaNBehavior); 98 EXPECT_EQ(P.Ordered, R.Ordered); 99 } 100 }; 101 102 class ComputeKnownBitsTest : public ValueTrackingTest { 103 protected: 104 void expectKnownBits(uint64_t Zero, uint64_t One) { 105 auto Known = computeKnownBits(A, M->getDataLayout()); 106 ASSERT_FALSE(Known.hasConflict()); 107 EXPECT_EQ(Known.One.getZExtValue(), One); 108 EXPECT_EQ(Known.Zero.getZExtValue(), Zero); 109 } 110 }; 111 112 } 113 114 TEST_F(MatchSelectPatternTest, SimpleFMin) { 115 parseAssembly( 116 "define float @test(float %a) {\n" 117 " %1 = 
fcmp ult float %a, 5.0\n" 118 " %A = select i1 %1, float %a, float 5.0\n" 119 " ret float %A\n" 120 "}\n"); 121 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 122 } 123 124 TEST_F(MatchSelectPatternTest, SimpleFMax) { 125 parseAssembly( 126 "define float @test(float %a) {\n" 127 " %1 = fcmp ogt float %a, 5.0\n" 128 " %A = select i1 %1, float %a, float 5.0\n" 129 " ret float %A\n" 130 "}\n"); 131 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 132 } 133 134 TEST_F(MatchSelectPatternTest, SwappedFMax) { 135 parseAssembly( 136 "define float @test(float %a) {\n" 137 " %1 = fcmp olt float 5.0, %a\n" 138 " %A = select i1 %1, float %a, float 5.0\n" 139 " ret float %A\n" 140 "}\n"); 141 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 142 } 143 144 TEST_F(MatchSelectPatternTest, SwappedFMax2) { 145 parseAssembly( 146 "define float @test(float %a) {\n" 147 " %1 = fcmp olt float %a, 5.0\n" 148 " %A = select i1 %1, float 5.0, float %a\n" 149 " ret float %A\n" 150 "}\n"); 151 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 152 } 153 154 TEST_F(MatchSelectPatternTest, SwappedFMax3) { 155 parseAssembly( 156 "define float @test(float %a) {\n" 157 " %1 = fcmp ult float %a, 5.0\n" 158 " %A = select i1 %1, float 5.0, float %a\n" 159 " ret float %A\n" 160 "}\n"); 161 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 162 } 163 164 TEST_F(MatchSelectPatternTest, FastFMin) { 165 parseAssembly( 166 "define float @test(float %a) {\n" 167 " %1 = fcmp nnan olt float %a, 5.0\n" 168 " %A = select i1 %1, float %a, float 5.0\n" 169 " ret float %A\n" 170 "}\n"); 171 expectPattern({SPF_FMINNUM, SPNB_RETURNS_ANY, false}); 172 } 173 174 TEST_F(MatchSelectPatternTest, FMinConstantZero) { 175 parseAssembly( 176 "define float @test(float %a) {\n" 177 " %1 = fcmp ole float %a, 0.0\n" 178 " %A = select i1 %1, float %a, float 0.0\n" 179 " ret float %A\n" 180 "}\n"); 181 // This shouldn't be matched, as %a could be -0.0. 182 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 183 } 184 185 TEST_F(MatchSelectPatternTest, FMinConstantZeroNsz) { 186 parseAssembly( 187 "define float @test(float %a) {\n" 188 " %1 = fcmp nsz ole float %a, 0.0\n" 189 " %A = select i1 %1, float %a, float 0.0\n" 190 " ret float %A\n" 191 "}\n"); 192 // But this should be, because we've ignored signed zeroes. 193 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 194 } 195 196 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero1) { 197 parseAssembly( 198 "define float @test(float %a) {\n" 199 " %1 = fcmp olt float -0.0, %a\n" 200 " %A = select i1 %1, float 0.0, float %a\n" 201 " ret float %A\n" 202 "}\n"); 203 // The sign of zero doesn't matter in fcmp. 204 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true}); 205 } 206 207 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero2) { 208 parseAssembly( 209 "define float @test(float %a) {\n" 210 " %1 = fcmp ogt float %a, -0.0\n" 211 " %A = select i1 %1, float 0.0, float %a\n" 212 " ret float %A\n" 213 "}\n"); 214 // The sign of zero doesn't matter in fcmp. 215 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 216 } 217 218 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero3) { 219 parseAssembly( 220 "define float @test(float %a) {\n" 221 " %1 = fcmp olt float 0.0, %a\n" 222 " %A = select i1 %1, float -0.0, float %a\n" 223 " ret float %A\n" 224 "}\n"); 225 // The sign of zero doesn't matter in fcmp. 
226 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true}); 227 } 228 229 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero4) { 230 parseAssembly( 231 "define float @test(float %a) {\n" 232 " %1 = fcmp ogt float %a, 0.0\n" 233 " %A = select i1 %1, float -0.0, float %a\n" 234 " ret float %A\n" 235 "}\n"); 236 // The sign of zero doesn't matter in fcmp. 237 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 238 } 239 240 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero5) { 241 parseAssembly( 242 "define float @test(float %a) {\n" 243 " %1 = fcmp ogt float -0.0, %a\n" 244 " %A = select i1 %1, float %a, float 0.0\n" 245 " ret float %A\n" 246 "}\n"); 247 // The sign of zero doesn't matter in fcmp. 248 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false}); 249 } 250 251 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero6) { 252 parseAssembly( 253 "define float @test(float %a) {\n" 254 " %1 = fcmp olt float %a, -0.0\n" 255 " %A = select i1 %1, float %a, float 0.0\n" 256 " ret float %A\n" 257 "}\n"); 258 // The sign of zero doesn't matter in fcmp. 259 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 260 } 261 262 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero7) { 263 parseAssembly( 264 "define float @test(float %a) {\n" 265 " %1 = fcmp ogt float 0.0, %a\n" 266 " %A = select i1 %1, float %a, float -0.0\n" 267 " ret float %A\n" 268 "}\n"); 269 // The sign of zero doesn't matter in fcmp. 270 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false}); 271 } 272 273 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero8) { 274 parseAssembly( 275 "define float @test(float %a) {\n" 276 " %1 = fcmp olt float %a, 0.0\n" 277 " %A = select i1 %1, float %a, float -0.0\n" 278 " ret float %A\n" 279 "}\n"); 280 // The sign of zero doesn't matter in fcmp. 281 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 282 } 283 284 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero1) { 285 parseAssembly( 286 "define float @test(float %a) {\n" 287 " %1 = fcmp ogt float -0.0, %a\n" 288 " %A = select i1 %1, float 0.0, float %a\n" 289 " ret float %A\n" 290 "}\n"); 291 // The sign of zero doesn't matter in fcmp. 292 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true}); 293 } 294 295 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero2) { 296 parseAssembly( 297 "define float @test(float %a) {\n" 298 " %1 = fcmp olt float %a, -0.0\n" 299 " %A = select i1 %1, float 0.0, float %a\n" 300 " ret float %A\n" 301 "}\n"); 302 // The sign of zero doesn't matter in fcmp. 303 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 304 } 305 306 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero3) { 307 parseAssembly( 308 "define float @test(float %a) {\n" 309 " %1 = fcmp ogt float 0.0, %a\n" 310 " %A = select i1 %1, float -0.0, float %a\n" 311 " ret float %A\n" 312 "}\n"); 313 // The sign of zero doesn't matter in fcmp. 314 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true}); 315 } 316 317 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero4) { 318 parseAssembly( 319 "define float @test(float %a) {\n" 320 " %1 = fcmp olt float %a, 0.0\n" 321 " %A = select i1 %1, float -0.0, float %a\n" 322 " ret float %A\n" 323 "}\n"); 324 // The sign of zero doesn't matter in fcmp. 
325 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 326 } 327 328 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero5) { 329 parseAssembly( 330 "define float @test(float %a) {\n" 331 " %1 = fcmp olt float -0.0, %a\n" 332 " %A = select i1 %1, float %a, float 0.0\n" 333 " ret float %A\n" 334 "}\n"); 335 // The sign of zero doesn't matter in fcmp. 336 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 337 } 338 339 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero6) { 340 parseAssembly( 341 "define float @test(float %a) {\n" 342 " %1 = fcmp ogt float %a, -0.0\n" 343 " %A = select i1 %1, float %a, float 0.0\n" 344 " ret float %A\n" 345 "}\n"); 346 // The sign of zero doesn't matter in fcmp. 347 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 348 } 349 350 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero7) { 351 parseAssembly( 352 "define float @test(float %a) {\n" 353 " %1 = fcmp olt float 0.0, %a\n" 354 " %A = select i1 %1, float %a, float -0.0\n" 355 " ret float %A\n" 356 "}\n"); 357 // The sign of zero doesn't matter in fcmp. 358 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 359 } 360 361 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero8) { 362 parseAssembly( 363 "define float @test(float %a) {\n" 364 " %1 = fcmp ogt float %a, 0.0\n" 365 " %A = select i1 %1, float %a, float -0.0\n" 366 " ret float %A\n" 367 "}\n"); 368 // The sign of zero doesn't matter in fcmp. 369 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 370 } 371 372 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZeroVecUndef) { 373 parseAssembly( 374 "define <2 x float> @test(<2 x float> %a) {\n" 375 " %1 = fcmp ogt <2 x float> %a, <float -0.0, float -0.0>\n" 376 " %A = select <2 x i1> %1, <2 x float> <float undef, float 0.0>, <2 x float> %a\n" 377 " ret <2 x float> %A\n" 378 "}\n"); 379 // An undef in a vector constant can not be back-propagated for this analysis. 380 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 381 } 382 383 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZeroVecUndef) { 384 parseAssembly( 385 "define <2 x float> @test(<2 x float> %a) {\n" 386 " %1 = fcmp ogt <2 x float> %a, zeroinitializer\n" 387 " %A = select <2 x i1> %1, <2 x float> %a, <2 x float> <float -0.0, float undef>\n" 388 " ret <2 x float> %A\n" 389 "}\n"); 390 // An undef in a vector constant can not be back-propagated for this analysis. 391 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 392 } 393 394 TEST_F(MatchSelectPatternTest, VectorFMinimum) { 395 parseAssembly( 396 "define <4 x float> @test(<4 x float> %a) {\n" 397 " %1 = fcmp ule <4 x float> %a, \n" 398 " <float 5.0, float 5.0, float 5.0, float 5.0>\n" 399 " %A = select <4 x i1> %1, <4 x float> %a,\n" 400 " <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n" 401 " ret <4 x float> %A\n" 402 "}\n"); 403 // Check that pattern matching works on vectors where each lane has the same 404 // unordered pattern. 405 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 406 } 407 408 TEST_F(MatchSelectPatternTest, VectorFMinOtherOrdered) { 409 parseAssembly( 410 "define <4 x float> @test(<4 x float> %a) {\n" 411 " %1 = fcmp ole <4 x float> %a, \n" 412 " <float 5.0, float 5.0, float 5.0, float 5.0>\n" 413 " %A = select <4 x i1> %1, <4 x float> %a,\n" 414 " <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n" 415 " ret <4 x float> %A\n" 416 "}\n"); 417 // Check that pattern matching works on vectors where each lane has the same 418 // ordered pattern. 
419 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 420 } 421 422 TEST_F(MatchSelectPatternTest, VectorNotFMinimum) { 423 parseAssembly( 424 "define <4 x float> @test(<4 x float> %a) {\n" 425 " %1 = fcmp ule <4 x float> %a, \n" 426 " <float 5.0, float 0x7ff8000000000000, float 5.0, float 5.0>\n" 427 " %A = select <4 x i1> %1, <4 x float> %a,\n" 428 " <4 x float> <float 5.0, float 0x7ff8000000000000, float 5.0, float " 429 "5.0>\n" 430 " ret <4 x float> %A\n" 431 "}\n"); 432 // The lane that contains a NaN (0x7ff80...) behaves like a 433 // non-NaN-propagating min and the other lines behave like a NaN-propagating 434 // min, so check that neither is returned. 435 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 436 } 437 438 TEST_F(MatchSelectPatternTest, VectorNotFMinZero) { 439 parseAssembly( 440 "define <4 x float> @test(<4 x float> %a) {\n" 441 " %1 = fcmp ule <4 x float> %a, \n" 442 " <float 5.0, float -0.0, float 5.0, float 5.0>\n" 443 " %A = select <4 x i1> %1, <4 x float> %a,\n" 444 " <4 x float> <float 5.0, float 0.0, float 5.0, float 5.0>\n" 445 " ret <4 x float> %A\n" 446 "}\n"); 447 // Always selects the second lane of %a if it is positive or negative zero, so 448 // this is stricter than a min. 449 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 450 } 451 452 TEST_F(MatchSelectPatternTest, DoubleCastU) { 453 parseAssembly( 454 "define i32 @test(i8 %a, i8 %b) {\n" 455 " %1 = icmp ult i8 %a, %b\n" 456 " %2 = zext i8 %a to i32\n" 457 " %3 = zext i8 %b to i32\n" 458 " %A = select i1 %1, i32 %2, i32 %3\n" 459 " ret i32 %A\n" 460 "}\n"); 461 // We should be able to look through the situation where we cast both operands 462 // to the select. 463 expectPattern({SPF_UMIN, SPNB_NA, false}); 464 } 465 466 TEST_F(MatchSelectPatternTest, DoubleCastS) { 467 parseAssembly( 468 "define i32 @test(i8 %a, i8 %b) {\n" 469 " %1 = icmp slt i8 %a, %b\n" 470 " %2 = sext i8 %a to i32\n" 471 " %3 = sext i8 %b to i32\n" 472 " %A = select i1 %1, i32 %2, i32 %3\n" 473 " ret i32 %A\n" 474 "}\n"); 475 // We should be able to look through the situation where we cast both operands 476 // to the select. 477 expectPattern({SPF_SMIN, SPNB_NA, false}); 478 } 479 480 TEST_F(MatchSelectPatternTest, DoubleCastBad) { 481 parseAssembly( 482 "define i32 @test(i8 %a, i8 %b) {\n" 483 " %1 = icmp ult i8 %a, %b\n" 484 " %2 = zext i8 %a to i32\n" 485 " %3 = sext i8 %b to i32\n" 486 " %A = select i1 %1, i32 %2, i32 %3\n" 487 " ret i32 %A\n" 488 "}\n"); 489 // The cast types here aren't the same, so we cannot match an UMIN. 
490 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 491 } 492 493 TEST_F(MatchSelectPatternTest, NotNotSMin) { 494 parseAssembly( 495 "define i8 @test(i8 %a, i8 %b) {\n" 496 " %cmp = icmp sgt i8 %a, %b\n" 497 " %an = xor i8 %a, -1\n" 498 " %bn = xor i8 %b, -1\n" 499 " %A = select i1 %cmp, i8 %an, i8 %bn\n" 500 " ret i8 %A\n" 501 "}\n"); 502 expectPattern({SPF_SMIN, SPNB_NA, false}); 503 } 504 505 TEST_F(MatchSelectPatternTest, NotNotSMinSwap) { 506 parseAssembly( 507 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 508 " %cmp = icmp slt <2 x i8> %a, %b\n" 509 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 510 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 511 " %A = select <2 x i1> %cmp, <2 x i8> %bn, <2 x i8> %an\n" 512 " ret <2 x i8> %A\n" 513 "}\n"); 514 expectPattern({SPF_SMIN, SPNB_NA, false}); 515 } 516 517 TEST_F(MatchSelectPatternTest, NotNotSMax) { 518 parseAssembly( 519 "define i8 @test(i8 %a, i8 %b) {\n" 520 " %cmp = icmp slt i8 %a, %b\n" 521 " %an = xor i8 %a, -1\n" 522 " %bn = xor i8 %b, -1\n" 523 " %A = select i1 %cmp, i8 %an, i8 %bn\n" 524 " ret i8 %A\n" 525 "}\n"); 526 expectPattern({SPF_SMAX, SPNB_NA, false}); 527 } 528 529 TEST_F(MatchSelectPatternTest, NotNotSMaxSwap) { 530 parseAssembly( 531 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 532 " %cmp = icmp sgt <2 x i8> %a, %b\n" 533 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 534 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 535 " %A = select <2 x i1> %cmp, <2 x i8> %bn, <2 x i8> %an\n" 536 " ret <2 x i8> %A\n" 537 "}\n"); 538 expectPattern({SPF_SMAX, SPNB_NA, false}); 539 } 540 541 TEST_F(MatchSelectPatternTest, NotNotUMin) { 542 parseAssembly( 543 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 544 " %cmp = icmp ugt <2 x i8> %a, %b\n" 545 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 546 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 547 " %A = select <2 x i1> %cmp, <2 x i8> %an, <2 x i8> %bn\n" 548 " ret <2 x i8> %A\n" 549 "}\n"); 550 expectPattern({SPF_UMIN, SPNB_NA, false}); 551 } 552 553 TEST_F(MatchSelectPatternTest, NotNotUMinSwap) { 554 parseAssembly( 555 "define i8 @test(i8 %a, i8 %b) {\n" 556 " %cmp = icmp ult i8 %a, %b\n" 557 " %an = xor i8 %a, -1\n" 558 " %bn = xor i8 %b, -1\n" 559 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 560 " ret i8 %A\n" 561 "}\n"); 562 expectPattern({SPF_UMIN, SPNB_NA, false}); 563 } 564 565 TEST_F(MatchSelectPatternTest, NotNotUMax) { 566 parseAssembly( 567 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 568 " %cmp = icmp ult <2 x i8> %a, %b\n" 569 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 570 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 571 " %A = select <2 x i1> %cmp, <2 x i8> %an, <2 x i8> %bn\n" 572 " ret <2 x i8> %A\n" 573 "}\n"); 574 expectPattern({SPF_UMAX, SPNB_NA, false}); 575 } 576 577 TEST_F(MatchSelectPatternTest, NotNotUMaxSwap) { 578 parseAssembly( 579 "define i8 @test(i8 %a, i8 %b) {\n" 580 " %cmp = icmp ugt i8 %a, %b\n" 581 " %an = xor i8 %a, -1\n" 582 " %bn = xor i8 %b, -1\n" 583 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 584 " ret i8 %A\n" 585 "}\n"); 586 expectPattern({SPF_UMAX, SPNB_NA, false}); 587 } 588 589 TEST_F(MatchSelectPatternTest, NotNotEq) { 590 parseAssembly( 591 "define i8 @test(i8 %a, i8 %b) {\n" 592 " %cmp = icmp eq i8 %a, %b\n" 593 " %an = xor i8 %a, -1\n" 594 " %bn = xor i8 %b, -1\n" 595 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 596 " ret i8 %A\n" 597 "}\n"); 598 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 599 } 600 601 TEST_F(MatchSelectPatternTest, NotNotNe) { 602 parseAssembly( 603 "define i8 @test(i8 %a, i8 %b) {\n" 604 " %cmp = icmp ne i8 %a, %b\n" 605 
" %an = xor i8 %a, -1\n" 606 " %bn = xor i8 %b, -1\n" 607 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 608 " ret i8 %A\n" 609 "}\n"); 610 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 611 } 612 613 TEST(ValueTracking, GuaranteedToTransferExecutionToSuccessor) { 614 StringRef Assembly = 615 "declare void @nounwind_readonly(i32*) nounwind readonly " 616 "declare void @nounwind_argmemonly(i32*) nounwind argmemonly " 617 "declare void @nounwind_willreturn(i32*) nounwind willreturn " 618 "declare void @throws_but_readonly(i32*) readonly " 619 "declare void @throws_but_argmemonly(i32*) argmemonly " 620 "declare void @throws_but_willreturn(i32*) willreturn " 621 " " 622 "declare void @unknown(i32*) " 623 " " 624 "define void @f(i32* %p) { " 625 " call void @nounwind_readonly(i32* %p) " 626 " call void @nounwind_argmemonly(i32* %p) " 627 " call void @nounwind_willreturn(i32* %p)" 628 " call void @throws_but_readonly(i32* %p) " 629 " call void @throws_but_argmemonly(i32* %p) " 630 " call void @throws_but_willreturn(i32* %p) " 631 " call void @unknown(i32* %p) nounwind readonly " 632 " call void @unknown(i32* %p) nounwind argmemonly " 633 " call void @unknown(i32* %p) nounwind willreturn " 634 " call void @unknown(i32* %p) readonly " 635 " call void @unknown(i32* %p) argmemonly " 636 " call void @unknown(i32* %p) willreturn " 637 " ret void " 638 "} "; 639 640 LLVMContext Context; 641 SMDiagnostic Error; 642 auto M = parseAssemblyString(Assembly, Error, Context); 643 assert(M && "Bad assembly?"); 644 645 auto *F = M->getFunction("f"); 646 assert(F && "Bad assembly?"); 647 648 auto &BB = F->getEntryBlock(); 649 bool ExpectedAnswers[] = { 650 false, // call void @nounwind_readonly(i32* %p) 651 false, // call void @nounwind_argmemonly(i32* %p) 652 true, // call void @nounwind_willreturn(i32* %p) 653 false, // call void @throws_but_readonly(i32* %p) 654 false, // call void @throws_but_argmemonly(i32* %p) 655 false, // call void @throws_but_willreturn(i32* %p) 656 false, // call void @unknown(i32* %p) nounwind readonly 657 false, // call void @unknown(i32* %p) nounwind argmemonly 658 true, // call void @unknown(i32* %p) nounwind willreturn 659 false, // call void @unknown(i32* %p) readonly 660 false, // call void @unknown(i32* %p) argmemonly 661 false, // call void @unknown(i32* %p) willreturn 662 false, // ret void 663 }; 664 665 int Index = 0; 666 for (auto &I : BB) { 667 EXPECT_EQ(isGuaranteedToTransferExecutionToSuccessor(&I), 668 ExpectedAnswers[Index]) 669 << "Incorrect answer at instruction " << Index << " = " << I; 670 Index++; 671 } 672 } 673 674 TEST_F(ValueTrackingTest, ComputeNumSignBits_PR32045) { 675 parseAssembly( 676 "define i32 @test(i32 %a) {\n" 677 " %A = ashr i32 %a, -1\n" 678 " ret i32 %A\n" 679 "}\n"); 680 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 681 } 682 683 // No guarantees for canonical IR in this analysis, so this just bails out. 684 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle) { 685 parseAssembly( 686 "define <2 x i32> @test() {\n" 687 " %A = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 0>\n" 688 " ret <2 x i32> %A\n" 689 "}\n"); 690 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 691 } 692 693 // No guarantees for canonical IR in this analysis, so a shuffle element that 694 // references an undef value means this can't return any extra information. 
695 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle2) { 696 parseAssembly( 697 "define <2 x i32> @test(<2 x i1> %x) {\n" 698 " %sext = sext <2 x i1> %x to <2 x i32>\n" 699 " %A = shufflevector <2 x i32> %sext, <2 x i32> undef, <2 x i32> <i32 0, i32 2>\n" 700 " ret <2 x i32> %A\n" 701 "}\n"); 702 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 703 } 704 705 TEST_F(ValueTrackingTest, impliesPoisonTest_Identity) { 706 parseAssembly("define void @test(i32 %x, i32 %y) {\n" 707 " %A = add i32 %x, %y\n" 708 " ret void\n" 709 "}"); 710 EXPECT_TRUE(impliesPoison(A, A)); 711 } 712 713 TEST_F(ValueTrackingTest, impliesPoisonTest_ICmp) { 714 parseAssembly("define void @test(i32 %x) {\n" 715 " %A2 = icmp eq i32 %x, 0\n" 716 " %A = icmp eq i32 %x, 1\n" 717 " ret void\n" 718 "}"); 719 EXPECT_TRUE(impliesPoison(A2, A)); 720 } 721 722 TEST_F(ValueTrackingTest, impliesPoisonTest_ICmpUnknown) { 723 parseAssembly("define void @test(i32 %x, i32 %y) {\n" 724 " %A2 = icmp eq i32 %x, %y\n" 725 " %A = icmp eq i32 %x, 1\n" 726 " ret void\n" 727 "}"); 728 EXPECT_FALSE(impliesPoison(A2, A)); 729 } 730 731 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNswOkay) { 732 parseAssembly("define void @test(i32 %x) {\n" 733 " %A2 = add nsw i32 %x, 1\n" 734 " %A = add i32 %A2, 1\n" 735 " ret void\n" 736 "}"); 737 EXPECT_TRUE(impliesPoison(A2, A)); 738 } 739 740 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNswOkay2) { 741 parseAssembly("define void @test(i32 %x) {\n" 742 " %A2 = add i32 %x, 1\n" 743 " %A = add nsw i32 %A2, 1\n" 744 " ret void\n" 745 "}"); 746 EXPECT_TRUE(impliesPoison(A2, A)); 747 } 748 749 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNsw) { 750 parseAssembly("define void @test(i32 %x) {\n" 751 " %A2 = add nsw i32 %x, 1\n" 752 " %A = add i32 %x, 1\n" 753 " ret void\n" 754 "}"); 755 EXPECT_FALSE(impliesPoison(A2, A)); 756 } 757 758 TEST_F(ValueTrackingTest, impliesPoisonTest_Cmp) { 759 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 760 " %A2 = icmp eq i32 %x, %y\n" 761 " %A0 = icmp ult i32 %x, %y\n" 762 " %A = or i1 %A0, %c\n" 763 " ret void\n" 764 "}"); 765 EXPECT_TRUE(impliesPoison(A2, A)); 766 } 767 768 TEST_F(ValueTrackingTest, impliesPoisonTest_FCmpFMF) { 769 parseAssembly("define void @test(float %x, float %y, i1 %c) {\n" 770 " %A2 = fcmp nnan oeq float %x, %y\n" 771 " %A0 = fcmp olt float %x, %y\n" 772 " %A = or i1 %A0, %c\n" 773 " ret void\n" 774 "}"); 775 EXPECT_FALSE(impliesPoison(A2, A)); 776 } 777 778 TEST_F(ValueTrackingTest, impliesPoisonTest_AddSubSameOps) { 779 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 780 " %A2 = add i32 %x, %y\n" 781 " %A = sub i32 %x, %y\n" 782 " ret void\n" 783 "}"); 784 EXPECT_TRUE(impliesPoison(A2, A)); 785 } 786 787 TEST_F(ValueTrackingTest, impliesPoisonTest_MaskCmp) { 788 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 789 " %M2 = and i32 %x, 7\n" 790 " %A2 = icmp eq i32 %M2, 1\n" 791 " %M = and i32 %x, 15\n" 792 " %A = icmp eq i32 %M, 3\n" 793 " ret void\n" 794 "}"); 795 EXPECT_TRUE(impliesPoison(A2, A)); 796 } 797 798 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle_Pointers) { 799 parseAssembly( 800 "define <2 x i32*> @test(<2 x i32*> %x) {\n" 801 " %A = shufflevector <2 x i32*> zeroinitializer, <2 x i32*> undef, <2 x i32> zeroinitializer\n" 802 " ret <2 x i32*> %A\n" 803 "}\n"); 804 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 64u); 805 } 806 807 TEST(ValueTracking, propagatesPoison) { 808 std::string AsmHead = 809 "declare i32 @g(i32)\n" 810 "declare {i32, i1} 
@llvm.sadd.with.overflow.i32(i32 %a, i32 %b)\n" 811 "declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)\n" 812 "declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)\n" 813 "declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)\n" 814 "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n" 815 "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n" 816 "define void @f(i32 %x, i32 %y, float %fx, float %fy, " 817 "i1 %cond, i8* %p) {\n"; 818 std::string AsmTail = " ret void\n}"; 819 // (propagates poison?, IR instruction) 820 SmallVector<std::pair<bool, std::string>, 32> Data = { 821 {true, "add i32 %x, %y"}, 822 {true, "add nsw nuw i32 %x, %y"}, 823 {true, "ashr i32 %x, %y"}, 824 {true, "lshr exact i32 %x, 31"}, 825 {true, "fcmp oeq float %fx, %fy"}, 826 {true, "icmp eq i32 %x, %y"}, 827 {true, "getelementptr i8, i8* %p, i32 %x"}, 828 {true, "getelementptr inbounds i8, i8* %p, i32 %x"}, 829 {true, "bitcast float %fx to i32"}, 830 {false, "select i1 %cond, i32 %x, i32 %y"}, 831 {false, "freeze i32 %x"}, 832 {true, "udiv i32 %x, %y"}, 833 {true, "urem i32 %x, %y"}, 834 {true, "sdiv exact i32 %x, %y"}, 835 {true, "srem i32 %x, %y"}, 836 {false, "call i32 @g(i32 %x)"}, 837 {true, "call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)"}, 838 {true, "call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)"}, 839 {true, "call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 %y)"}, 840 {true, "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)"}, 841 {true, "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)"}, 842 {true, "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)"}}; 843 844 std::string AssemblyStr = AsmHead; 845 for (auto &Itm : Data) 846 AssemblyStr += Itm.second + "\n"; 847 AssemblyStr += AsmTail; 848 849 LLVMContext Context; 850 SMDiagnostic Error; 851 auto M = parseAssemblyString(AssemblyStr, Error, Context); 852 assert(M && "Bad assembly?"); 853 854 auto *F = M->getFunction("f"); 855 assert(F && "Bad assembly?"); 856 857 auto &BB = F->getEntryBlock(); 858 859 int Index = 0; 860 for (auto &I : BB) { 861 if (isa<ReturnInst>(&I)) 862 break; 863 EXPECT_EQ(propagatesPoison(cast<Operator>(&I)), Data[Index].first) 864 << "Incorrect answer at instruction " << Index << " = " << I; 865 Index++; 866 } 867 } 868 869 TEST_F(ValueTrackingTest, programUndefinedIfPoison) { 870 parseAssembly("declare i32 @any_num()" 871 "define void @test(i32 %mask) {\n" 872 " %A = call i32 @any_num()\n" 873 " %B = or i32 %A, %mask\n" 874 " udiv i32 1, %B" 875 " ret void\n" 876 "}\n"); 877 // If %A was poison, udiv raises UB regardless of %mask's value 878 EXPECT_EQ(programUndefinedIfPoison(A), true); 879 } 880 881 TEST_F(ValueTrackingTest, programUndefinedIfUndefOrPoison) { 882 parseAssembly("declare i32 @any_num()" 883 "define void @test(i32 %mask) {\n" 884 " %A = call i32 @any_num()\n" 885 " %B = or i32 %A, %mask\n" 886 " udiv i32 1, %B" 887 " ret void\n" 888 "}\n"); 889 // If %A was undef and %mask was 1, udiv does not raise UB 890 EXPECT_EQ(programUndefinedIfUndefOrPoison(A), false); 891 } 892 893 TEST_F(ValueTrackingTest, isGuaranteedNotToBePoison_exploitBranchCond) { 894 parseAssembly("declare i1 @any_bool()" 895 "define void @test(i1 %y) {\n" 896 " %A = call i1 @any_bool()\n" 897 " %cond = and i1 %A, %y\n" 898 " br i1 %cond, label %BB1, label %BB2\n" 899 "BB1:\n" 900 " ret void\n" 901 "BB2:\n" 902 " ret void\n" 903 "}\n"); 904 DominatorTree DT(*F); 905 for (auto &BB : *F) { 906 if (&BB == &F->getEntryBlock()) 907 
continue; 908 909 EXPECT_EQ(isGuaranteedNotToBePoison(A, nullptr, BB.getTerminator(), &DT), 910 true) 911 << "isGuaranteedNotToBePoison does not hold at " << *BB.getTerminator(); 912 } 913 } 914 915 TEST_F(ValueTrackingTest, isGuaranteedNotToBePoison_phi) { 916 parseAssembly("declare i32 @any_i32(i32)" 917 "define void @test() {\n" 918 "ENTRY:\n" 919 " br label %LOOP\n" 920 "LOOP:\n" 921 " %A = phi i32 [0, %ENTRY], [%A.next, %NEXT]\n" 922 " %A.next = call i32 @any_i32(i32 %A)\n" 923 " %cond = icmp eq i32 %A.next, 0\n" 924 " br i1 %cond, label %NEXT, label %EXIT\n" 925 "NEXT:\n" 926 " br label %LOOP\n" 927 "EXIT:\n" 928 " ret void\n" 929 "}\n"); 930 DominatorTree DT(*F); 931 for (auto &BB : *F) { 932 if (BB.getName() == "LOOP") { 933 EXPECT_EQ(isGuaranteedNotToBePoison(A, nullptr, A, &DT), true) 934 << "isGuaranteedNotToBePoison does not hold"; 935 } 936 } 937 } 938 939 TEST_F(ValueTrackingTest, isGuaranteedNotToBeUndefOrPoison) { 940 parseAssembly("declare void @f(i32 noundef)" 941 "define void @test(i32 %x) {\n" 942 " %A = bitcast i32 %x to i32\n" 943 " call void @f(i32 noundef %x)\n" 944 " ret void\n" 945 "}\n"); 946 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(A), true); 947 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(UndefValue::get(IntegerType::get(Context, 8))), false); 948 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(PoisonValue::get(IntegerType::get(Context, 8))), false); 949 EXPECT_EQ(isGuaranteedNotToBePoison(UndefValue::get(IntegerType::get(Context, 8))), true); 950 EXPECT_EQ(isGuaranteedNotToBePoison(PoisonValue::get(IntegerType::get(Context, 8))), false); 951 952 Type *Int32Ty = Type::getInt32Ty(Context); 953 Constant *CU = UndefValue::get(Int32Ty); 954 Constant *CP = PoisonValue::get(Int32Ty); 955 Constant *C1 = ConstantInt::get(Int32Ty, 1); 956 Constant *C2 = ConstantInt::get(Int32Ty, 2); 957 958 { 959 Constant *V1 = ConstantVector::get({C1, C2}); 960 EXPECT_TRUE(isGuaranteedNotToBeUndefOrPoison(V1)); 961 EXPECT_TRUE(isGuaranteedNotToBePoison(V1)); 962 } 963 964 { 965 Constant *V2 = ConstantVector::get({C1, CU}); 966 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(V2)); 967 EXPECT_TRUE(isGuaranteedNotToBePoison(V2)); 968 } 969 970 { 971 Constant *V3 = ConstantVector::get({C1, CP}); 972 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(V3)); 973 EXPECT_FALSE(isGuaranteedNotToBePoison(V3)); 974 } 975 } 976 977 TEST_F(ValueTrackingTest, isGuaranteedNotToBeUndefOrPoison_assume) { 978 parseAssembly("declare i1 @f_i1()\n" 979 "declare i32 @f_i32()\n" 980 "declare void @llvm.assume(i1)\n" 981 "define void @test() {\n" 982 " %A = call i32 @f_i32()\n" 983 " %cond = call i1 @f_i1()\n" 984 " %CxtI = add i32 0, 0\n" 985 " br i1 %cond, label %BB1, label %EXIT\n" 986 "BB1:\n" 987 " %CxtI2 = add i32 0, 0\n" 988 " %cond2 = call i1 @f_i1()\n" 989 " call void @llvm.assume(i1 true) [ \"noundef\"(i32 %A) ]\n" 990 " br i1 %cond2, label %BB2, label %EXIT\n" 991 "BB2:\n" 992 " %CxtI3 = add i32 0, 0\n" 993 " ret void\n" 994 "EXIT:\n" 995 " ret void\n" 996 "}"); 997 AssumptionCache AC(*F); 998 DominatorTree DT(*F); 999 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI, &DT)); 1000 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI2, &DT)); 1001 EXPECT_TRUE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI3, &DT)); 1002 } 1003 1004 TEST(ValueTracking, canCreatePoisonOrUndef) { 1005 std::string AsmHead = 1006 "@s = external dso_local global i32, align 1\n" 1007 "declare i32 @g(i32)\n" 1008 "declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)\n" 1009 "declare {i32, i1} 
@llvm.ssub.with.overflow.i32(i32 %a, i32 %b)\n" 1010 "declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)\n" 1011 "declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)\n" 1012 "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n" 1013 "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n" 1014 "define void @f(i32 %x, i32 %y, float %fx, float %fy, i1 %cond, " 1015 "<4 x i32> %vx, <4 x i32> %vx2, <vscale x 4 x i32> %svx, i8* %p) {\n"; 1016 std::string AsmTail = " ret void\n}"; 1017 // (can create poison?, can create undef?, IR instruction) 1018 SmallVector<std::pair<std::pair<bool, bool>, std::string>, 32> Data = { 1019 {{false, false}, "add i32 %x, %y"}, 1020 {{true, false}, "add nsw nuw i32 %x, %y"}, 1021 {{true, false}, "shl i32 %x, %y"}, 1022 {{true, false}, "shl <4 x i32> %vx, %vx2"}, 1023 {{true, false}, "shl nsw i32 %x, %y"}, 1024 {{true, false}, "shl nsw <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1025 {{false, false}, "shl i32 %x, 31"}, 1026 {{true, false}, "shl i32 %x, 32"}, 1027 {{false, false}, "shl <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1028 {{true, false}, "shl <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 32>"}, 1029 {{true, false}, "ashr i32 %x, %y"}, 1030 {{true, false}, "ashr exact i32 %x, %y"}, 1031 {{false, false}, "ashr i32 %x, 31"}, 1032 {{true, false}, "ashr exact i32 %x, 31"}, 1033 {{false, false}, "ashr <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1034 {{true, false}, "ashr <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 32>"}, 1035 {{true, false}, "ashr exact <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1036 {{true, false}, "lshr i32 %x, %y"}, 1037 {{true, false}, "lshr exact i32 %x, 31"}, 1038 {{false, false}, "udiv i32 %x, %y"}, 1039 {{true, false}, "udiv exact i32 %x, %y"}, 1040 {{false, false}, "getelementptr i8, i8* %p, i32 %x"}, 1041 {{true, false}, "getelementptr inbounds i8, i8* %p, i32 %x"}, 1042 {{true, false}, "fneg nnan float %fx"}, 1043 {{false, false}, "fneg float %fx"}, 1044 {{false, false}, "fadd float %fx, %fy"}, 1045 {{true, false}, "fadd nnan float %fx, %fy"}, 1046 {{false, false}, "urem i32 %x, %y"}, 1047 {{true, false}, "fptoui float %fx to i32"}, 1048 {{true, false}, "fptosi float %fx to i32"}, 1049 {{false, false}, "bitcast float %fx to i32"}, 1050 {{false, false}, "select i1 %cond, i32 %x, i32 %y"}, 1051 {{true, false}, "select nnan i1 %cond, float %fx, float %fy"}, 1052 {{true, false}, "extractelement <4 x i32> %vx, i32 %x"}, 1053 {{false, false}, "extractelement <4 x i32> %vx, i32 3"}, 1054 {{true, false}, "extractelement <vscale x 4 x i32> %svx, i32 4"}, 1055 {{true, false}, "insertelement <4 x i32> %vx, i32 %x, i32 %y"}, 1056 {{false, false}, "insertelement <4 x i32> %vx, i32 %x, i32 3"}, 1057 {{true, false}, "insertelement <vscale x 4 x i32> %svx, i32 %x, i32 4"}, 1058 {{false, false}, "freeze i32 %x"}, 1059 {{false, false}, 1060 "shufflevector <4 x i32> %vx, <4 x i32> %vx2, " 1061 "<4 x i32> <i32 0, i32 1, i32 2, i32 3>"}, 1062 {{false, true}, 1063 "shufflevector <4 x i32> %vx, <4 x i32> %vx2, " 1064 "<4 x i32> <i32 0, i32 1, i32 2, i32 undef>"}, 1065 {{false, true}, 1066 "shufflevector <vscale x 4 x i32> %svx, " 1067 "<vscale x 4 x i32> %svx, <vscale x 4 x i32> undef"}, 1068 {{true, false}, "call i32 @g(i32 %x)"}, 1069 {{false, false}, "call noundef i32 @g(i32 %x)"}, 1070 {{true, false}, "fcmp nnan oeq float %fx, %fy"}, 1071 {{false, false}, "fcmp oeq float %fx, %fy"}, 1072 {{true, false}, 1073 "ashr <4 x i32> %vx, select (i1 icmp sgt (i32 ptrtoint (i32* @s to " 1074 "i32), i32 1), <4 x 
i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 " 1075 "2, i32 3>)"}, 1076 {{false, false}, 1077 "call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)"}, 1078 {{false, false}, 1079 "call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)"}, 1080 {{false, false}, 1081 "call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 %y)"}, 1082 {{false, false}, 1083 "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)"}, 1084 {{false, false}, 1085 "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)"}, 1086 {{false, false}, 1087 "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)"}}; 1088 1089 std::string AssemblyStr = AsmHead; 1090 for (auto &Itm : Data) 1091 AssemblyStr += Itm.second + "\n"; 1092 AssemblyStr += AsmTail; 1093 1094 LLVMContext Context; 1095 SMDiagnostic Error; 1096 auto M = parseAssemblyString(AssemblyStr, Error, Context); 1097 assert(M && "Bad assembly?"); 1098 1099 auto *F = M->getFunction("f"); 1100 assert(F && "Bad assembly?"); 1101 1102 auto &BB = F->getEntryBlock(); 1103 1104 int Index = 0; 1105 for (auto &I : BB) { 1106 if (isa<ReturnInst>(&I)) 1107 break; 1108 bool Poison = Data[Index].first.first; 1109 bool Undef = Data[Index].first.second; 1110 EXPECT_EQ(canCreatePoison(cast<Operator>(&I)), Poison) 1111 << "Incorrect answer of canCreatePoison at instruction " << Index 1112 << " = " << I; 1113 EXPECT_EQ(canCreateUndefOrPoison(cast<Operator>(&I)), Undef || Poison) 1114 << "Incorrect answer of canCreateUndef at instruction " << Index 1115 << " = " << I; 1116 Index++; 1117 } 1118 } 1119 1120 TEST_F(ValueTrackingTest, computePtrAlignment) { 1121 parseAssembly("declare i1 @f_i1()\n" 1122 "declare i8* @f_i8p()\n" 1123 "declare void @llvm.assume(i1)\n" 1124 "define void @test() {\n" 1125 " %A = call i8* @f_i8p()\n" 1126 " %cond = call i1 @f_i1()\n" 1127 " %CxtI = add i32 0, 0\n" 1128 " br i1 %cond, label %BB1, label %EXIT\n" 1129 "BB1:\n" 1130 " %CxtI2 = add i32 0, 0\n" 1131 " %cond2 = call i1 @f_i1()\n" 1132 " call void @llvm.assume(i1 true) [ \"align\"(i8* %A, i64 16) ]\n" 1133 " br i1 %cond2, label %BB2, label %EXIT\n" 1134 "BB2:\n" 1135 " %CxtI3 = add i32 0, 0\n" 1136 " ret void\n" 1137 "EXIT:\n" 1138 " ret void\n" 1139 "}"); 1140 AssumptionCache AC(*F); 1141 DominatorTree DT(*F); 1142 DataLayout DL = M->getDataLayout(); 1143 EXPECT_EQ(getKnownAlignment(A, DL, CxtI, &AC, &DT), Align(1)); 1144 EXPECT_EQ(getKnownAlignment(A, DL, CxtI2, &AC, &DT), Align(1)); 1145 EXPECT_EQ(getKnownAlignment(A, DL, CxtI3, &AC, &DT), Align(16)); 1146 } 1147 1148 TEST_F(ComputeKnownBitsTest, ComputeKnownBits) { 1149 parseAssembly( 1150 "define i32 @test(i32 %a, i32 %b) {\n" 1151 " %ash = mul i32 %a, 8\n" 1152 " %aad = add i32 %ash, 7\n" 1153 " %aan = and i32 %aad, 4095\n" 1154 " %bsh = shl i32 %b, 4\n" 1155 " %bad = or i32 %bsh, 6\n" 1156 " %ban = and i32 %bad, 4095\n" 1157 " %A = mul i32 %aan, %ban\n" 1158 " ret i32 %A\n" 1159 "}\n"); 1160 expectKnownBits(/*zero*/ 4278190085u, /*one*/ 10u); 1161 } 1162 1163 TEST_F(ComputeKnownBitsTest, ComputeKnownMulBits) { 1164 parseAssembly( 1165 "define i32 @test(i32 %a, i32 %b) {\n" 1166 " %aa = shl i32 %a, 5\n" 1167 " %bb = shl i32 %b, 5\n" 1168 " %aaa = or i32 %aa, 24\n" 1169 " %bbb = or i32 %bb, 28\n" 1170 " %A = mul i32 %aaa, %bbb\n" 1171 " ret i32 %A\n" 1172 "}\n"); 1173 expectKnownBits(/*zero*/ 95u, /*one*/ 32u); 1174 } 1175 1176 TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond) { 1177 parseAssembly(R"( 1178 declare i8* @f_i8() 1179 define void @test(i1 %c) { 1180 %A = call i8* @f_i8() 1181 %B = call i8* @f_i8() 
1182 %c1 = icmp ne i8* %A, null 1183 %cond = and i1 %c1, %c 1184 br i1 %cond, label %T, label %Q 1185 T: 1186 %CxtI = add i32 0, 0 1187 ret void 1188 Q: 1189 %CxtI2 = add i32 0, 0 1190 ret void 1191 } 1192 )"); 1193 AssumptionCache AC(*F); 1194 DominatorTree DT(*F); 1195 DataLayout DL = M->getDataLayout(); 1196 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true); 1197 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false); 1198 } 1199 1200 TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond2) { 1201 parseAssembly(R"( 1202 declare i8* @f_i8() 1203 define void @test(i1 %c) { 1204 %A = call i8* @f_i8() 1205 %B = call i8* @f_i8() 1206 %c1 = icmp ne i8* %A, null 1207 %cond = select i1 %c, i1 %c1, i1 false 1208 br i1 %cond, label %T, label %Q 1209 T: 1210 %CxtI = add i32 0, 0 1211 ret void 1212 Q: 1213 %CxtI2 = add i32 0, 0 1214 ret void 1215 } 1216 )"); 1217 AssumptionCache AC(*F); 1218 DominatorTree DT(*F); 1219 DataLayout DL = M->getDataLayout(); 1220 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true); 1221 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false); 1222 } 1223 1224 TEST_F(ValueTrackingTest, IsImpliedConditionAnd) { 1225 parseAssembly(R"( 1226 define void @test(i32 %x, i32 %y) { 1227 %c1 = icmp ult i32 %x, 10 1228 %c2 = icmp ult i32 %y, 15 1229 %A = and i1 %c1, %c2 1230 ; x < 10 /\ y < 15 1231 %A2 = icmp ult i32 %x, 20 1232 %A3 = icmp uge i32 %y, 20 1233 %A4 = icmp ult i32 %x, 5 1234 ret void 1235 } 1236 )"); 1237 DataLayout DL = M->getDataLayout(); 1238 EXPECT_EQ(isImpliedCondition(A, A2, DL), true); 1239 EXPECT_EQ(isImpliedCondition(A, A3, DL), false); 1240 EXPECT_EQ(isImpliedCondition(A, A4, DL), None); 1241 } 1242 1243 TEST_F(ValueTrackingTest, IsImpliedConditionAnd2) { 1244 parseAssembly(R"( 1245 define void @test(i32 %x, i32 %y) { 1246 %c1 = icmp ult i32 %x, 10 1247 %c2 = icmp ult i32 %y, 15 1248 %A = select i1 %c1, i1 %c2, i1 false 1249 ; x < 10 /\ y < 15 1250 %A2 = icmp ult i32 %x, 20 1251 %A3 = icmp uge i32 %y, 20 1252 %A4 = icmp ult i32 %x, 5 1253 ret void 1254 } 1255 )"); 1256 DataLayout DL = M->getDataLayout(); 1257 EXPECT_EQ(isImpliedCondition(A, A2, DL), true); 1258 EXPECT_EQ(isImpliedCondition(A, A3, DL), false); 1259 EXPECT_EQ(isImpliedCondition(A, A4, DL), None); 1260 } 1261 1262 TEST_F(ValueTrackingTest, IsImpliedConditionOr) { 1263 parseAssembly(R"( 1264 define void @test(i32 %x, i32 %y) { 1265 %c1 = icmp ult i32 %x, 10 1266 %c2 = icmp ult i32 %y, 15 1267 %A = or i1 %c1, %c2 ; negated 1268 ; x >= 10 /\ y >= 15 1269 %A2 = icmp ult i32 %x, 5 1270 %A3 = icmp uge i32 %y, 10 1271 %A4 = icmp ult i32 %x, 15 1272 ret void 1273 } 1274 )"); 1275 DataLayout DL = M->getDataLayout(); 1276 EXPECT_EQ(isImpliedCondition(A, A2, DL, false), false); 1277 EXPECT_EQ(isImpliedCondition(A, A3, DL, false), true); 1278 EXPECT_EQ(isImpliedCondition(A, A4, DL, false), None); 1279 } 1280 1281 TEST_F(ValueTrackingTest, IsImpliedConditionOr2) { 1282 parseAssembly(R"( 1283 define void @test(i32 %x, i32 %y) { 1284 %c1 = icmp ult i32 %x, 10 1285 %c2 = icmp ult i32 %y, 15 1286 %A = select i1 %c1, i1 true, i1 %c2 ; negated 1287 ; x >= 10 /\ y >= 15 1288 %A2 = icmp ult i32 %x, 5 1289 %A3 = icmp uge i32 %y, 10 1290 %A4 = icmp ult i32 %x, 15 1291 ret void 1292 } 1293 )"); 1294 DataLayout DL = M->getDataLayout(); 1295 EXPECT_EQ(isImpliedCondition(A, A2, DL, false), false); 1296 EXPECT_EQ(isImpliedCondition(A, A3, DL, false), true); 1297 EXPECT_EQ(isImpliedCondition(A, A4, DL, false), None); 1298 } 1299 1300 TEST_F(ComputeKnownBitsTest, KnownNonZeroShift) { 1301 // %q is known 
nonzero without known bits. 1302 // Because %q is nonzero, %A[0] is known to be zero. 1303 parseAssembly( 1304 "define i8 @test(i8 %p, i8* %pq) {\n" 1305 " %q = load i8, i8* %pq, !range !0\n" 1306 " %A = shl i8 %p, %q\n" 1307 " ret i8 %A\n" 1308 "}\n" 1309 "!0 = !{ i8 1, i8 5 }\n"); 1310 expectKnownBits(/*zero*/ 1u, /*one*/ 0u); 1311 } 1312 1313 TEST_F(ComputeKnownBitsTest, ComputeKnownFshl) { 1314 // fshl(....1111....0000, 00..1111........, 6) 1315 // = 11....000000..11 1316 parseAssembly( 1317 "define i16 @test(i16 %a, i16 %b) {\n" 1318 " %aa = shl i16 %a, 4\n" 1319 " %bb = lshr i16 %b, 2\n" 1320 " %aaa = or i16 %aa, 3840\n" 1321 " %bbb = or i16 %bb, 3840\n" 1322 " %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 6)\n" 1323 " ret i16 %A\n" 1324 "}\n" 1325 "declare i16 @llvm.fshl.i16(i16, i16, i16)\n"); 1326 expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u); 1327 } 1328 1329 TEST_F(ComputeKnownBitsTest, ComputeKnownFshr) { 1330 // fshr(....1111....0000, 00..1111........, 26) 1331 // = 11....000000..11 1332 parseAssembly( 1333 "define i16 @test(i16 %a, i16 %b) {\n" 1334 " %aa = shl i16 %a, 4\n" 1335 " %bb = lshr i16 %b, 2\n" 1336 " %aaa = or i16 %aa, 3840\n" 1337 " %bbb = or i16 %bb, 3840\n" 1338 " %A = call i16 @llvm.fshr.i16(i16 %aaa, i16 %bbb, i16 26)\n" 1339 " ret i16 %A\n" 1340 "}\n" 1341 "declare i16 @llvm.fshr.i16(i16, i16, i16)\n"); 1342 expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u); 1343 } 1344 1345 TEST_F(ComputeKnownBitsTest, ComputeKnownFshlZero) { 1346 // fshl(....1111....0000, 00..1111........, 0) 1347 // = ....1111....0000 1348 parseAssembly( 1349 "define i16 @test(i16 %a, i16 %b) {\n" 1350 " %aa = shl i16 %a, 4\n" 1351 " %bb = lshr i16 %b, 2\n" 1352 " %aaa = or i16 %aa, 3840\n" 1353 " %bbb = or i16 %bb, 3840\n" 1354 " %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 0)\n" 1355 " ret i16 %A\n" 1356 "}\n" 1357 "declare i16 @llvm.fshl.i16(i16, i16, i16)\n"); 1358 expectKnownBits(/*zero*/ 15u, /*one*/ 3840u); 1359 } 1360 1361 TEST_F(ComputeKnownBitsTest, ComputeKnownUAddSatLeadingOnes) { 1362 // uadd.sat(1111...1, ........) 1363 // = 1111.... 1364 parseAssembly( 1365 "define i8 @test(i8 %a, i8 %b) {\n" 1366 " %aa = or i8 %a, 241\n" 1367 " %A = call i8 @llvm.uadd.sat.i8(i8 %aa, i8 %b)\n" 1368 " ret i8 %A\n" 1369 "}\n" 1370 "declare i8 @llvm.uadd.sat.i8(i8, i8)\n"); 1371 expectKnownBits(/*zero*/ 0u, /*one*/ 240u); 1372 } 1373 1374 TEST_F(ComputeKnownBitsTest, ComputeKnownUAddSatOnesPreserved) { 1375 // uadd.sat(00...011, .1...110) 1376 // = .......1 1377 parseAssembly( 1378 "define i8 @test(i8 %a, i8 %b) {\n" 1379 " %aa = or i8 %a, 3\n" 1380 " %aaa = and i8 %aa, 59\n" 1381 " %bb = or i8 %b, 70\n" 1382 " %bbb = and i8 %bb, 254\n" 1383 " %A = call i8 @llvm.uadd.sat.i8(i8 %aaa, i8 %bbb)\n" 1384 " ret i8 %A\n" 1385 "}\n" 1386 "declare i8 @llvm.uadd.sat.i8(i8, i8)\n"); 1387 expectKnownBits(/*zero*/ 0u, /*one*/ 1u); 1388 } 1389 1390 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatLHSLeadingZeros) { 1391 // usub.sat(0000...0, ........) 1392 // = 0000.... 1393 parseAssembly( 1394 "define i8 @test(i8 %a, i8 %b) {\n" 1395 " %aa = and i8 %a, 14\n" 1396 " %A = call i8 @llvm.usub.sat.i8(i8 %aa, i8 %b)\n" 1397 " ret i8 %A\n" 1398 "}\n" 1399 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1400 expectKnownBits(/*zero*/ 240u, /*one*/ 0u); 1401 } 1402 1403 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatRHSLeadingOnes) { 1404 // usub.sat(........, 1111...1) 1405 // = 0000.... 
1406 parseAssembly( 1407 "define i8 @test(i8 %a, i8 %b) {\n" 1408 " %bb = or i8 %a, 241\n" 1409 " %A = call i8 @llvm.usub.sat.i8(i8 %a, i8 %bb)\n" 1410 " ret i8 %A\n" 1411 "}\n" 1412 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1413 expectKnownBits(/*zero*/ 240u, /*one*/ 0u); 1414 } 1415 1416 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatZerosPreserved) { 1417 // usub.sat(11...011, .1...110) 1418 // = ......0. 1419 parseAssembly( 1420 "define i8 @test(i8 %a, i8 %b) {\n" 1421 " %aa = or i8 %a, 195\n" 1422 " %aaa = and i8 %aa, 251\n" 1423 " %bb = or i8 %b, 70\n" 1424 " %bbb = and i8 %bb, 254\n" 1425 " %A = call i8 @llvm.usub.sat.i8(i8 %aaa, i8 %bbb)\n" 1426 " ret i8 %A\n" 1427 "}\n" 1428 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1429 expectKnownBits(/*zero*/ 2u, /*one*/ 0u); 1430 } 1431 1432 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntTrunc) { 1433 // ptrtoint truncates the pointer type. 1434 parseAssembly( 1435 "define void @test(i8** %p) {\n" 1436 " %A = load i8*, i8** %p\n" 1437 " %i = ptrtoint i8* %A to i32\n" 1438 " %m = and i32 %i, 31\n" 1439 " %c = icmp eq i32 %m, 0\n" 1440 " call void @llvm.assume(i1 %c)\n" 1441 " ret void\n" 1442 "}\n" 1443 "declare void @llvm.assume(i1)\n"); 1444 AssumptionCache AC(*F); 1445 KnownBits Known = computeKnownBits( 1446 A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator()); 1447 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1448 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1449 } 1450 1451 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntZext) { 1452 // ptrtoint zero extends the pointer type. 1453 parseAssembly( 1454 "define void @test(i8** %p) {\n" 1455 " %A = load i8*, i8** %p\n" 1456 " %i = ptrtoint i8* %A to i128\n" 1457 " %m = and i128 %i, 31\n" 1458 " %c = icmp eq i128 %m, 0\n" 1459 " call void @llvm.assume(i1 %c)\n" 1460 " ret void\n" 1461 "}\n" 1462 "declare void @llvm.assume(i1)\n"); 1463 AssumptionCache AC(*F); 1464 KnownBits Known = computeKnownBits( 1465 A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator()); 1466 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1467 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1468 } 1469 1470 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsFreeze) { 1471 parseAssembly("define void @test() {\n" 1472 " %m = call i32 @any_num()\n" 1473 " %A = freeze i32 %m\n" 1474 " %n = and i32 %m, 31\n" 1475 " %c = icmp eq i32 %n, 0\n" 1476 " call void @llvm.assume(i1 %c)\n" 1477 " ret void\n" 1478 "}\n" 1479 "declare void @llvm.assume(i1)\n" 1480 "declare i32 @any_num()\n"); 1481 AssumptionCache AC(*F); 1482 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1483 F->front().getTerminator()); 1484 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1485 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1486 } 1487 1488 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRange) { 1489 parseAssembly("define void @test(i64* %p) {\n" 1490 " %A = load i64, i64* %p, !range !{i64 64, i64 65536}\n" 1491 " %APlus512 = add i64 %A, 512\n" 1492 " %c = icmp ugt i64 %APlus512, 523\n" 1493 " call void @llvm.assume(i1 %c)\n" 1494 " ret void\n" 1495 "}\n" 1496 "declare void @llvm.assume(i1)\n"); 1497 AssumptionCache AC(*F); 1498 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1499 F->front().getTerminator()); 1500 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1)); 1501 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1502 Instruction &APlus512 = findInstructionByName(F, "APlus512"); 1503 Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1504 
F->front().getTerminator()); 1505 // We know of one less zero because 512 may have produced a 1 that 1506 // got carried all the way to the first trailing zero. 1507 EXPECT_EQ(Known.Zero.getZExtValue(), (~(65536llu - 1)) << 1); 1508 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1509 // The known range is not precise given computeKnownBits works 1510 // with the masks of zeros and ones, not the ranges. 1511 EXPECT_EQ(Known.getMinValue(), 0u); 1512 EXPECT_EQ(Known.getMaxValue(), 131071); 1513 } 1514 1515 // 512 + [32, 64) doesn't produce overlapping bits. 1516 // Make sure we get all the individual bits properly. 1517 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRangeNoOverlap) { 1518 parseAssembly("define void @test(i64* %p) {\n" 1519 " %A = load i64, i64* %p, !range !{i64 32, i64 64}\n" 1520 " %APlus512 = add i64 %A, 512\n" 1521 " %c = icmp ugt i64 %APlus512, 523\n" 1522 " call void @llvm.assume(i1 %c)\n" 1523 " ret void\n" 1524 "}\n" 1525 "declare void @llvm.assume(i1)\n"); 1526 AssumptionCache AC(*F); 1527 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1528 F->front().getTerminator()); 1529 EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1)); 1530 EXPECT_EQ(Known.One.getZExtValue(), 32u); 1531 Instruction &APlus512 = findInstructionByName(F, "APlus512"); 1532 Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1533 F->front().getTerminator()); 1534 EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1)); 1535 EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u); 1536 // The known range is not precise given computeKnownBits works 1537 // with the masks of zeros and ones, not the ranges. 1538 EXPECT_EQ(Known.getMinValue(), 544); 1539 EXPECT_EQ(Known.getMaxValue(), 575); 1540 } 1541 1542 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRange) { 1543 parseAssembly( 1544 "define void @test(i64* %p) {\n" 1545 " %A = load i64, i64* %p, !range !{i64 64, i64 65536}\n" 1546 " %APtr = inttoptr i64 %A to float*" 1547 " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n" 1548 " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n" 1549 " call void @llvm.assume(i1 %c)\n" 1550 " ret void\n" 1551 "}\n" 1552 "declare void @llvm.assume(i1)\n"); 1553 AssumptionCache AC(*F); 1554 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1555 F->front().getTerminator()); 1556 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1)); 1557 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1558 Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512"); 1559 Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1560 F->front().getTerminator()); 1561 // We know of one less zero because 512 may have produced a 1 that 1562 // got carried all the way to the first trailing zero. 1563 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1) << 1); 1564 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1565 // The known range is not precise given computeKnownBits works 1566 // with the masks of zeros and ones, not the ranges. 1567 EXPECT_EQ(Known.getMinValue(), 0u); 1568 EXPECT_EQ(Known.getMaxValue(), 131071); 1569 } 1570 1571 // 4*128 + [32, 64) doesn't produce overlapping bits. 1572 // Make sure we get all the individual bits properly. 1573 // This test is useful to check that we account for the scaling factor 1574 // in the gep. Indeed, gep float, [32,64), 128 is not 128 + [32,64). 
1575 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRangeNoOverlap) { 1576 parseAssembly( 1577 "define void @test(i64* %p) {\n" 1578 " %A = load i64, i64* %p, !range !{i64 32, i64 64}\n" 1579 " %APtr = inttoptr i64 %A to float*" 1580 " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n" 1581 " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n" 1582 " call void @llvm.assume(i1 %c)\n" 1583 " ret void\n" 1584 "}\n" 1585 "declare void @llvm.assume(i1)\n"); 1586 AssumptionCache AC(*F); 1587 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1588 F->front().getTerminator()); 1589 EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1)); 1590 EXPECT_EQ(Known.One.getZExtValue(), 32u); 1591 Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512"); 1592 Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1593 F->front().getTerminator()); 1594 EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1)); 1595 EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u); 1596 // The known range is not precise given computeKnownBits works 1597 // with the masks of zeros and ones, not the ranges. 1598 EXPECT_EQ(Known.getMinValue(), 544); 1599 EXPECT_EQ(Known.getMaxValue(), 575); 1600 } 1601 1602 class IsBytewiseValueTest : public ValueTrackingTest, 1603 public ::testing::WithParamInterface< 1604 std::pair<const char *, const char *>> { 1605 protected: 1606 }; 1607 1608 const std::pair<const char *, const char *> IsBytewiseValueTests[] = { 1609 { 1610 "i8 0", 1611 "i48* null", 1612 }, 1613 { 1614 "i8 undef", 1615 "i48* undef", 1616 }, 1617 { 1618 "i8 0", 1619 "i8 zeroinitializer", 1620 }, 1621 { 1622 "i8 0", 1623 "i8 0", 1624 }, 1625 { 1626 "i8 -86", 1627 "i8 -86", 1628 }, 1629 { 1630 "i8 -1", 1631 "i8 -1", 1632 }, 1633 { 1634 "i8 undef", 1635 "i16 undef", 1636 }, 1637 { 1638 "i8 0", 1639 "i16 0", 1640 }, 1641 { 1642 "", 1643 "i16 7", 1644 }, 1645 { 1646 "i8 -86", 1647 "i16 -21846", 1648 }, 1649 { 1650 "i8 -1", 1651 "i16 -1", 1652 }, 1653 { 1654 "i8 0", 1655 "i48 0", 1656 }, 1657 { 1658 "i8 -1", 1659 "i48 -1", 1660 }, 1661 { 1662 "i8 0", 1663 "i49 0", 1664 }, 1665 { 1666 "", 1667 "i49 -1", 1668 }, 1669 { 1670 "i8 0", 1671 "half 0xH0000", 1672 }, 1673 { 1674 "i8 -85", 1675 "half 0xHABAB", 1676 }, 1677 { 1678 "i8 0", 1679 "float 0.0", 1680 }, 1681 { 1682 "i8 -1", 1683 "float 0xFFFFFFFFE0000000", 1684 }, 1685 { 1686 "i8 0", 1687 "double 0.0", 1688 }, 1689 { 1690 "i8 -15", 1691 "double 0xF1F1F1F1F1F1F1F1", 1692 }, 1693 { 1694 "i8 undef", 1695 "i16* undef", 1696 }, 1697 { 1698 "i8 0", 1699 "i16* inttoptr (i64 0 to i16*)", 1700 }, 1701 { 1702 "i8 -1", 1703 "i16* inttoptr (i64 -1 to i16*)", 1704 }, 1705 { 1706 "i8 -86", 1707 "i16* inttoptr (i64 -6148914691236517206 to i16*)", 1708 }, 1709 { 1710 "", 1711 "i16* inttoptr (i48 -1 to i16*)", 1712 }, 1713 { 1714 "i8 -1", 1715 "i16* inttoptr (i96 -1 to i16*)", 1716 }, 1717 { 1718 "i8 undef", 1719 "[0 x i8] zeroinitializer", 1720 }, 1721 { 1722 "i8 undef", 1723 "[0 x i8] undef", 1724 }, 1725 { 1726 "i8 undef", 1727 "[5 x [0 x i8]] zeroinitializer", 1728 }, 1729 { 1730 "i8 undef", 1731 "[5 x [0 x i8]] undef", 1732 }, 1733 { 1734 "i8 0", 1735 "[6 x i8] zeroinitializer", 1736 }, 1737 { 1738 "i8 undef", 1739 "[6 x i8] undef", 1740 }, 1741 { 1742 "i8 1", 1743 "[5 x i8] [i8 1, i8 1, i8 1, i8 1, i8 1]", 1744 }, 1745 { 1746 "", 1747 "[5 x i64] [i64 1, i64 1, i64 1, i64 1, i64 1]", 1748 }, 1749 { 1750 "i8 -1", 1751 "[5 x i64] [i64 -1, i64 -1, i64 -1, i64 -1, i64 -1]", 1752 }, 1753 { 1754 "", 1755 "[4 x 
const std::pair<const char *, const char *> IsBytewiseValueTests[] = {
    {
        "i8 0",
        "i48* null",
    },
    {
        "i8 undef",
        "i48* undef",
    },
    {
        "i8 0",
        "i8 zeroinitializer",
    },
    {
        "i8 0",
        "i8 0",
    },
    {
        "i8 -86",
        "i8 -86",
    },
    {
        "i8 -1",
        "i8 -1",
    },
    {
        "i8 undef",
        "i16 undef",
    },
    {
        "i8 0",
        "i16 0",
    },
    {
        "",
        "i16 7",
    },
    {
        "i8 -86",
        "i16 -21846",
    },
    {
        "i8 -1",
        "i16 -1",
    },
    {
        "i8 0",
        "i48 0",
    },
    {
        "i8 -1",
        "i48 -1",
    },
    {
        "i8 0",
        "i49 0",
    },
    {
        "",
        "i49 -1",
    },
    {
        "i8 0",
        "half 0xH0000",
    },
    {
        "i8 -85",
        "half 0xHABAB",
    },
    {
        "i8 0",
        "float 0.0",
    },
    {
        "i8 -1",
        "float 0xFFFFFFFFE0000000",
    },
    {
        "i8 0",
        "double 0.0",
    },
    {
        "i8 -15",
        "double 0xF1F1F1F1F1F1F1F1",
    },
    {
        "i8 undef",
        "i16* undef",
    },
    {
        "i8 0",
        "i16* inttoptr (i64 0 to i16*)",
    },
    {
        "i8 -1",
        "i16* inttoptr (i64 -1 to i16*)",
    },
    {
        "i8 -86",
        "i16* inttoptr (i64 -6148914691236517206 to i16*)",
    },
    {
        "",
        "i16* inttoptr (i48 -1 to i16*)",
    },
    {
        "i8 -1",
        "i16* inttoptr (i96 -1 to i16*)",
    },
    {
        "i8 undef",
        "[0 x i8] zeroinitializer",
    },
    {
        "i8 undef",
        "[0 x i8] undef",
    },
    {
        "i8 undef",
        "[5 x [0 x i8]] zeroinitializer",
    },
    {
        "i8 undef",
        "[5 x [0 x i8]] undef",
    },
    {
        "i8 0",
        "[6 x i8] zeroinitializer",
    },
    {
        "i8 undef",
        "[6 x i8] undef",
    },
    {
        "i8 1",
        "[5 x i8] [i8 1, i8 1, i8 1, i8 1, i8 1]",
    },
    {
        "",
        "[5 x i64] [i64 1, i64 1, i64 1, i64 1, i64 1]",
    },
    {
        "i8 -1",
        "[5 x i64] [i64 -1, i64 -1, i64 -1, i64 -1, i64 -1]",
    },
    {
        "",
        "[4 x i8] [i8 1, i8 2, i8 1, i8 1]",
    },
    {
        "i8 1",
        "[4 x i8] [i8 1, i8 undef, i8 1, i8 1]",
    },
    {
        "i8 0",
        "<6 x i8> zeroinitializer",
    },
    {
        "i8 undef",
        "<6 x i8> undef",
    },
    {
        "i8 1",
        "<5 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1>",
    },
    {
        "",
        "<5 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1>",
    },
    {
        "i8 -1",
        "<5 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>",
    },
    {
        "",
        "<4 x i8> <i8 1, i8 1, i8 2, i8 1>",
    },
    {
        "i8 5",
        "<2 x i8> < i8 5, i8 undef >",
    },
    {
        "i8 0",
        "[2 x [2 x i16]] zeroinitializer",
    },
    {
        "i8 undef",
        "[2 x [2 x i16]] undef",
    },
    {
        "i8 -86",
        "[2 x [2 x i16]] [[2 x i16] [i16 -21846, i16 -21846], "
        "[2 x i16] [i16 -21846, i16 -21846]]",
    },
    {
        "",
        "[2 x [2 x i16]] [[2 x i16] [i16 -21846, i16 -21846], "
        "[2 x i16] [i16 -21836, i16 -21846]]",
    },
    {
        "i8 undef",
        "{ } zeroinitializer",
    },
    {
        "i8 undef",
        "{ } undef",
    },
    {
        "i8 undef",
        "{ {}, {} } zeroinitializer",
    },
    {
        "i8 undef",
        "{ {}, {} } undef",
    },
    {
        "i8 0",
        "{i8, i64, i16*} zeroinitializer",
    },
    {
        "i8 undef",
        "{i8, i64, i16*} undef",
    },
    {
        "i8 -86",
        "{i8, i64, i16*} {i8 -86, i64 -6148914691236517206, i16* undef}",
    },
    {
        "",
        "{i8, i64, i16*} {i8 86, i64 -6148914691236517206, i16* undef}",
    },
};

INSTANTIATE_TEST_CASE_P(IsBytewiseValueParamTests, IsBytewiseValueTest,
                        ::testing::ValuesIn(IsBytewiseValueTests),);

TEST_P(IsBytewiseValueTest, IsBytewiseValue) {
  auto M = parseModule(std::string("@test = global ") + GetParam().second);
  GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getNamedValue("test"));
  Value *Actual = isBytewiseValue(GV->getInitializer(), M->getDataLayout());
  std::string Buff;
  raw_string_ostream S(Buff);
  if (Actual)
    S << *Actual;
  EXPECT_EQ(GetParam().first, S.str());
}

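// The computeConstantRange() calls below pass the value, what is presumably
// the UseInstrInfo flag, the AssumptionCache, a context instruction and, in
// the last sub-test, a starting recursion depth. As the first sub-test shows,
// assumptions are only used when a context instruction is supplied; without
// one the result is the conservative full set.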
TEST_F(ValueTrackingTest, ComputeConstantRange) {
  {
    // Assumptions:
    //  * stride >= 5
    //  * stride < 10
    //
    // stride = [5, 10)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 10
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    ConstantRange CR1 = computeConstantRange(Stride, true, &AC, nullptr);
    EXPECT_TRUE(CR1.isFullSet());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR2 = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(5, CR2.getLower());
    EXPECT_EQ(10, CR2.getUpper());
  }

  {
    // Assumptions:
    //  * stride >= 5
    //  * stride < 200
    //  * stride == 99
    //
    // stride = [99, 100)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 200
    call void @llvm.assume(i1 %lt)
    %eq = icmp eq i32 %stride, 99
    call void @llvm.assume(i1 %eq)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(99, *CR.getSingleElement());
  }

  {
    // Assumptions:
    //  * stride >= 5
    //  * stride >= 50
    //  * stride < 100
    //  * stride < 200
    //
    // stride = [50, 100)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride, i1 %cond) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %gt.2 = icmp uge i32 %stride, 50
    call void @llvm.assume(i1 %gt.2)
    br i1 %cond, label %bb1, label %bb2

  bb1:
    %lt = icmp ult i32 %stride, 200
    call void @llvm.assume(i1 %lt)
    %lt.2 = icmp ult i32 %stride, 100
    call void @llvm.assume(i1 %lt.2)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one

  bb2:
    ret i32 0
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    Instruction *GT2 = &findInstructionByName(F, "gt.2");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, GT2);
    EXPECT_EQ(5, CR.getLower());
    EXPECT_EQ(0, CR.getUpper());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR2 = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(50, CR2.getLower());
    EXPECT_EQ(100, CR2.getUpper());
  }

  {
    // Assumptions:
    //  * stride > 5
    //  * stride < 5
    //
    // stride = empty range, as the assumptions contradict each other.
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride, i1 %cond) {
    %gt = icmp ugt i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 5
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, I);
    EXPECT_TRUE(CR.isEmptySet());
  }

  {
    // Assumptions:
    //  * x.1 >= 5
    //  * x.2 < x.1
    //
    // x.2 = [0, 5)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %x.1, i32 %x.2) {
    %gt = icmp uge i32 %x.1, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %x.2, %x.1
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %x.1, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *X2 = &*std::next(F->arg_begin());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR1 = computeConstantRange(X2, true, &AC, I);
    EXPECT_EQ(0, CR1.getLower());
    EXPECT_EQ(5, CR1.getUpper());

    // Check the depth cutoff results in a conservative result (full set) by
    // passing Depth == MaxDepth == 6.
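    // A minimal sketch of the assumed shape of that cutoff (not necessarily
    // the exact implementation): the analysis returns the full range once the
    // recursion depth reaches the limit, e.g.
    //   if (Depth == MaxDepth)
    //     return ConstantRange::getFull(BitWidth);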
    ConstantRange CR2 = computeConstantRange(X2, true, &AC, I, 6);
    EXPECT_TRUE(CR2.isFullSet());
  }
}

struct FindAllocaForValueTestParams {
  // IR for a function @test whose instruction %r is handed to
  // findAllocaForValue.
  const char *IR;
  // Expected result of findAllocaForValue(%r): an alloca is found or not.
  bool AnyOffsetResult;
  // Expected result of the two-argument findAllocaForValue(%r, true) call,
  // which also requires a zero offset from the alloca.
  bool ZeroOffsetResult;
};

class FindAllocaForValueTest
    : public ValueTrackingTest,
      public ::testing::WithParamInterface<FindAllocaForValueTestParams> {
protected:
};

const FindAllocaForValueTestParams FindAllocaForValueTests[] = {
    {R"(
      define void @test() {
        %a = alloca i64
        %r = bitcast i64* %a to i32*
        ret void
      })",
     true, true},

    {R"(
      define void @test() {
        %a = alloca i32
        %r = getelementptr i32, i32* %a, i32 1
        ret void
      })",
     true, false},

    {R"(
      define void @test() {
        %a = alloca i32
        %r = getelementptr i32, i32* %a, i32 0
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i32
        br label %bb1

      bb1:
        %r = phi i32* [ %a, %entry ], [ %r, %bb1 ]
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
        %a = alloca i32
        %r = select i1 %cond, i32* %a, i32* %a
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
        %a = alloca i32
        %b = alloca i32
        %r = select i1 %cond, i32* %a, i32* %b
        ret void
      })",
     false, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i64
        %a32 = bitcast i64* %a to i32*
        br label %bb1

      bb1:
        %x = phi i32* [ %a32, %entry ], [ %x, %bb1 ]
        %r = getelementptr i32, i32* %x, i32 1
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i64
        %a32 = bitcast i64* %a to i32*
        br label %bb1

      bb1:
        %x = phi i32* [ %a32, %entry ], [ %r, %bb1 ]
        %r = getelementptr i32, i32* %x, i32 1
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, false},

    {R"(
      define void @test(i1 %cond, i64* %a) {
      entry:
        %r = bitcast i64* %a to i32*
        ret void
      })",
     false, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i32
        %b = alloca i32
        br label %bb1

      bb1:
        %r = phi i32* [ %a, %entry ], [ %b, %bb1 ]
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     false, false},
};

TEST_P(FindAllocaForValueTest, findAllocaForValue) {
  auto M = parseModule(GetParam().IR);
  Function *F = M->getFunction("test");
  Instruction *I = &findInstructionByName(F, "r");
  const AllocaInst *AI = findAllocaForValue(I);
  EXPECT_EQ(!!AI, GetParam().AnyOffsetResult);
}

TEST_P(FindAllocaForValueTest, findAllocaForValueZeroOffset) {
  auto M = parseModule(GetParam().IR);
  Function *F = M->getFunction("test");
  Instruction *I = &findInstructionByName(F, "r");
  const AllocaInst *AI = findAllocaForValue(I, true);
  EXPECT_EQ(!!AI, GetParam().ZeroOffsetResult);
}

INSTANTIATE_TEST_CASE_P(FindAllocaForValueTest, FindAllocaForValueTest,
                        ::testing::ValuesIn(FindAllocaForValueTests), );
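
// Usage sketch (hypothetical caller; only the one- and two-argument forms
// exercised above are assumed to exist): findAllocaForValue(Ptr) walks
// bitcasts, GEPs, PHIs and selects back to a unique underlying alloca, and
// findAllocaForValue(Ptr, true) additionally requires the pointer to still
// point at the alloca's first byte, e.g.
//   if (const AllocaInst *AI = findAllocaForValue(Ptr, /*ZeroOffset=*/true))
//     ; // Ptr is the start of AI, possibly reached via casts, PHIs, selects.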