1 //===- ValueTrackingTest.cpp - ValueTracking tests ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "llvm/Analysis/ValueTracking.h" 10 #include "llvm/Analysis/AssumptionCache.h" 11 #include "llvm/AsmParser/Parser.h" 12 #include "llvm/IR/ConstantRange.h" 13 #include "llvm/IR/Dominators.h" 14 #include "llvm/IR/Function.h" 15 #include "llvm/IR/InstIterator.h" 16 #include "llvm/IR/Instructions.h" 17 #include "llvm/IR/LLVMContext.h" 18 #include "llvm/IR/Module.h" 19 #include "llvm/Support/ErrorHandling.h" 20 #include "llvm/Support/KnownBits.h" 21 #include "llvm/Support/SourceMgr.h" 22 #include "llvm/Transforms/Utils/Local.h" 23 #include "gtest/gtest.h" 24 25 using namespace llvm; 26 27 namespace { 28 29 static Instruction *findInstructionByNameOrNull(Function *F, StringRef Name) { 30 for (Instruction &I : instructions(F)) 31 if (I.getName() == Name) 32 return &I; 33 34 return nullptr; 35 } 36 37 static Instruction &findInstructionByName(Function *F, StringRef Name) { 38 auto *I = findInstructionByNameOrNull(F, Name); 39 if (I) 40 return *I; 41 42 llvm_unreachable("Expected value not found"); 43 } 44 45 class ValueTrackingTest : public testing::Test { 46 protected: 47 std::unique_ptr<Module> parseModule(StringRef Assembly) { 48 SMDiagnostic Error; 49 std::unique_ptr<Module> M = parseAssemblyString(Assembly, Error, Context); 50 51 std::string errMsg; 52 raw_string_ostream os(errMsg); 53 Error.print("", os); 54 EXPECT_TRUE(M) << os.str(); 55 56 return M; 57 } 58 59 void parseAssembly(StringRef Assembly) { 60 M = parseModule(Assembly); 61 ASSERT_TRUE(M); 62 63 F = M->getFunction("test"); 64 ASSERT_TRUE(F) << "Test must have a function @test"; 65 if (!F) 66 return; 67 68 A = findInstructionByNameOrNull(F, "A"); 69 ASSERT_TRUE(A) << "@test must have an instruction %A"; 70 A2 = findInstructionByNameOrNull(F, "A2"); 71 A3 = findInstructionByNameOrNull(F, "A3"); 72 A4 = findInstructionByNameOrNull(F, "A4"); 73 74 CxtI = findInstructionByNameOrNull(F, "CxtI"); 75 CxtI2 = findInstructionByNameOrNull(F, "CxtI2"); 76 CxtI3 = findInstructionByNameOrNull(F, "CxtI3"); 77 } 78 79 LLVMContext Context; 80 std::unique_ptr<Module> M; 81 Function *F = nullptr; 82 Instruction *A = nullptr; 83 // Instructions (optional) 84 Instruction *A2 = nullptr, *A3 = nullptr, *A4 = nullptr; 85 86 // Context instructions (optional) 87 Instruction *CxtI = nullptr, *CxtI2 = nullptr, *CxtI3 = nullptr; 88 }; 89 90 class MatchSelectPatternTest : public ValueTrackingTest { 91 protected: 92 void expectPattern(const SelectPatternResult &P) { 93 Value *LHS, *RHS; 94 Instruction::CastOps CastOp; 95 SelectPatternResult R = matchSelectPattern(A, LHS, RHS, &CastOp); 96 EXPECT_EQ(P.Flavor, R.Flavor); 97 EXPECT_EQ(P.NaNBehavior, R.NaNBehavior); 98 EXPECT_EQ(P.Ordered, R.Ordered); 99 } 100 }; 101 102 class ComputeKnownBitsTest : public ValueTrackingTest { 103 protected: 104 void expectKnownBits(uint64_t Zero, uint64_t One) { 105 auto Known = computeKnownBits(A, M->getDataLayout()); 106 ASSERT_FALSE(Known.hasConflict()); 107 EXPECT_EQ(Known.One.getZExtValue(), One); 108 EXPECT_EQ(Known.Zero.getZExtValue(), Zero); 109 } 110 }; 111 112 } 113 114 TEST_F(MatchSelectPatternTest, SimpleFMin) { 115 parseAssembly( 116 "define float @test(float %a) {\n" 117 " %1 = 
fcmp ult float %a, 5.0\n" 118 " %A = select i1 %1, float %a, float 5.0\n" 119 " ret float %A\n" 120 "}\n"); 121 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 122 } 123 124 TEST_F(MatchSelectPatternTest, SimpleFMax) { 125 parseAssembly( 126 "define float @test(float %a) {\n" 127 " %1 = fcmp ogt float %a, 5.0\n" 128 " %A = select i1 %1, float %a, float 5.0\n" 129 " ret float %A\n" 130 "}\n"); 131 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 132 } 133 134 TEST_F(MatchSelectPatternTest, SwappedFMax) { 135 parseAssembly( 136 "define float @test(float %a) {\n" 137 " %1 = fcmp olt float 5.0, %a\n" 138 " %A = select i1 %1, float %a, float 5.0\n" 139 " ret float %A\n" 140 "}\n"); 141 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 142 } 143 144 TEST_F(MatchSelectPatternTest, SwappedFMax2) { 145 parseAssembly( 146 "define float @test(float %a) {\n" 147 " %1 = fcmp olt float %a, 5.0\n" 148 " %A = select i1 %1, float 5.0, float %a\n" 149 " ret float %A\n" 150 "}\n"); 151 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 152 } 153 154 TEST_F(MatchSelectPatternTest, SwappedFMax3) { 155 parseAssembly( 156 "define float @test(float %a) {\n" 157 " %1 = fcmp ult float %a, 5.0\n" 158 " %A = select i1 %1, float 5.0, float %a\n" 159 " ret float %A\n" 160 "}\n"); 161 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 162 } 163 164 TEST_F(MatchSelectPatternTest, FastFMin) { 165 parseAssembly( 166 "define float @test(float %a) {\n" 167 " %1 = fcmp nnan olt float %a, 5.0\n" 168 " %A = select i1 %1, float %a, float 5.0\n" 169 " ret float %A\n" 170 "}\n"); 171 expectPattern({SPF_FMINNUM, SPNB_RETURNS_ANY, false}); 172 } 173 174 TEST_F(MatchSelectPatternTest, FMinConstantZero) { 175 parseAssembly( 176 "define float @test(float %a) {\n" 177 " %1 = fcmp ole float %a, 0.0\n" 178 " %A = select i1 %1, float %a, float 0.0\n" 179 " ret float %A\n" 180 "}\n"); 181 // This shouldn't be matched, as %a could be -0.0. 182 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 183 } 184 185 TEST_F(MatchSelectPatternTest, FMinConstantZeroNsz) { 186 parseAssembly( 187 "define float @test(float %a) {\n" 188 " %1 = fcmp nsz ole float %a, 0.0\n" 189 " %A = select i1 %1, float %a, float 0.0\n" 190 " ret float %A\n" 191 "}\n"); 192 // But this should be, because we've ignored signed zeroes. 193 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 194 } 195 196 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero1) { 197 parseAssembly( 198 "define float @test(float %a) {\n" 199 " %1 = fcmp olt float -0.0, %a\n" 200 " %A = select i1 %1, float 0.0, float %a\n" 201 " ret float %A\n" 202 "}\n"); 203 // The sign of zero doesn't matter in fcmp. 204 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true}); 205 } 206 207 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero2) { 208 parseAssembly( 209 "define float @test(float %a) {\n" 210 " %1 = fcmp ogt float %a, -0.0\n" 211 " %A = select i1 %1, float 0.0, float %a\n" 212 " ret float %A\n" 213 "}\n"); 214 // The sign of zero doesn't matter in fcmp. 215 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 216 } 217 218 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero3) { 219 parseAssembly( 220 "define float @test(float %a) {\n" 221 " %1 = fcmp olt float 0.0, %a\n" 222 " %A = select i1 %1, float -0.0, float %a\n" 223 " ret float %A\n" 224 "}\n"); 225 // The sign of zero doesn't matter in fcmp. 
226 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true}); 227 } 228 229 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero4) { 230 parseAssembly( 231 "define float @test(float %a) {\n" 232 " %1 = fcmp ogt float %a, 0.0\n" 233 " %A = select i1 %1, float -0.0, float %a\n" 234 " ret float %A\n" 235 "}\n"); 236 // The sign of zero doesn't matter in fcmp. 237 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 238 } 239 240 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero5) { 241 parseAssembly( 242 "define float @test(float %a) {\n" 243 " %1 = fcmp ogt float -0.0, %a\n" 244 " %A = select i1 %1, float %a, float 0.0\n" 245 " ret float %A\n" 246 "}\n"); 247 // The sign of zero doesn't matter in fcmp. 248 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false}); 249 } 250 251 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero6) { 252 parseAssembly( 253 "define float @test(float %a) {\n" 254 " %1 = fcmp olt float %a, -0.0\n" 255 " %A = select i1 %1, float %a, float 0.0\n" 256 " ret float %A\n" 257 "}\n"); 258 // The sign of zero doesn't matter in fcmp. 259 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 260 } 261 262 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero7) { 263 parseAssembly( 264 "define float @test(float %a) {\n" 265 " %1 = fcmp ogt float 0.0, %a\n" 266 " %A = select i1 %1, float %a, float -0.0\n" 267 " ret float %A\n" 268 "}\n"); 269 // The sign of zero doesn't matter in fcmp. 270 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false}); 271 } 272 273 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero8) { 274 parseAssembly( 275 "define float @test(float %a) {\n" 276 " %1 = fcmp olt float %a, 0.0\n" 277 " %A = select i1 %1, float %a, float -0.0\n" 278 " ret float %A\n" 279 "}\n"); 280 // The sign of zero doesn't matter in fcmp. 281 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 282 } 283 284 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero1) { 285 parseAssembly( 286 "define float @test(float %a) {\n" 287 " %1 = fcmp ogt float -0.0, %a\n" 288 " %A = select i1 %1, float 0.0, float %a\n" 289 " ret float %A\n" 290 "}\n"); 291 // The sign of zero doesn't matter in fcmp. 292 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true}); 293 } 294 295 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero2) { 296 parseAssembly( 297 "define float @test(float %a) {\n" 298 " %1 = fcmp olt float %a, -0.0\n" 299 " %A = select i1 %1, float 0.0, float %a\n" 300 " ret float %A\n" 301 "}\n"); 302 // The sign of zero doesn't matter in fcmp. 303 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 304 } 305 306 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero3) { 307 parseAssembly( 308 "define float @test(float %a) {\n" 309 " %1 = fcmp ogt float 0.0, %a\n" 310 " %A = select i1 %1, float -0.0, float %a\n" 311 " ret float %A\n" 312 "}\n"); 313 // The sign of zero doesn't matter in fcmp. 314 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true}); 315 } 316 317 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero4) { 318 parseAssembly( 319 "define float @test(float %a) {\n" 320 " %1 = fcmp olt float %a, 0.0\n" 321 " %A = select i1 %1, float -0.0, float %a\n" 322 " ret float %A\n" 323 "}\n"); 324 // The sign of zero doesn't matter in fcmp. 
325 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false}); 326 } 327 328 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero5) { 329 parseAssembly( 330 "define float @test(float %a) {\n" 331 " %1 = fcmp olt float -0.0, %a\n" 332 " %A = select i1 %1, float %a, float 0.0\n" 333 " ret float %A\n" 334 "}\n"); 335 // The sign of zero doesn't matter in fcmp. 336 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 337 } 338 339 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero6) { 340 parseAssembly( 341 "define float @test(float %a) {\n" 342 " %1 = fcmp ogt float %a, -0.0\n" 343 " %A = select i1 %1, float %a, float 0.0\n" 344 " ret float %A\n" 345 "}\n"); 346 // The sign of zero doesn't matter in fcmp. 347 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 348 } 349 350 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero7) { 351 parseAssembly( 352 "define float @test(float %a) {\n" 353 " %1 = fcmp olt float 0.0, %a\n" 354 " %A = select i1 %1, float %a, float -0.0\n" 355 " ret float %A\n" 356 "}\n"); 357 // The sign of zero doesn't matter in fcmp. 358 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false}); 359 } 360 361 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero8) { 362 parseAssembly( 363 "define float @test(float %a) {\n" 364 " %1 = fcmp ogt float %a, 0.0\n" 365 " %A = select i1 %1, float %a, float -0.0\n" 366 " ret float %A\n" 367 "}\n"); 368 // The sign of zero doesn't matter in fcmp. 369 expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true}); 370 } 371 372 TEST_F(MatchSelectPatternTest, FMinMismatchConstantZeroVecUndef) { 373 parseAssembly( 374 "define <2 x float> @test(<2 x float> %a) {\n" 375 " %1 = fcmp ogt <2 x float> %a, <float -0.0, float -0.0>\n" 376 " %A = select <2 x i1> %1, <2 x float> <float undef, float 0.0>, <2 x float> %a\n" 377 " ret <2 x float> %A\n" 378 "}\n"); 379 // An undef in a vector constant can not be back-propagated for this analysis. 380 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 381 } 382 383 TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZeroVecUndef) { 384 parseAssembly( 385 "define <2 x float> @test(<2 x float> %a) {\n" 386 " %1 = fcmp ogt <2 x float> %a, zeroinitializer\n" 387 " %A = select <2 x i1> %1, <2 x float> %a, <2 x float> <float -0.0, float undef>\n" 388 " ret <2 x float> %A\n" 389 "}\n"); 390 // An undef in a vector constant can not be back-propagated for this analysis. 391 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 392 } 393 394 TEST_F(MatchSelectPatternTest, VectorFMinimum) { 395 parseAssembly( 396 "define <4 x float> @test(<4 x float> %a) {\n" 397 " %1 = fcmp ule <4 x float> %a, \n" 398 " <float 5.0, float 5.0, float 5.0, float 5.0>\n" 399 " %A = select <4 x i1> %1, <4 x float> %a,\n" 400 " <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n" 401 " ret <4 x float> %A\n" 402 "}\n"); 403 // Check that pattern matching works on vectors where each lane has the same 404 // unordered pattern. 405 expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false}); 406 } 407 408 TEST_F(MatchSelectPatternTest, VectorFMinOtherOrdered) { 409 parseAssembly( 410 "define <4 x float> @test(<4 x float> %a) {\n" 411 " %1 = fcmp ole <4 x float> %a, \n" 412 " <float 5.0, float 5.0, float 5.0, float 5.0>\n" 413 " %A = select <4 x i1> %1, <4 x float> %a,\n" 414 " <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n" 415 " ret <4 x float> %A\n" 416 "}\n"); 417 // Check that pattern matching works on vectors where each lane has the same 418 // ordered pattern. 
419 expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true}); 420 } 421 422 TEST_F(MatchSelectPatternTest, VectorNotFMinimum) { 423 parseAssembly( 424 "define <4 x float> @test(<4 x float> %a) {\n" 425 " %1 = fcmp ule <4 x float> %a, \n" 426 " <float 5.0, float 0x7ff8000000000000, float 5.0, float 5.0>\n" 427 " %A = select <4 x i1> %1, <4 x float> %a,\n" 428 " <4 x float> <float 5.0, float 0x7ff8000000000000, float 5.0, float " 429 "5.0>\n" 430 " ret <4 x float> %A\n" 431 "}\n"); 432 // The lane that contains a NaN (0x7ff80...) behaves like a 433 // non-NaN-propagating min and the other lines behave like a NaN-propagating 434 // min, so check that neither is returned. 435 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 436 } 437 438 TEST_F(MatchSelectPatternTest, VectorNotFMinZero) { 439 parseAssembly( 440 "define <4 x float> @test(<4 x float> %a) {\n" 441 " %1 = fcmp ule <4 x float> %a, \n" 442 " <float 5.0, float -0.0, float 5.0, float 5.0>\n" 443 " %A = select <4 x i1> %1, <4 x float> %a,\n" 444 " <4 x float> <float 5.0, float 0.0, float 5.0, float 5.0>\n" 445 " ret <4 x float> %A\n" 446 "}\n"); 447 // Always selects the second lane of %a if it is positive or negative zero, so 448 // this is stricter than a min. 449 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 450 } 451 452 TEST_F(MatchSelectPatternTest, DoubleCastU) { 453 parseAssembly( 454 "define i32 @test(i8 %a, i8 %b) {\n" 455 " %1 = icmp ult i8 %a, %b\n" 456 " %2 = zext i8 %a to i32\n" 457 " %3 = zext i8 %b to i32\n" 458 " %A = select i1 %1, i32 %2, i32 %3\n" 459 " ret i32 %A\n" 460 "}\n"); 461 // We should be able to look through the situation where we cast both operands 462 // to the select. 463 expectPattern({SPF_UMIN, SPNB_NA, false}); 464 } 465 466 TEST_F(MatchSelectPatternTest, DoubleCastS) { 467 parseAssembly( 468 "define i32 @test(i8 %a, i8 %b) {\n" 469 " %1 = icmp slt i8 %a, %b\n" 470 " %2 = sext i8 %a to i32\n" 471 " %3 = sext i8 %b to i32\n" 472 " %A = select i1 %1, i32 %2, i32 %3\n" 473 " ret i32 %A\n" 474 "}\n"); 475 // We should be able to look through the situation where we cast both operands 476 // to the select. 477 expectPattern({SPF_SMIN, SPNB_NA, false}); 478 } 479 480 TEST_F(MatchSelectPatternTest, DoubleCastBad) { 481 parseAssembly( 482 "define i32 @test(i8 %a, i8 %b) {\n" 483 " %1 = icmp ult i8 %a, %b\n" 484 " %2 = zext i8 %a to i32\n" 485 " %3 = sext i8 %b to i32\n" 486 " %A = select i1 %1, i32 %2, i32 %3\n" 487 " ret i32 %A\n" 488 "}\n"); 489 // The cast types here aren't the same, so we cannot match an UMIN. 
490 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 491 } 492 493 TEST_F(MatchSelectPatternTest, NotNotSMin) { 494 parseAssembly( 495 "define i8 @test(i8 %a, i8 %b) {\n" 496 " %cmp = icmp sgt i8 %a, %b\n" 497 " %an = xor i8 %a, -1\n" 498 " %bn = xor i8 %b, -1\n" 499 " %A = select i1 %cmp, i8 %an, i8 %bn\n" 500 " ret i8 %A\n" 501 "}\n"); 502 expectPattern({SPF_SMIN, SPNB_NA, false}); 503 } 504 505 TEST_F(MatchSelectPatternTest, NotNotSMinSwap) { 506 parseAssembly( 507 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 508 " %cmp = icmp slt <2 x i8> %a, %b\n" 509 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 510 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 511 " %A = select <2 x i1> %cmp, <2 x i8> %bn, <2 x i8> %an\n" 512 " ret <2 x i8> %A\n" 513 "}\n"); 514 expectPattern({SPF_SMIN, SPNB_NA, false}); 515 } 516 517 TEST_F(MatchSelectPatternTest, NotNotSMax) { 518 parseAssembly( 519 "define i8 @test(i8 %a, i8 %b) {\n" 520 " %cmp = icmp slt i8 %a, %b\n" 521 " %an = xor i8 %a, -1\n" 522 " %bn = xor i8 %b, -1\n" 523 " %A = select i1 %cmp, i8 %an, i8 %bn\n" 524 " ret i8 %A\n" 525 "}\n"); 526 expectPattern({SPF_SMAX, SPNB_NA, false}); 527 } 528 529 TEST_F(MatchSelectPatternTest, NotNotSMaxSwap) { 530 parseAssembly( 531 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 532 " %cmp = icmp sgt <2 x i8> %a, %b\n" 533 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 534 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 535 " %A = select <2 x i1> %cmp, <2 x i8> %bn, <2 x i8> %an\n" 536 " ret <2 x i8> %A\n" 537 "}\n"); 538 expectPattern({SPF_SMAX, SPNB_NA, false}); 539 } 540 541 TEST_F(MatchSelectPatternTest, NotNotUMin) { 542 parseAssembly( 543 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 544 " %cmp = icmp ugt <2 x i8> %a, %b\n" 545 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 546 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 547 " %A = select <2 x i1> %cmp, <2 x i8> %an, <2 x i8> %bn\n" 548 " ret <2 x i8> %A\n" 549 "}\n"); 550 expectPattern({SPF_UMIN, SPNB_NA, false}); 551 } 552 553 TEST_F(MatchSelectPatternTest, NotNotUMinSwap) { 554 parseAssembly( 555 "define i8 @test(i8 %a, i8 %b) {\n" 556 " %cmp = icmp ult i8 %a, %b\n" 557 " %an = xor i8 %a, -1\n" 558 " %bn = xor i8 %b, -1\n" 559 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 560 " ret i8 %A\n" 561 "}\n"); 562 expectPattern({SPF_UMIN, SPNB_NA, false}); 563 } 564 565 TEST_F(MatchSelectPatternTest, NotNotUMax) { 566 parseAssembly( 567 "define <2 x i8> @test(<2 x i8> %a, <2 x i8> %b) {\n" 568 " %cmp = icmp ult <2 x i8> %a, %b\n" 569 " %an = xor <2 x i8> %a, <i8 -1, i8-1>\n" 570 " %bn = xor <2 x i8> %b, <i8 -1, i8-1>\n" 571 " %A = select <2 x i1> %cmp, <2 x i8> %an, <2 x i8> %bn\n" 572 " ret <2 x i8> %A\n" 573 "}\n"); 574 expectPattern({SPF_UMAX, SPNB_NA, false}); 575 } 576 577 TEST_F(MatchSelectPatternTest, NotNotUMaxSwap) { 578 parseAssembly( 579 "define i8 @test(i8 %a, i8 %b) {\n" 580 " %cmp = icmp ugt i8 %a, %b\n" 581 " %an = xor i8 %a, -1\n" 582 " %bn = xor i8 %b, -1\n" 583 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 584 " ret i8 %A\n" 585 "}\n"); 586 expectPattern({SPF_UMAX, SPNB_NA, false}); 587 } 588 589 TEST_F(MatchSelectPatternTest, NotNotEq) { 590 parseAssembly( 591 "define i8 @test(i8 %a, i8 %b) {\n" 592 " %cmp = icmp eq i8 %a, %b\n" 593 " %an = xor i8 %a, -1\n" 594 " %bn = xor i8 %b, -1\n" 595 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 596 " ret i8 %A\n" 597 "}\n"); 598 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 599 } 600 601 TEST_F(MatchSelectPatternTest, NotNotNe) { 602 parseAssembly( 603 "define i8 @test(i8 %a, i8 %b) {\n" 604 " %cmp = icmp ne i8 %a, %b\n" 605 
" %an = xor i8 %a, -1\n" 606 " %bn = xor i8 %b, -1\n" 607 " %A = select i1 %cmp, i8 %bn, i8 %an\n" 608 " ret i8 %A\n" 609 "}\n"); 610 expectPattern({SPF_UNKNOWN, SPNB_NA, false}); 611 } 612 613 TEST(ValueTracking, GuaranteedToTransferExecutionToSuccessor) { 614 StringRef Assembly = 615 "declare void @nounwind_readonly(i32*) nounwind readonly " 616 "declare void @nounwind_argmemonly(i32*) nounwind argmemonly " 617 "declare void @nounwind_willreturn(i32*) nounwind willreturn " 618 "declare void @throws_but_readonly(i32*) readonly " 619 "declare void @throws_but_argmemonly(i32*) argmemonly " 620 "declare void @throws_but_willreturn(i32*) willreturn " 621 " " 622 "declare void @unknown(i32*) " 623 " " 624 "define void @f(i32* %p) { " 625 " call void @nounwind_readonly(i32* %p) " 626 " call void @nounwind_argmemonly(i32* %p) " 627 " call void @nounwind_willreturn(i32* %p)" 628 " call void @throws_but_readonly(i32* %p) " 629 " call void @throws_but_argmemonly(i32* %p) " 630 " call void @throws_but_willreturn(i32* %p) " 631 " call void @unknown(i32* %p) nounwind readonly " 632 " call void @unknown(i32* %p) nounwind argmemonly " 633 " call void @unknown(i32* %p) nounwind willreturn " 634 " call void @unknown(i32* %p) readonly " 635 " call void @unknown(i32* %p) argmemonly " 636 " call void @unknown(i32* %p) willreturn " 637 " ret void " 638 "} "; 639 640 LLVMContext Context; 641 SMDiagnostic Error; 642 auto M = parseAssemblyString(Assembly, Error, Context); 643 assert(M && "Bad assembly?"); 644 645 auto *F = M->getFunction("f"); 646 assert(F && "Bad assembly?"); 647 648 auto &BB = F->getEntryBlock(); 649 bool ExpectedAnswers[] = { 650 false, // call void @nounwind_readonly(i32* %p) 651 false, // call void @nounwind_argmemonly(i32* %p) 652 true, // call void @nounwind_willreturn(i32* %p) 653 false, // call void @throws_but_readonly(i32* %p) 654 false, // call void @throws_but_argmemonly(i32* %p) 655 false, // call void @throws_but_willreturn(i32* %p) 656 false, // call void @unknown(i32* %p) nounwind readonly 657 false, // call void @unknown(i32* %p) nounwind argmemonly 658 true, // call void @unknown(i32* %p) nounwind willreturn 659 false, // call void @unknown(i32* %p) readonly 660 false, // call void @unknown(i32* %p) argmemonly 661 false, // call void @unknown(i32* %p) willreturn 662 false, // ret void 663 }; 664 665 int Index = 0; 666 for (auto &I : BB) { 667 EXPECT_EQ(isGuaranteedToTransferExecutionToSuccessor(&I), 668 ExpectedAnswers[Index]) 669 << "Incorrect answer at instruction " << Index << " = " << I; 670 Index++; 671 } 672 } 673 674 TEST_F(ValueTrackingTest, ComputeNumSignBits_PR32045) { 675 parseAssembly( 676 "define i32 @test(i32 %a) {\n" 677 " %A = ashr i32 %a, -1\n" 678 " ret i32 %A\n" 679 "}\n"); 680 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 681 } 682 683 // No guarantees for canonical IR in this analysis, so this just bails out. 684 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle) { 685 parseAssembly( 686 "define <2 x i32> @test() {\n" 687 " %A = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 0>\n" 688 " ret <2 x i32> %A\n" 689 "}\n"); 690 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 691 } 692 693 // No guarantees for canonical IR in this analysis, so a shuffle element that 694 // references an undef value means this can't return any extra information. 
695 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle2) { 696 parseAssembly( 697 "define <2 x i32> @test(<2 x i1> %x) {\n" 698 " %sext = sext <2 x i1> %x to <2 x i32>\n" 699 " %A = shufflevector <2 x i32> %sext, <2 x i32> undef, <2 x i32> <i32 0, i32 2>\n" 700 " ret <2 x i32> %A\n" 701 "}\n"); 702 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u); 703 } 704 705 TEST_F(ValueTrackingTest, impliesPoisonTest_Identity) { 706 parseAssembly("define void @test(i32 %x, i32 %y) {\n" 707 " %A = add i32 %x, %y\n" 708 " ret void\n" 709 "}"); 710 EXPECT_TRUE(impliesPoison(A, A)); 711 } 712 713 TEST_F(ValueTrackingTest, impliesPoisonTest_ICmp) { 714 parseAssembly("define void @test(i32 %x) {\n" 715 " %A2 = icmp eq i32 %x, 0\n" 716 " %A = icmp eq i32 %x, 1\n" 717 " ret void\n" 718 "}"); 719 EXPECT_TRUE(impliesPoison(A2, A)); 720 } 721 722 TEST_F(ValueTrackingTest, impliesPoisonTest_ICmpUnknown) { 723 parseAssembly("define void @test(i32 %x, i32 %y) {\n" 724 " %A2 = icmp eq i32 %x, %y\n" 725 " %A = icmp eq i32 %x, 1\n" 726 " ret void\n" 727 "}"); 728 EXPECT_FALSE(impliesPoison(A2, A)); 729 } 730 731 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNswOkay) { 732 parseAssembly("define void @test(i32 %x) {\n" 733 " %A2 = add nsw i32 %x, 1\n" 734 " %A = add i32 %A2, 1\n" 735 " ret void\n" 736 "}"); 737 EXPECT_TRUE(impliesPoison(A2, A)); 738 } 739 740 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNswOkay2) { 741 parseAssembly("define void @test(i32 %x) {\n" 742 " %A2 = add i32 %x, 1\n" 743 " %A = add nsw i32 %A2, 1\n" 744 " ret void\n" 745 "}"); 746 EXPECT_TRUE(impliesPoison(A2, A)); 747 } 748 749 TEST_F(ValueTrackingTest, impliesPoisonTest_AddNsw) { 750 parseAssembly("define void @test(i32 %x) {\n" 751 " %A2 = add nsw i32 %x, 1\n" 752 " %A = add i32 %x, 1\n" 753 " ret void\n" 754 "}"); 755 EXPECT_FALSE(impliesPoison(A2, A)); 756 } 757 758 TEST_F(ValueTrackingTest, impliesPoisonTest_Cmp) { 759 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 760 " %A2 = icmp eq i32 %x, %y\n" 761 " %A0 = icmp ult i32 %x, %y\n" 762 " %A = or i1 %A0, %c\n" 763 " ret void\n" 764 "}"); 765 EXPECT_TRUE(impliesPoison(A2, A)); 766 } 767 768 TEST_F(ValueTrackingTest, impliesPoisonTest_FCmpFMF) { 769 parseAssembly("define void @test(float %x, float %y, i1 %c) {\n" 770 " %A2 = fcmp nnan oeq float %x, %y\n" 771 " %A0 = fcmp olt float %x, %y\n" 772 " %A = or i1 %A0, %c\n" 773 " ret void\n" 774 "}"); 775 EXPECT_FALSE(impliesPoison(A2, A)); 776 } 777 778 TEST_F(ValueTrackingTest, impliesPoisonTest_AddSubSameOps) { 779 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 780 " %A2 = add i32 %x, %y\n" 781 " %A = sub i32 %x, %y\n" 782 " ret void\n" 783 "}"); 784 EXPECT_TRUE(impliesPoison(A2, A)); 785 } 786 787 TEST_F(ValueTrackingTest, impliesPoisonTest_MaskCmp) { 788 parseAssembly("define void @test(i32 %x, i32 %y, i1 %c) {\n" 789 " %M2 = and i32 %x, 7\n" 790 " %A2 = icmp eq i32 %M2, 1\n" 791 " %M = and i32 %x, 15\n" 792 " %A = icmp eq i32 %M, 3\n" 793 " ret void\n" 794 "}"); 795 EXPECT_TRUE(impliesPoison(A2, A)); 796 } 797 798 TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle_Pointers) { 799 parseAssembly( 800 "define <2 x i32*> @test(<2 x i32*> %x) {\n" 801 " %A = shufflevector <2 x i32*> zeroinitializer, <2 x i32*> undef, <2 x i32> zeroinitializer\n" 802 " ret <2 x i32*> %A\n" 803 "}\n"); 804 EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 64u); 805 } 806 807 TEST(ValueTracking, propagatesPoison) { 808 std::string AsmHead = 809 "declare i32 @g(i32)\n" 810 "declare {i32, i1} 
@llvm.sadd.with.overflow.i32(i32 %a, i32 %b)\n" 811 "declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)\n" 812 "declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)\n" 813 "declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)\n" 814 "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n" 815 "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n" 816 "define void @f(i32 %x, i32 %y, float %fx, float %fy, " 817 "i1 %cond, i8* %p) {\n"; 818 std::string AsmTail = " ret void\n}"; 819 // (propagates poison?, IR instruction) 820 SmallVector<std::pair<bool, std::string>, 32> Data = { 821 {true, "add i32 %x, %y"}, 822 {true, "add nsw nuw i32 %x, %y"}, 823 {true, "ashr i32 %x, %y"}, 824 {true, "lshr exact i32 %x, 31"}, 825 {true, "fcmp oeq float %fx, %fy"}, 826 {true, "icmp eq i32 %x, %y"}, 827 {true, "getelementptr i8, i8* %p, i32 %x"}, 828 {true, "getelementptr inbounds i8, i8* %p, i32 %x"}, 829 {true, "bitcast float %fx to i32"}, 830 {false, "select i1 %cond, i32 %x, i32 %y"}, 831 {false, "freeze i32 %x"}, 832 {true, "udiv i32 %x, %y"}, 833 {true, "urem i32 %x, %y"}, 834 {true, "sdiv exact i32 %x, %y"}, 835 {true, "srem i32 %x, %y"}, 836 {false, "call i32 @g(i32 %x)"}, 837 {true, "call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)"}, 838 {true, "call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)"}, 839 {true, "call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 %y)"}, 840 {true, "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)"}, 841 {true, "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)"}, 842 {true, "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)"}}; 843 844 std::string AssemblyStr = AsmHead; 845 for (auto &Itm : Data) 846 AssemblyStr += Itm.second + "\n"; 847 AssemblyStr += AsmTail; 848 849 LLVMContext Context; 850 SMDiagnostic Error; 851 auto M = parseAssemblyString(AssemblyStr, Error, Context); 852 assert(M && "Bad assembly?"); 853 854 auto *F = M->getFunction("f"); 855 assert(F && "Bad assembly?"); 856 857 auto &BB = F->getEntryBlock(); 858 859 int Index = 0; 860 for (auto &I : BB) { 861 if (isa<ReturnInst>(&I)) 862 break; 863 EXPECT_EQ(propagatesPoison(cast<Operator>(&I)), Data[Index].first) 864 << "Incorrect answer at instruction " << Index << " = " << I; 865 Index++; 866 } 867 } 868 869 TEST_F(ValueTrackingTest, programUndefinedIfPoison) { 870 parseAssembly("declare i32 @any_num()" 871 "define void @test(i32 %mask) {\n" 872 " %A = call i32 @any_num()\n" 873 " %B = or i32 %A, %mask\n" 874 " udiv i32 1, %B" 875 " ret void\n" 876 "}\n"); 877 // If %A was poison, udiv raises UB regardless of %mask's value 878 EXPECT_EQ(programUndefinedIfPoison(A), true); 879 } 880 881 TEST_F(ValueTrackingTest, programUndefinedIfUndefOrPoison) { 882 parseAssembly("declare i32 @any_num()" 883 "define void @test(i32 %mask) {\n" 884 " %A = call i32 @any_num()\n" 885 " %B = or i32 %A, %mask\n" 886 " udiv i32 1, %B" 887 " ret void\n" 888 "}\n"); 889 // If %A was undef and %mask was 1, udiv does not raise UB 890 EXPECT_EQ(programUndefinedIfUndefOrPoison(A), false); 891 } 892 893 TEST_F(ValueTrackingTest, isGuaranteedNotToBePoison_exploitBranchCond) { 894 parseAssembly("declare i1 @any_bool()" 895 "define void @test(i1 %y) {\n" 896 " %A = call i1 @any_bool()\n" 897 " %cond = and i1 %A, %y\n" 898 " br i1 %cond, label %BB1, label %BB2\n" 899 "BB1:\n" 900 " ret void\n" 901 "BB2:\n" 902 " ret void\n" 903 "}\n"); 904 DominatorTree DT(*F); 905 for (auto &BB : *F) { 906 if (&BB == &F->getEntryBlock()) 907 
continue; 908 909 EXPECT_EQ(isGuaranteedNotToBePoison(A, nullptr, BB.getTerminator(), &DT), 910 true) 911 << "isGuaranteedNotToBePoison does not hold at " << *BB.getTerminator(); 912 } 913 } 914 915 TEST_F(ValueTrackingTest, isGuaranteedNotToBePoison_phi) { 916 parseAssembly("declare i32 @any_i32(i32)" 917 "define void @test() {\n" 918 "ENTRY:\n" 919 " br label %LOOP\n" 920 "LOOP:\n" 921 " %A = phi i32 [0, %ENTRY], [%A.next, %NEXT]\n" 922 " %A.next = call i32 @any_i32(i32 %A)\n" 923 " %cond = icmp eq i32 %A.next, 0\n" 924 " br i1 %cond, label %NEXT, label %EXIT\n" 925 "NEXT:\n" 926 " br label %LOOP\n" 927 "EXIT:\n" 928 " ret void\n" 929 "}\n"); 930 DominatorTree DT(*F); 931 for (auto &BB : *F) { 932 if (BB.getName() == "LOOP") { 933 EXPECT_EQ(isGuaranteedNotToBePoison(A, nullptr, A, &DT), true) 934 << "isGuaranteedNotToBePoison does not hold"; 935 } 936 } 937 } 938 939 TEST_F(ValueTrackingTest, isGuaranteedNotToBeUndefOrPoison) { 940 parseAssembly("declare void @f(i32 noundef)" 941 "define void @test(i32 %x) {\n" 942 " %A = bitcast i32 %x to i32\n" 943 " call void @f(i32 noundef %x)\n" 944 " ret void\n" 945 "}\n"); 946 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(A), true); 947 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(UndefValue::get(IntegerType::get(Context, 8))), false); 948 EXPECT_EQ(isGuaranteedNotToBeUndefOrPoison(PoisonValue::get(IntegerType::get(Context, 8))), false); 949 EXPECT_EQ(isGuaranteedNotToBePoison(UndefValue::get(IntegerType::get(Context, 8))), true); 950 EXPECT_EQ(isGuaranteedNotToBePoison(PoisonValue::get(IntegerType::get(Context, 8))), false); 951 952 Type *Int32Ty = Type::getInt32Ty(Context); 953 Constant *CU = UndefValue::get(Int32Ty); 954 Constant *CP = PoisonValue::get(Int32Ty); 955 Constant *C1 = ConstantInt::get(Int32Ty, 1); 956 Constant *C2 = ConstantInt::get(Int32Ty, 2); 957 958 { 959 Constant *V1 = ConstantVector::get({C1, C2}); 960 EXPECT_TRUE(isGuaranteedNotToBeUndefOrPoison(V1)); 961 EXPECT_TRUE(isGuaranteedNotToBePoison(V1)); 962 } 963 964 { 965 Constant *V2 = ConstantVector::get({C1, CU}); 966 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(V2)); 967 EXPECT_TRUE(isGuaranteedNotToBePoison(V2)); 968 } 969 970 { 971 Constant *V3 = ConstantVector::get({C1, CP}); 972 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(V3)); 973 EXPECT_FALSE(isGuaranteedNotToBePoison(V3)); 974 } 975 } 976 977 TEST_F(ValueTrackingTest, isGuaranteedNotToBeUndefOrPoison_assume) { 978 parseAssembly("declare i1 @f_i1()\n" 979 "declare i32 @f_i32()\n" 980 "declare void @llvm.assume(i1)\n" 981 "define void @test() {\n" 982 " %A = call i32 @f_i32()\n" 983 " %cond = call i1 @f_i1()\n" 984 " %CxtI = add i32 0, 0\n" 985 " br i1 %cond, label %BB1, label %EXIT\n" 986 "BB1:\n" 987 " %CxtI2 = add i32 0, 0\n" 988 " %cond2 = call i1 @f_i1()\n" 989 " call void @llvm.assume(i1 true) [ \"noundef\"(i32 %A) ]\n" 990 " br i1 %cond2, label %BB2, label %EXIT\n" 991 "BB2:\n" 992 " %CxtI3 = add i32 0, 0\n" 993 " ret void\n" 994 "EXIT:\n" 995 " ret void\n" 996 "}"); 997 AssumptionCache AC(*F); 998 DominatorTree DT(*F); 999 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI, &DT)); 1000 EXPECT_FALSE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI2, &DT)); 1001 EXPECT_TRUE(isGuaranteedNotToBeUndefOrPoison(A, &AC, CxtI3, &DT)); 1002 } 1003 1004 TEST(ValueTracking, canCreatePoisonOrUndef) { 1005 std::string AsmHead = 1006 "@s = external dso_local global i32, align 1\n" 1007 "declare i32 @g(i32)\n" 1008 "declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)\n" 1009 "declare {i32, i1} 
@llvm.ssub.with.overflow.i32(i32 %a, i32 %b)\n" 1010 "declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)\n" 1011 "declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)\n" 1012 "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n" 1013 "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n" 1014 "define void @f(i32 %x, i32 %y, float %fx, float %fy, i1 %cond, " 1015 "<4 x i32> %vx, <4 x i32> %vx2, <vscale x 4 x i32> %svx, i8* %p) {\n"; 1016 std::string AsmTail = " ret void\n}"; 1017 // (can create poison?, can create undef?, IR instruction) 1018 SmallVector<std::pair<std::pair<bool, bool>, std::string>, 32> Data = { 1019 {{false, false}, "add i32 %x, %y"}, 1020 {{true, false}, "add nsw nuw i32 %x, %y"}, 1021 {{true, false}, "shl i32 %x, %y"}, 1022 {{true, false}, "shl <4 x i32> %vx, %vx2"}, 1023 {{true, false}, "shl nsw i32 %x, %y"}, 1024 {{true, false}, "shl nsw <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1025 {{false, false}, "shl i32 %x, 31"}, 1026 {{true, false}, "shl i32 %x, 32"}, 1027 {{false, false}, "shl <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1028 {{true, false}, "shl <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 32>"}, 1029 {{true, false}, "ashr i32 %x, %y"}, 1030 {{true, false}, "ashr exact i32 %x, %y"}, 1031 {{false, false}, "ashr i32 %x, 31"}, 1032 {{true, false}, "ashr exact i32 %x, 31"}, 1033 {{false, false}, "ashr <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1034 {{true, false}, "ashr <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 32>"}, 1035 {{true, false}, "ashr exact <4 x i32> %vx, <i32 0, i32 1, i32 2, i32 3>"}, 1036 {{true, false}, "lshr i32 %x, %y"}, 1037 {{true, false}, "lshr exact i32 %x, 31"}, 1038 {{false, false}, "udiv i32 %x, %y"}, 1039 {{true, false}, "udiv exact i32 %x, %y"}, 1040 {{false, false}, "getelementptr i8, i8* %p, i32 %x"}, 1041 {{true, false}, "getelementptr inbounds i8, i8* %p, i32 %x"}, 1042 {{true, false}, "fneg nnan float %fx"}, 1043 {{false, false}, "fneg float %fx"}, 1044 {{false, false}, "fadd float %fx, %fy"}, 1045 {{true, false}, "fadd nnan float %fx, %fy"}, 1046 {{false, false}, "urem i32 %x, %y"}, 1047 {{true, false}, "fptoui float %fx to i32"}, 1048 {{true, false}, "fptosi float %fx to i32"}, 1049 {{false, false}, "bitcast float %fx to i32"}, 1050 {{false, false}, "select i1 %cond, i32 %x, i32 %y"}, 1051 {{true, false}, "select nnan i1 %cond, float %fx, float %fy"}, 1052 {{true, false}, "extractelement <4 x i32> %vx, i32 %x"}, 1053 {{false, false}, "extractelement <4 x i32> %vx, i32 3"}, 1054 {{true, false}, "extractelement <vscale x 4 x i32> %svx, i32 4"}, 1055 {{true, false}, "insertelement <4 x i32> %vx, i32 %x, i32 %y"}, 1056 {{false, false}, "insertelement <4 x i32> %vx, i32 %x, i32 3"}, 1057 {{true, false}, "insertelement <vscale x 4 x i32> %svx, i32 %x, i32 4"}, 1058 {{false, false}, "freeze i32 %x"}, 1059 {{false, false}, 1060 "shufflevector <4 x i32> %vx, <4 x i32> %vx2, " 1061 "<4 x i32> <i32 0, i32 1, i32 2, i32 3>"}, 1062 {{false, true}, 1063 "shufflevector <4 x i32> %vx, <4 x i32> %vx2, " 1064 "<4 x i32> <i32 0, i32 1, i32 2, i32 undef>"}, 1065 {{false, true}, 1066 "shufflevector <vscale x 4 x i32> %svx, " 1067 "<vscale x 4 x i32> %svx, <vscale x 4 x i32> undef"}, 1068 {{true, false}, "call i32 @g(i32 %x)"}, 1069 {{false, false}, "call noundef i32 @g(i32 %x)"}, 1070 {{true, false}, "fcmp nnan oeq float %fx, %fy"}, 1071 {{false, false}, "fcmp oeq float %fx, %fy"}, 1072 {{true, false}, 1073 "ashr <4 x i32> %vx, select (i1 icmp sgt (i32 ptrtoint (i32* @s to " 1074 "i32), i32 1), <4 x 
i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 " 1075 "2, i32 3>)"}, 1076 {{false, false}, 1077 "call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)"}, 1078 {{false, false}, 1079 "call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 %y)"}, 1080 {{false, false}, 1081 "call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 %y)"}, 1082 {{false, false}, 1083 "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)"}, 1084 {{false, false}, 1085 "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)"}, 1086 {{false, false}, 1087 "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)"}}; 1088 1089 std::string AssemblyStr = AsmHead; 1090 for (auto &Itm : Data) 1091 AssemblyStr += Itm.second + "\n"; 1092 AssemblyStr += AsmTail; 1093 1094 LLVMContext Context; 1095 SMDiagnostic Error; 1096 auto M = parseAssemblyString(AssemblyStr, Error, Context); 1097 assert(M && "Bad assembly?"); 1098 1099 auto *F = M->getFunction("f"); 1100 assert(F && "Bad assembly?"); 1101 1102 auto &BB = F->getEntryBlock(); 1103 1104 int Index = 0; 1105 for (auto &I : BB) { 1106 if (isa<ReturnInst>(&I)) 1107 break; 1108 bool Poison = Data[Index].first.first; 1109 bool Undef = Data[Index].first.second; 1110 EXPECT_EQ(canCreatePoison(cast<Operator>(&I)), Poison) 1111 << "Incorrect answer of canCreatePoison at instruction " << Index 1112 << " = " << I; 1113 EXPECT_EQ(canCreateUndefOrPoison(cast<Operator>(&I)), Undef || Poison) 1114 << "Incorrect answer of canCreateUndef at instruction " << Index 1115 << " = " << I; 1116 Index++; 1117 } 1118 } 1119 1120 TEST_F(ValueTrackingTest, computePtrAlignment) { 1121 parseAssembly("declare i1 @f_i1()\n" 1122 "declare i8* @f_i8p()\n" 1123 "declare void @llvm.assume(i1)\n" 1124 "define void @test() {\n" 1125 " %A = call i8* @f_i8p()\n" 1126 " %cond = call i1 @f_i1()\n" 1127 " %CxtI = add i32 0, 0\n" 1128 " br i1 %cond, label %BB1, label %EXIT\n" 1129 "BB1:\n" 1130 " %CxtI2 = add i32 0, 0\n" 1131 " %cond2 = call i1 @f_i1()\n" 1132 " call void @llvm.assume(i1 true) [ \"align\"(i8* %A, i64 16) ]\n" 1133 " br i1 %cond2, label %BB2, label %EXIT\n" 1134 "BB2:\n" 1135 " %CxtI3 = add i32 0, 0\n" 1136 " ret void\n" 1137 "EXIT:\n" 1138 " ret void\n" 1139 "}"); 1140 AssumptionCache AC(*F); 1141 DominatorTree DT(*F); 1142 DataLayout DL = M->getDataLayout(); 1143 EXPECT_EQ(getKnownAlignment(A, DL, CxtI, &AC, &DT), Align(1)); 1144 EXPECT_EQ(getKnownAlignment(A, DL, CxtI2, &AC, &DT), Align(1)); 1145 EXPECT_EQ(getKnownAlignment(A, DL, CxtI3, &AC, &DT), Align(16)); 1146 } 1147 1148 TEST_F(ComputeKnownBitsTest, ComputeKnownBits) { 1149 parseAssembly( 1150 "define i32 @test(i32 %a, i32 %b) {\n" 1151 " %ash = mul i32 %a, 8\n" 1152 " %aad = add i32 %ash, 7\n" 1153 " %aan = and i32 %aad, 4095\n" 1154 " %bsh = shl i32 %b, 4\n" 1155 " %bad = or i32 %bsh, 6\n" 1156 " %ban = and i32 %bad, 4095\n" 1157 " %A = mul i32 %aan, %ban\n" 1158 " ret i32 %A\n" 1159 "}\n"); 1160 expectKnownBits(/*zero*/ 4278190085u, /*one*/ 10u); 1161 } 1162 1163 TEST_F(ComputeKnownBitsTest, ComputeKnownMulBits) { 1164 parseAssembly( 1165 "define i32 @test(i32 %a, i32 %b) {\n" 1166 " %aa = shl i32 %a, 5\n" 1167 " %bb = shl i32 %b, 5\n" 1168 " %aaa = or i32 %aa, 24\n" 1169 " %bbb = or i32 %bb, 28\n" 1170 " %A = mul i32 %aaa, %bbb\n" 1171 " ret i32 %A\n" 1172 "}\n"); 1173 expectKnownBits(/*zero*/ 95u, /*one*/ 32u); 1174 } 1175 1176 TEST_F(ValueTrackingTest, isNonZeroRecurrence) { 1177 parseAssembly(R"( 1178 define i1 @test(i8 %n, i8 %r) { 1179 entry: 1180 br label %loop 1181 loop: 1182 %p = phi i8 [ -1, %entry ], [ 
%next, %loop ] 1183 %next = add nsw i8 %p, -1 1184 %cmp1 = icmp eq i8 %p, %n 1185 br i1 %cmp1, label %exit, label %loop 1186 exit: 1187 %A = or i8 %p, %r 1188 %CxtI = icmp eq i8 %A, 0 1189 ret i1 %CxtI 1190 } 1191 )"); 1192 DataLayout DL = M->getDataLayout(); 1193 AssumptionCache AC(*F); 1194 EXPECT_TRUE(isKnownNonZero(A, DL, 0, &AC, CxtI)); 1195 } 1196 1197 TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond) { 1198 parseAssembly(R"( 1199 declare i8* @f_i8() 1200 define void @test(i1 %c) { 1201 %A = call i8* @f_i8() 1202 %B = call i8* @f_i8() 1203 %c1 = icmp ne i8* %A, null 1204 %cond = and i1 %c1, %c 1205 br i1 %cond, label %T, label %Q 1206 T: 1207 %CxtI = add i32 0, 0 1208 ret void 1209 Q: 1210 %CxtI2 = add i32 0, 0 1211 ret void 1212 } 1213 )"); 1214 AssumptionCache AC(*F); 1215 DominatorTree DT(*F); 1216 DataLayout DL = M->getDataLayout(); 1217 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true); 1218 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false); 1219 } 1220 1221 TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond2) { 1222 parseAssembly(R"( 1223 declare i8* @f_i8() 1224 define void @test(i1 %c) { 1225 %A = call i8* @f_i8() 1226 %B = call i8* @f_i8() 1227 %c1 = icmp ne i8* %A, null 1228 %cond = select i1 %c, i1 %c1, i1 false 1229 br i1 %cond, label %T, label %Q 1230 T: 1231 %CxtI = add i32 0, 0 1232 ret void 1233 Q: 1234 %CxtI2 = add i32 0, 0 1235 ret void 1236 } 1237 )"); 1238 AssumptionCache AC(*F); 1239 DominatorTree DT(*F); 1240 DataLayout DL = M->getDataLayout(); 1241 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true); 1242 EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false); 1243 } 1244 1245 TEST_F(ValueTrackingTest, IsImpliedConditionAnd) { 1246 parseAssembly(R"( 1247 define void @test(i32 %x, i32 %y) { 1248 %c1 = icmp ult i32 %x, 10 1249 %c2 = icmp ult i32 %y, 15 1250 %A = and i1 %c1, %c2 1251 ; x < 10 /\ y < 15 1252 %A2 = icmp ult i32 %x, 20 1253 %A3 = icmp uge i32 %y, 20 1254 %A4 = icmp ult i32 %x, 5 1255 ret void 1256 } 1257 )"); 1258 DataLayout DL = M->getDataLayout(); 1259 EXPECT_EQ(isImpliedCondition(A, A2, DL), true); 1260 EXPECT_EQ(isImpliedCondition(A, A3, DL), false); 1261 EXPECT_EQ(isImpliedCondition(A, A4, DL), None); 1262 } 1263 1264 TEST_F(ValueTrackingTest, IsImpliedConditionAnd2) { 1265 parseAssembly(R"( 1266 define void @test(i32 %x, i32 %y) { 1267 %c1 = icmp ult i32 %x, 10 1268 %c2 = icmp ult i32 %y, 15 1269 %A = select i1 %c1, i1 %c2, i1 false 1270 ; x < 10 /\ y < 15 1271 %A2 = icmp ult i32 %x, 20 1272 %A3 = icmp uge i32 %y, 20 1273 %A4 = icmp ult i32 %x, 5 1274 ret void 1275 } 1276 )"); 1277 DataLayout DL = M->getDataLayout(); 1278 EXPECT_EQ(isImpliedCondition(A, A2, DL), true); 1279 EXPECT_EQ(isImpliedCondition(A, A3, DL), false); 1280 EXPECT_EQ(isImpliedCondition(A, A4, DL), None); 1281 } 1282 1283 TEST_F(ValueTrackingTest, IsImpliedConditionOr) { 1284 parseAssembly(R"( 1285 define void @test(i32 %x, i32 %y) { 1286 %c1 = icmp ult i32 %x, 10 1287 %c2 = icmp ult i32 %y, 15 1288 %A = or i1 %c1, %c2 ; negated 1289 ; x >= 10 /\ y >= 15 1290 %A2 = icmp ult i32 %x, 5 1291 %A3 = icmp uge i32 %y, 10 1292 %A4 = icmp ult i32 %x, 15 1293 ret void 1294 } 1295 )"); 1296 DataLayout DL = M->getDataLayout(); 1297 EXPECT_EQ(isImpliedCondition(A, A2, DL, false), false); 1298 EXPECT_EQ(isImpliedCondition(A, A3, DL, false), true); 1299 EXPECT_EQ(isImpliedCondition(A, A4, DL, false), None); 1300 } 1301 1302 TEST_F(ValueTrackingTest, IsImpliedConditionOr2) { 1303 parseAssembly(R"( 1304 define void @test(i32 %x, i32 %y) { 1305 %c1 = icmp ult i32 
%x, 10 1306 %c2 = icmp ult i32 %y, 15 1307 %A = select i1 %c1, i1 true, i1 %c2 ; negated 1308 ; x >= 10 /\ y >= 15 1309 %A2 = icmp ult i32 %x, 5 1310 %A3 = icmp uge i32 %y, 10 1311 %A4 = icmp ult i32 %x, 15 1312 ret void 1313 } 1314 )"); 1315 DataLayout DL = M->getDataLayout(); 1316 EXPECT_EQ(isImpliedCondition(A, A2, DL, false), false); 1317 EXPECT_EQ(isImpliedCondition(A, A3, DL, false), true); 1318 EXPECT_EQ(isImpliedCondition(A, A4, DL, false), None); 1319 } 1320 1321 TEST_F(ComputeKnownBitsTest, KnownNonZeroShift) { 1322 // %q is known nonzero without known bits. 1323 // Because %q is nonzero, %A[0] is known to be zero. 1324 parseAssembly( 1325 "define i8 @test(i8 %p, i8* %pq) {\n" 1326 " %q = load i8, i8* %pq, !range !0\n" 1327 " %A = shl i8 %p, %q\n" 1328 " ret i8 %A\n" 1329 "}\n" 1330 "!0 = !{ i8 1, i8 5 }\n"); 1331 expectKnownBits(/*zero*/ 1u, /*one*/ 0u); 1332 } 1333 1334 TEST_F(ComputeKnownBitsTest, ComputeKnownFshl) { 1335 // fshl(....1111....0000, 00..1111........, 6) 1336 // = 11....000000..11 1337 parseAssembly( 1338 "define i16 @test(i16 %a, i16 %b) {\n" 1339 " %aa = shl i16 %a, 4\n" 1340 " %bb = lshr i16 %b, 2\n" 1341 " %aaa = or i16 %aa, 3840\n" 1342 " %bbb = or i16 %bb, 3840\n" 1343 " %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 6)\n" 1344 " ret i16 %A\n" 1345 "}\n" 1346 "declare i16 @llvm.fshl.i16(i16, i16, i16)\n"); 1347 expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u); 1348 } 1349 1350 TEST_F(ComputeKnownBitsTest, ComputeKnownFshr) { 1351 // fshr(....1111....0000, 00..1111........, 26) 1352 // = 11....000000..11 1353 parseAssembly( 1354 "define i16 @test(i16 %a, i16 %b) {\n" 1355 " %aa = shl i16 %a, 4\n" 1356 " %bb = lshr i16 %b, 2\n" 1357 " %aaa = or i16 %aa, 3840\n" 1358 " %bbb = or i16 %bb, 3840\n" 1359 " %A = call i16 @llvm.fshr.i16(i16 %aaa, i16 %bbb, i16 26)\n" 1360 " ret i16 %A\n" 1361 "}\n" 1362 "declare i16 @llvm.fshr.i16(i16, i16, i16)\n"); 1363 expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u); 1364 } 1365 1366 TEST_F(ComputeKnownBitsTest, ComputeKnownFshlZero) { 1367 // fshl(....1111....0000, 00..1111........, 0) 1368 // = ....1111....0000 1369 parseAssembly( 1370 "define i16 @test(i16 %a, i16 %b) {\n" 1371 " %aa = shl i16 %a, 4\n" 1372 " %bb = lshr i16 %b, 2\n" 1373 " %aaa = or i16 %aa, 3840\n" 1374 " %bbb = or i16 %bb, 3840\n" 1375 " %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 0)\n" 1376 " ret i16 %A\n" 1377 "}\n" 1378 "declare i16 @llvm.fshl.i16(i16, i16, i16)\n"); 1379 expectKnownBits(/*zero*/ 15u, /*one*/ 3840u); 1380 } 1381 1382 TEST_F(ComputeKnownBitsTest, ComputeKnownUAddSatLeadingOnes) { 1383 // uadd.sat(1111...1, ........) 1384 // = 1111.... 1385 parseAssembly( 1386 "define i8 @test(i8 %a, i8 %b) {\n" 1387 " %aa = or i8 %a, 241\n" 1388 " %A = call i8 @llvm.uadd.sat.i8(i8 %aa, i8 %b)\n" 1389 " ret i8 %A\n" 1390 "}\n" 1391 "declare i8 @llvm.uadd.sat.i8(i8, i8)\n"); 1392 expectKnownBits(/*zero*/ 0u, /*one*/ 240u); 1393 } 1394 1395 TEST_F(ComputeKnownBitsTest, ComputeKnownUAddSatOnesPreserved) { 1396 // uadd.sat(00...011, .1...110) 1397 // = .......1 1398 parseAssembly( 1399 "define i8 @test(i8 %a, i8 %b) {\n" 1400 " %aa = or i8 %a, 3\n" 1401 " %aaa = and i8 %aa, 59\n" 1402 " %bb = or i8 %b, 70\n" 1403 " %bbb = and i8 %bb, 254\n" 1404 " %A = call i8 @llvm.uadd.sat.i8(i8 %aaa, i8 %bbb)\n" 1405 " ret i8 %A\n" 1406 "}\n" 1407 "declare i8 @llvm.uadd.sat.i8(i8, i8)\n"); 1408 expectKnownBits(/*zero*/ 0u, /*one*/ 1u); 1409 } 1410 1411 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatLHSLeadingZeros) { 1412 // usub.sat(0000...0, ........) 
1413 // = 0000.... 1414 parseAssembly( 1415 "define i8 @test(i8 %a, i8 %b) {\n" 1416 " %aa = and i8 %a, 14\n" 1417 " %A = call i8 @llvm.usub.sat.i8(i8 %aa, i8 %b)\n" 1418 " ret i8 %A\n" 1419 "}\n" 1420 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1421 expectKnownBits(/*zero*/ 240u, /*one*/ 0u); 1422 } 1423 1424 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatRHSLeadingOnes) { 1425 // usub.sat(........, 1111...1) 1426 // = 0000.... 1427 parseAssembly( 1428 "define i8 @test(i8 %a, i8 %b) {\n" 1429 " %bb = or i8 %a, 241\n" 1430 " %A = call i8 @llvm.usub.sat.i8(i8 %a, i8 %bb)\n" 1431 " ret i8 %A\n" 1432 "}\n" 1433 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1434 expectKnownBits(/*zero*/ 240u, /*one*/ 0u); 1435 } 1436 1437 TEST_F(ComputeKnownBitsTest, ComputeKnownUSubSatZerosPreserved) { 1438 // usub.sat(11...011, .1...110) 1439 // = ......0. 1440 parseAssembly( 1441 "define i8 @test(i8 %a, i8 %b) {\n" 1442 " %aa = or i8 %a, 195\n" 1443 " %aaa = and i8 %aa, 251\n" 1444 " %bb = or i8 %b, 70\n" 1445 " %bbb = and i8 %bb, 254\n" 1446 " %A = call i8 @llvm.usub.sat.i8(i8 %aaa, i8 %bbb)\n" 1447 " ret i8 %A\n" 1448 "}\n" 1449 "declare i8 @llvm.usub.sat.i8(i8, i8)\n"); 1450 expectKnownBits(/*zero*/ 2u, /*one*/ 0u); 1451 } 1452 1453 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntTrunc) { 1454 // ptrtoint truncates the pointer type. 1455 parseAssembly( 1456 "define void @test(i8** %p) {\n" 1457 " %A = load i8*, i8** %p\n" 1458 " %i = ptrtoint i8* %A to i32\n" 1459 " %m = and i32 %i, 31\n" 1460 " %c = icmp eq i32 %m, 0\n" 1461 " call void @llvm.assume(i1 %c)\n" 1462 " ret void\n" 1463 "}\n" 1464 "declare void @llvm.assume(i1)\n"); 1465 AssumptionCache AC(*F); 1466 KnownBits Known = computeKnownBits( 1467 A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator()); 1468 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1469 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1470 } 1471 1472 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsPtrToIntZext) { 1473 // ptrtoint zero extends the pointer type. 
1474 parseAssembly( 1475 "define void @test(i8** %p) {\n" 1476 " %A = load i8*, i8** %p\n" 1477 " %i = ptrtoint i8* %A to i128\n" 1478 " %m = and i128 %i, 31\n" 1479 " %c = icmp eq i128 %m, 0\n" 1480 " call void @llvm.assume(i1 %c)\n" 1481 " ret void\n" 1482 "}\n" 1483 "declare void @llvm.assume(i1)\n"); 1484 AssumptionCache AC(*F); 1485 KnownBits Known = computeKnownBits( 1486 A, M->getDataLayout(), /* Depth */ 0, &AC, F->front().getTerminator()); 1487 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1488 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1489 } 1490 1491 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsFreeze) { 1492 parseAssembly("define void @test() {\n" 1493 " %m = call i32 @any_num()\n" 1494 " %A = freeze i32 %m\n" 1495 " %n = and i32 %m, 31\n" 1496 " %c = icmp eq i32 %n, 0\n" 1497 " call void @llvm.assume(i1 %c)\n" 1498 " ret void\n" 1499 "}\n" 1500 "declare void @llvm.assume(i1)\n" 1501 "declare i32 @any_num()\n"); 1502 AssumptionCache AC(*F); 1503 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1504 F->front().getTerminator()); 1505 EXPECT_EQ(Known.Zero.getZExtValue(), 31u); 1506 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1507 } 1508 1509 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRange) { 1510 parseAssembly("define void @test(i64* %p) {\n" 1511 " %A = load i64, i64* %p, !range !{i64 64, i64 65536}\n" 1512 " %APlus512 = add i64 %A, 512\n" 1513 " %c = icmp ugt i64 %APlus512, 523\n" 1514 " call void @llvm.assume(i1 %c)\n" 1515 " ret void\n" 1516 "}\n" 1517 "declare void @llvm.assume(i1)\n"); 1518 AssumptionCache AC(*F); 1519 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1520 F->front().getTerminator()); 1521 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1)); 1522 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1523 Instruction &APlus512 = findInstructionByName(F, "APlus512"); 1524 Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1525 F->front().getTerminator()); 1526 // We know of one less zero because 512 may have produced a 1 that 1527 // got carried all the way to the first trailing zero. 1528 EXPECT_EQ(Known.Zero.getZExtValue(), (~(65536llu - 1)) << 1); 1529 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1530 // The known range is not precise given computeKnownBits works 1531 // with the masks of zeros and ones, not the ranges. 1532 EXPECT_EQ(Known.getMinValue(), 0u); 1533 EXPECT_EQ(Known.getMaxValue(), 131071); 1534 } 1535 1536 // 512 + [32, 64) doesn't produce overlapping bits. 1537 // Make sure we get all the individual bits properly. 
1538 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRangeNoOverlap) { 1539 parseAssembly("define void @test(i64* %p) {\n" 1540 " %A = load i64, i64* %p, !range !{i64 32, i64 64}\n" 1541 " %APlus512 = add i64 %A, 512\n" 1542 " %c = icmp ugt i64 %APlus512, 523\n" 1543 " call void @llvm.assume(i1 %c)\n" 1544 " ret void\n" 1545 "}\n" 1546 "declare void @llvm.assume(i1)\n"); 1547 AssumptionCache AC(*F); 1548 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1549 F->front().getTerminator()); 1550 EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1)); 1551 EXPECT_EQ(Known.One.getZExtValue(), 32u); 1552 Instruction &APlus512 = findInstructionByName(F, "APlus512"); 1553 Known = computeKnownBits(&APlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1554 F->front().getTerminator()); 1555 EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1)); 1556 EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u); 1557 // The known range is not precise given computeKnownBits works 1558 // with the masks of zeros and ones, not the ranges. 1559 EXPECT_EQ(Known.getMinValue(), 544); 1560 EXPECT_EQ(Known.getMaxValue(), 575); 1561 } 1562 1563 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRange) { 1564 parseAssembly( 1565 "define void @test(i64* %p) {\n" 1566 " %A = load i64, i64* %p, !range !{i64 64, i64 65536}\n" 1567 " %APtr = inttoptr i64 %A to float*" 1568 " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n" 1569 " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n" 1570 " call void @llvm.assume(i1 %c)\n" 1571 " ret void\n" 1572 "}\n" 1573 "declare void @llvm.assume(i1)\n"); 1574 AssumptionCache AC(*F); 1575 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1576 F->front().getTerminator()); 1577 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1)); 1578 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1579 Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512"); 1580 Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1581 F->front().getTerminator()); 1582 // We know of one less zero because 512 may have produced a 1 that 1583 // got carried all the way to the first trailing zero. 1584 EXPECT_EQ(Known.Zero.getZExtValue(), ~(65536llu - 1) << 1); 1585 EXPECT_EQ(Known.One.getZExtValue(), 0u); 1586 // The known range is not precise given computeKnownBits works 1587 // with the masks of zeros and ones, not the ranges. 1588 EXPECT_EQ(Known.getMinValue(), 0u); 1589 EXPECT_EQ(Known.getMaxValue(), 131071); 1590 } 1591 1592 // 4*128 + [32, 64) doesn't produce overlapping bits. 1593 // Make sure we get all the individual bits properly. 1594 // This test is useful to check that we account for the scaling factor 1595 // in the gep. Indeed, gep float, [32,64), 128 is not 128 + [32,64). 
1596 TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPWithRangeNoOverlap) { 1597 parseAssembly( 1598 "define void @test(i64* %p) {\n" 1599 " %A = load i64, i64* %p, !range !{i64 32, i64 64}\n" 1600 " %APtr = inttoptr i64 %A to float*" 1601 " %APtrPlus512 = getelementptr float, float* %APtr, i32 128\n" 1602 " %c = icmp ugt float* %APtrPlus512, inttoptr (i32 523 to float*)\n" 1603 " call void @llvm.assume(i1 %c)\n" 1604 " ret void\n" 1605 "}\n" 1606 "declare void @llvm.assume(i1)\n"); 1607 AssumptionCache AC(*F); 1608 KnownBits Known = computeKnownBits(A, M->getDataLayout(), /* Depth */ 0, &AC, 1609 F->front().getTerminator()); 1610 EXPECT_EQ(Known.Zero.getZExtValue(), ~(64llu - 1)); 1611 EXPECT_EQ(Known.One.getZExtValue(), 32u); 1612 Instruction &APtrPlus512 = findInstructionByName(F, "APtrPlus512"); 1613 Known = computeKnownBits(&APtrPlus512, M->getDataLayout(), /* Depth */ 0, &AC, 1614 F->front().getTerminator()); 1615 EXPECT_EQ(Known.Zero.getZExtValue(), ~512llu & ~(64llu - 1)); 1616 EXPECT_EQ(Known.One.getZExtValue(), 512u | 32u); 1617 // The known range is not precise given computeKnownBits works 1618 // with the masks of zeros and ones, not the ranges. 1619 EXPECT_EQ(Known.getMinValue(), 544); 1620 EXPECT_EQ(Known.getMaxValue(), 575); 1621 } 1622 1623 class IsBytewiseValueTest : public ValueTrackingTest, 1624 public ::testing::WithParamInterface< 1625 std::pair<const char *, const char *>> { 1626 protected: 1627 }; 1628 1629 const std::pair<const char *, const char *> IsBytewiseValueTests[] = { 1630 { 1631 "i8 0", 1632 "i48* null", 1633 }, 1634 { 1635 "i8 undef", 1636 "i48* undef", 1637 }, 1638 { 1639 "i8 0", 1640 "i8 zeroinitializer", 1641 }, 1642 { 1643 "i8 0", 1644 "i8 0", 1645 }, 1646 { 1647 "i8 -86", 1648 "i8 -86", 1649 }, 1650 { 1651 "i8 -1", 1652 "i8 -1", 1653 }, 1654 { 1655 "i8 undef", 1656 "i16 undef", 1657 }, 1658 { 1659 "i8 0", 1660 "i16 0", 1661 }, 1662 { 1663 "", 1664 "i16 7", 1665 }, 1666 { 1667 "i8 -86", 1668 "i16 -21846", 1669 }, 1670 { 1671 "i8 -1", 1672 "i16 -1", 1673 }, 1674 { 1675 "i8 0", 1676 "i48 0", 1677 }, 1678 { 1679 "i8 -1", 1680 "i48 -1", 1681 }, 1682 { 1683 "i8 0", 1684 "i49 0", 1685 }, 1686 { 1687 "", 1688 "i49 -1", 1689 }, 1690 { 1691 "i8 0", 1692 "half 0xH0000", 1693 }, 1694 { 1695 "i8 -85", 1696 "half 0xHABAB", 1697 }, 1698 { 1699 "i8 0", 1700 "float 0.0", 1701 }, 1702 { 1703 "i8 -1", 1704 "float 0xFFFFFFFFE0000000", 1705 }, 1706 { 1707 "i8 0", 1708 "double 0.0", 1709 }, 1710 { 1711 "i8 -15", 1712 "double 0xF1F1F1F1F1F1F1F1", 1713 }, 1714 { 1715 "i8 undef", 1716 "i16* undef", 1717 }, 1718 { 1719 "i8 0", 1720 "i16* inttoptr (i64 0 to i16*)", 1721 }, 1722 { 1723 "i8 -1", 1724 "i16* inttoptr (i64 -1 to i16*)", 1725 }, 1726 { 1727 "i8 -86", 1728 "i16* inttoptr (i64 -6148914691236517206 to i16*)", 1729 }, 1730 { 1731 "", 1732 "i16* inttoptr (i48 -1 to i16*)", 1733 }, 1734 { 1735 "i8 -1", 1736 "i16* inttoptr (i96 -1 to i16*)", 1737 }, 1738 { 1739 "i8 undef", 1740 "[0 x i8] zeroinitializer", 1741 }, 1742 { 1743 "i8 undef", 1744 "[0 x i8] undef", 1745 }, 1746 { 1747 "i8 undef", 1748 "[5 x [0 x i8]] zeroinitializer", 1749 }, 1750 { 1751 "i8 undef", 1752 "[5 x [0 x i8]] undef", 1753 }, 1754 { 1755 "i8 0", 1756 "[6 x i8] zeroinitializer", 1757 }, 1758 { 1759 "i8 undef", 1760 "[6 x i8] undef", 1761 }, 1762 { 1763 "i8 1", 1764 "[5 x i8] [i8 1, i8 1, i8 1, i8 1, i8 1]", 1765 }, 1766 { 1767 "", 1768 "[5 x i64] [i64 1, i64 1, i64 1, i64 1, i64 1]", 1769 }, 1770 { 1771 "i8 -1", 1772 "[5 x i64] [i64 -1, i64 -1, i64 -1, i64 -1, i64 -1]", 1773 }, 1774 { 1775 "", 1776 "[4 x 
  EXPECT_EQ(Known.getMinValue(), 544);
  EXPECT_EQ(Known.getMaxValue(), 575);
}

class IsBytewiseValueTest : public ValueTrackingTest,
                            public ::testing::WithParamInterface<
                                std::pair<const char *, const char *>> {
protected:
};
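// Each entry pairs the printed form of the value isBytewiseValue is expected
// to return (e.g. "i8 0") with the constant it is given; an empty first
// string means no byte-splat value exists and isBytewiseValue is expected to
// return nullptr, which the TEST_P body below prints as an empty string.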
const std::pair<const char *, const char *> IsBytewiseValueTests[] = {
    {"i8 0", "i48* null"},
    {"i8 undef", "i48* undef"},
    {"i8 0", "i8 zeroinitializer"},
    {"i8 0", "i8 0"},
    {"i8 -86", "i8 -86"},
    {"i8 -1", "i8 -1"},
    {"i8 undef", "i16 undef"},
    {"i8 0", "i16 0"},
    {"", "i16 7"},
    {"i8 -86", "i16 -21846"},
    {"i8 -1", "i16 -1"},
    {"i8 0", "i48 0"},
    {"i8 -1", "i48 -1"},
    {"i8 0", "i49 0"},
    {"", "i49 -1"},
    {"i8 0", "half 0xH0000"},
    {"i8 -85", "half 0xHABAB"},
    {"i8 0", "float 0.0"},
    {"i8 -1", "float 0xFFFFFFFFE0000000"},
    {"i8 0", "double 0.0"},
    {"i8 -15", "double 0xF1F1F1F1F1F1F1F1"},
    {"i8 undef", "i16* undef"},
    {"i8 0", "i16* inttoptr (i64 0 to i16*)"},
    {"i8 -1", "i16* inttoptr (i64 -1 to i16*)"},
    {"i8 -86", "i16* inttoptr (i64 -6148914691236517206 to i16*)"},
    {"", "i16* inttoptr (i48 -1 to i16*)"},
    {"i8 -1", "i16* inttoptr (i96 -1 to i16*)"},
    {"i8 undef", "[0 x i8] zeroinitializer"},
    {"i8 undef", "[0 x i8] undef"},
    {"i8 undef", "[5 x [0 x i8]] zeroinitializer"},
    {"i8 undef", "[5 x [0 x i8]] undef"},
    {"i8 0", "[6 x i8] zeroinitializer"},
    {"i8 undef", "[6 x i8] undef"},
    {"i8 1", "[5 x i8] [i8 1, i8 1, i8 1, i8 1, i8 1]"},
    {"", "[5 x i64] [i64 1, i64 1, i64 1, i64 1, i64 1]"},
    {"i8 -1", "[5 x i64] [i64 -1, i64 -1, i64 -1, i64 -1, i64 -1]"},
    {"", "[4 x i8] [i8 1, i8 2, i8 1, i8 1]"},
    {"i8 1", "[4 x i8] [i8 1, i8 undef, i8 1, i8 1]"},
    {"i8 0", "<6 x i8> zeroinitializer"},
    {"i8 undef", "<6 x i8> undef"},
    {"i8 1", "<5 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1>"},
    {"", "<5 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1>"},
    {"i8 -1", "<5 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>"},
    {"", "<4 x i8> <i8 1, i8 1, i8 2, i8 1>"},
    {"i8 5", "<2 x i8> < i8 5, i8 undef >"},
    {"i8 0", "[2 x [2 x i16]] zeroinitializer"},
    {"i8 undef", "[2 x [2 x i16]] undef"},
    {"i8 -86", "[2 x [2 x i16]] [[2 x i16] [i16 -21846, i16 -21846], "
               "[2 x i16] [i16 -21846, i16 -21846]]"},
    {"", "[2 x [2 x i16]] [[2 x i16] [i16 -21846, i16 -21846], "
         "[2 x i16] [i16 -21836, i16 -21846]]"},
    {"i8 undef", "{ } zeroinitializer"},
    {"i8 undef", "{ } undef"},
    {"i8 undef", "{ {}, {} } zeroinitializer"},
    {"i8 undef", "{ {}, {} } undef"},
    {"i8 0", "{i8, i64, i16*} zeroinitializer"},
    {"i8 undef", "{i8, i64, i16*} undef"},
    {"i8 -86",
     "{i8, i64, i16*} {i8 -86, i64 -6148914691236517206, i16* undef}"},
    {"", "{i8, i64, i16*} {i8 86, i64 -6148914691236517206, i16* undef}"},
};

INSTANTIATE_TEST_SUITE_P(IsBytewiseValueParamTests, IsBytewiseValueTest,
                         ::testing::ValuesIn(IsBytewiseValueTests));

TEST_P(IsBytewiseValueTest, IsBytewiseValue) {
  auto M = parseModule(std::string("@test = global ") + GetParam().second);
  GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getNamedValue("test"));
  Value *Actual = isBytewiseValue(GV->getInitializer(), M->getDataLayout());
  std::string Buff;
  raw_string_ostream S(Buff);
  if (Actual)
    S << *Actual;
  EXPECT_EQ(GetParam().first, S.str());
}

TEST_F(ValueTrackingTest, ComputeConstantRange) {
  {
    // Assumptions:
    //  * stride >= 5
    //  * stride < 10
    //
    // stride = [5, 10)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 10
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    ConstantRange CR1 = computeConstantRange(Stride, true, &AC, nullptr);
    EXPECT_TRUE(CR1.isFullSet());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR2 = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(5, CR2.getLower());
    EXPECT_EQ(10, CR2.getUpper());
  }

  {
    // Assumptions:
    //  * stride >= 5
    //  * stride < 200
    //  * stride == 99
    //
    // stride = [99, 100)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 200
    call void @llvm.assume(i1 %lt)
    %eq = icmp eq i32 %stride, 99
    call void @llvm.assume(i1 %eq)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(99, *CR.getSingleElement());
  }

  {
    // Assumptions:
    //  * stride >= 5
    //  * stride >= 50
    //  * stride < 100
    //  * stride < 200
    //
    // stride = [50, 100)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride, i1 %cond) {
    %gt = icmp uge i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %gt.2 = icmp uge i32 %stride, 50
    call void @llvm.assume(i1 %gt.2)
    br i1 %cond, label %bb1, label %bb2

  bb1:
    %lt = icmp ult i32 %stride, 200
    call void @llvm.assume(i1 %lt)
    %lt.2 = icmp ult i32 %stride, 100
    call void @llvm.assume(i1 %lt.2)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one

  bb2:
    ret i32 0
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();
    Instruction *GT2 = &findInstructionByName(F, "gt.2");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, GT2);
    EXPECT_EQ(5, CR.getLower());
    EXPECT_EQ(0, CR.getUpper());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR2 = computeConstantRange(Stride, true, &AC, I);
    EXPECT_EQ(50, CR2.getLower());
    EXPECT_EQ(100, CR2.getUpper());
  }

  {
    // Assumptions:
    //  * stride > 5
    //  * stride < 5
    //
    // stride = empty range, as the assumptions contradict each other.
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %stride, i1 %cond) {
    %gt = icmp ugt i32 %stride, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %stride, 5
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %stride, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *Stride = &*F->arg_begin();

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR = computeConstantRange(Stride, true, &AC, I);
    EXPECT_TRUE(CR.isEmptySet());
  }

  {
    // Assumptions:
    //  * x.1 >= 5
    //  * x.2 < x.1
    //
    // x.2 = [0, 5)
    auto M = parseModule(R"(
  declare void @llvm.assume(i1)

  define i32 @test(i32 %x.1, i32 %x.2) {
    %gt = icmp uge i32 %x.1, 5
    call void @llvm.assume(i1 %gt)
    %lt = icmp ult i32 %x.2, %x.1
    call void @llvm.assume(i1 %lt)
    %stride.plus.one = add nsw nuw i32 %x.1, 1
    ret i32 %stride.plus.one
  })");
    Function *F = M->getFunction("test");

    AssumptionCache AC(*F);
    Value *X2 = &*std::next(F->arg_begin());

    Instruction *I = &findInstructionByName(F, "stride.plus.one");
    ConstantRange CR1 = computeConstantRange(X2, true, &AC, I);
    EXPECT_EQ(0, CR1.getLower());
    EXPECT_EQ(5, CR1.getUpper());

    // Check that the depth cutoff gives a conservative result (full set) by
    // passing Depth == MaxDepth == 6.
    ConstantRange CR2 = computeConstantRange(X2, true, &AC, I, 6);
    EXPECT_TRUE(CR2.isFullSet());
  }
}
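// Each IR snippet below names the value under test %r. AnyOffsetResult is
// the expected outcome of calling findAllocaForValue on %r, and
// ZeroOffsetResult the expected outcome when the second argument is true,
// which (judging by the getelementptr cases) additionally requires %r to
// point at offset zero inside the alloca. See the two TEST_P bodies after
// the table.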
struct FindAllocaForValueTestParams {
  const char *IR;
  bool AnyOffsetResult;
  bool ZeroOffsetResult;
};

class FindAllocaForValueTest
    : public ValueTrackingTest,
      public ::testing::WithParamInterface<FindAllocaForValueTestParams> {
protected:
};

const FindAllocaForValueTestParams FindAllocaForValueTests[] = {
    {R"(
      define void @test() {
        %a = alloca i64
        %r = bitcast i64* %a to i32*
        ret void
      })",
     true, true},

    {R"(
      define void @test() {
        %a = alloca i32
        %r = getelementptr i32, i32* %a, i32 1
        ret void
      })",
     true, false},

    {R"(
      define void @test() {
        %a = alloca i32
        %r = getelementptr i32, i32* %a, i32 0
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i32
        br label %bb1

      bb1:
        %r = phi i32* [ %a, %entry ], [ %r, %bb1 ]
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
        %a = alloca i32
        %r = select i1 %cond, i32* %a, i32* %a
        ret void
      })",
     true, true},

    {R"(
      define void @test(i1 %cond) {
        %a = alloca i32
        %b = alloca i32
        %r = select i1 %cond, i32* %a, i32* %b
        ret void
      })",
     false, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i64
        %a32 = bitcast i64* %a to i32*
        br label %bb1

      bb1:
        %x = phi i32* [ %a32, %entry ], [ %x, %bb1 ]
        %r = getelementptr i32, i32* %x, i32 1
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i64
        %a32 = bitcast i64* %a to i32*
        br label %bb1

      bb1:
        %x = phi i32* [ %a32, %entry ], [ %r, %bb1 ]
        %r = getelementptr i32, i32* %x, i32 1
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     true, false},

    {R"(
      define void @test(i1 %cond, i64* %a) {
      entry:
        %r = bitcast i64* %a to i32*
        ret void
      })",
     false, false},

    {R"(
      define void @test(i1 %cond) {
      entry:
        %a = alloca i32
        %b = alloca i32
        br label %bb1

      bb1:
        %r = phi i32* [ %a, %entry ], [ %b, %bb1 ]
        br i1 %cond, label %bb1, label %exit

      exit:
        ret void
      })",
     false, false},
};

TEST_P(FindAllocaForValueTest, findAllocaForValue) {
  auto M = parseModule(GetParam().IR);
  Function *F = M->getFunction("test");
  Instruction *I = &findInstructionByName(F, "r");
  const AllocaInst *AI = findAllocaForValue(I);
  EXPECT_EQ(!!AI, GetParam().AnyOffsetResult);
}

TEST_P(FindAllocaForValueTest, findAllocaForValueZeroOffset) {
  auto M = parseModule(GetParam().IR);
  Function *F = M->getFunction("test");
  Instruction *I = &findInstructionByName(F, "r");
  const AllocaInst *AI = findAllocaForValue(I, true);
  EXPECT_EQ(!!AI, GetParam().ZeroOffsetResult);
}

INSTANTIATE_TEST_SUITE_P(FindAllocaForValueTest, FindAllocaForValueTest,
                         ::testing::ValuesIn(FindAllocaForValueTests));