; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b)

define i1 @test_uadd1() {
; CHECK-LABEL: @test_uadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_uadd2() {
; CHECK-LABEL: @test_uadd2(
; CHECK-NEXT:    ret i8 42
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_uadd3(i8 %v) {
; CHECK-LABEL: @test_uadd3(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd3_poison(i8 %v) {
; CHECK-LABEL: @test_uadd3_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4(i8 %v) {
; CHECK-LABEL: @test_uadd4(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4_poison(i8 %v) {
; CHECK-LABEL: @test_uadd4_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define i1 @test_sadd1() {
; CHECK-LABEL: @test_sadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_sadd2() {
; CHECK-LABEL: @test_sadd2(
; CHECK-NEXT:    ret i8 -86
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_sadd3(i8 %v) {
; CHECK-LABEL: @test_sadd3(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd3_poison(i8 %v) {
; CHECK-LABEL: @test_sadd3_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4(i8 %v) {
; CHECK-LABEL: @test_sadd4(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4_poison(i8 %v) {
; CHECK-LABEL: @test_sadd4_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

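; Subtraction with overflow: x - x, or a sub where one operand is undef/poison, folds to { 0, false }.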
define {i8, i1} @test_usub1(i8 %V) {
; CHECK-LABEL: @test_usub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2(i8 %V) {
; CHECK-LABEL: @test_usub2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2_poison(i8 %V) {
; CHECK-LABEL: @test_usub2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3(i8 %V) {
; CHECK-LABEL: @test_usub3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3_poison(i8 %V) {
; CHECK-LABEL: @test_usub3_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub1(i8 %V) {
; CHECK-LABEL: @test_ssub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2(i8 %V) {
; CHECK-LABEL: @test_ssub2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2_poison(i8 %V) {
; CHECK-LABEL: @test_ssub2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3(i8 %V) {
; CHECK-LABEL: @test_ssub3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3_poison(i8 %V) {
; CHECK-LABEL: @test_ssub3_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

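; Multiplication with overflow: a zero operand (or an undef/poison operand that can be treated as zero) folds the result to { 0, false }.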
define {i8, i1} @test_umul1(i8 %V) {
; CHECK-LABEL: @test_umul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2(i8 %V) {
; CHECK-LABEL: @test_umul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2_poison(i8 %V) {
; CHECK-LABEL: @test_umul2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul3(i8 %V) {
; CHECK-LABEL: @test_umul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4(i8 %V) {
; CHECK-LABEL: @test_umul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4_poison(i8 %V) {
; CHECK-LABEL: @test_umul4_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul1(i8 %V) {
; CHECK-LABEL: @test_smul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2(i8 %V) {
; CHECK-LABEL: @test_smul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2_poison(i8 %V) {
; CHECK-LABEL: @test_smul2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul3(i8 %V) {
; CHECK-LABEL: @test_smul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4(i8 %V) {
; CHECK-LABEL: @test_smul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4_poison(i8 %V) {
; CHECK-LABEL: @test_smul4_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

; Test a non-intrinsic that we know about as a library call.
declare float @fabs(float %x)

define float @test_fabs_libcall() {
; CHECK-LABEL: @test_fabs_libcall(
; CHECK-NEXT:    [[X:%.*]] = call float @fabs(float -4.200000e+01)
; CHECK-NEXT:    ret float 4.200000e+01
;

  %x = call float @fabs(float -42.0)
; This is still a real function call, so instsimplify won't nuke it -- other
; passes have to do that.

  ret float %x
}


declare float @llvm.fabs.f32(float) nounwind readnone
declare float @llvm.floor.f32(float) nounwind readnone
declare float @llvm.ceil.f32(float) nounwind readnone
declare float @llvm.trunc.f32(float) nounwind readnone
declare float @llvm.rint.f32(float) nounwind readnone
declare float @llvm.nearbyint.f32(float) nounwind readnone
declare float @llvm.canonicalize.f32(float) nounwind readnone

; Test idempotent intrinsics
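; For each intrinsic below, f(f(x)) == f(x), so the second call of every pair is replaced by the first.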
define float @test_idempotence(float %a) {
; CHECK-LABEL: @test_idempotence(
; CHECK-NEXT:    [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]])
; CHECK-NEXT:    [[B0:%.*]] = call float @llvm.floor.f32(float [[A]])
; CHECK-NEXT:    [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]])
; CHECK-NEXT:    [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]])
; CHECK-NEXT:    [[E0:%.*]] = call float @llvm.rint.f32(float [[A]])
; CHECK-NEXT:    [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]])
; CHECK-NEXT:    [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]])
; CHECK-NEXT:    [[R0:%.*]] = fadd float [[A0]], [[B0]]
; CHECK-NEXT:    [[R1:%.*]] = fadd float [[R0]], [[C0]]
; CHECK-NEXT:    [[R2:%.*]] = fadd float [[R1]], [[D0]]
; CHECK-NEXT:    [[R3:%.*]] = fadd float [[R2]], [[E0]]
; CHECK-NEXT:    [[R4:%.*]] = fadd float [[R3]], [[F0]]
; CHECK-NEXT:    [[R5:%.*]] = fadd float [[R4]], [[G0]]
; CHECK-NEXT:    ret float [[R5]]
;

  %a0 = call float @llvm.fabs.f32(float %a)
  %a1 = call float @llvm.fabs.f32(float %a0)

  %b0 = call float @llvm.floor.f32(float %a)
  %b1 = call float @llvm.floor.f32(float %b0)

  %c0 = call float @llvm.ceil.f32(float %a)
  %c1 = call float @llvm.ceil.f32(float %c0)

  %d0 = call float @llvm.trunc.f32(float %a)
  %d1 = call float @llvm.trunc.f32(float %d0)

  %e0 = call float @llvm.rint.f32(float %a)
  %e1 = call float @llvm.rint.f32(float %e0)

  %f0 = call float @llvm.nearbyint.f32(float %a)
  %f1 = call float @llvm.nearbyint.f32(float %f0)

  %g0 = call float @llvm.canonicalize.f32(float %a)
  %g1 = call float @llvm.canonicalize.f32(float %g0)

  %r0 = fadd float %a1, %b1
  %r1 = fadd float %r0, %c1
  %r2 = fadd float %r1, %d1
  %r3 = fadd float %r2, %e1
  %r4 = fadd float %r3, %f1
  %r5 = fadd float %r4, %g1

  ret float %r5
}

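; Plain operator new (@_Znwm) is declared nonnull, so its null check folds away; the nothrow variant and malloc may return null, so their checks must stay.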
define ptr @operator_new() {
; CHECK-LABEL: @operator_new(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_Znwm(i64 8)
; CHECK-NEXT:    br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_Znwm(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

declare nonnull noalias ptr @_Znwm(i64)

%"struct.std::nothrow_t" = type { i8 }
@_ZSt7nothrow = external global %"struct.std::nothrow_t"

define ptr @operator_new_nothrow_t() {
; CHECK-LABEL: @operator_new_nothrow_t(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

declare ptr @_ZnamRKSt9nothrow_t(i64, ptr) nounwind

define ptr @malloc_can_return_null() {
; CHECK-LABEL: @malloc_can_return_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @malloc(i64 8)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @malloc(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

define i32 @call_null() {
; CHECK-LABEL: @call_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 null()
; CHECK-NEXT:    ret i32 poison
;
entry:
  %call = call i32 null()
  ret i32 %call
}

define i32 @call_undef() {
; CHECK-LABEL: @call_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 undef()
; CHECK-NEXT:    ret i32 poison
;
entry:
  %call = call i32 undef()
  ret i32 %call
}

@GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49]

define <8 x i32> @partial_masked_load() {
; CHECK-LABEL: @partial_masked_load(
; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -2), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
  ret <8 x i32> %masked.load
}

define <8 x i32> @masked_load_undef_mask(ptr %V) {
; CHECK-LABEL: @masked_load_undef_mask(
; CHECK-NEXT:    ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
  ret <8 x i32> %masked.load
}

declare noalias ptr @malloc(i64)

declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>)

declare double @llvm.powi.f64.i16(double, i16)
declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16)
declare double @llvm.powi.f64.i32(double, i32)
declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32)

define double @constant_fold_powi() {
; CHECK-LABEL: @constant_fold_powi(
; CHECK-NEXT:    ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i32(double 3.00000e+00, i32 2)
  ret double %t0
}

define double @constant_fold_powi_i16() {
; CHECK-LABEL: @constant_fold_powi_i16(
; CHECK-NEXT:    ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i16(double 3.00000e+00, i16 2)
  ret double %t0
}

define <2 x double> @constant_fold_powi_vec() {
; CHECK-LABEL: @constant_fold_powi_vec(
; CHECK-NEXT:    ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2)
  ret <2 x double> %t0
}

define <2 x double> @constant_fold_powi_vec_i16() {
; CHECK-LABEL: @constant_fold_powi_vec_i16(
; CHECK-NEXT:    ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i16(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i16 2)
  ret <2 x double> %t0
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i9 @llvm.fshr.i9(i9, i9, i9)
declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>)
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>)

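; A funnel shift by zero returns the first operand for fshl and the second operand for fshr.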
define i8 @fshl_no_shift(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0)
  ret i8 %z
}

define i9 @fshr_no_shift(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0)
  ret i9 %z
}

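; The shift amount is interpreted modulo the bit width, so 40 (mod 8), 189 (mod 9), 21 (mod 7), and 72 (mod 8) are all effectively zero shifts.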
define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40)
  ret i8 %z
}

define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189)
  ret i9 %z
}

define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i7> [[X:%.*]]
;
  %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>)
  ret <2 x i7> %z
}

define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i8> [[Y:%.*]]
;
  %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>)
  ret <2 x i8> %z
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[F]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_swapped(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[F]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i9 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i9 [[Y]], i9 [[F]]
; CHECK-NEXT:    ret i9 [[S]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_swapped(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i9 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i9 [[F]], i9 [[Y]]
; CHECK-NEXT:    ret i9 [[S]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

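; For rotates, both data operands of the funnel shift are the same value, so removing a zero-shift guard cannot introduce poison.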
; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard(
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT:    ret i8 [[F]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_swapped(
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT:    ret i8 [[F]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard(
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT:    ret i9 [[F]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_swapped(
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT:    ret i9 [[F]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i9 [[X:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i9 [[X:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; Negative test - make sure we're matching the correct parameter of fshl.

define i8 @fshl_zero_shift_guard_wrong_select_op(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_wrong_select_op(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[Y]], i8 [[F]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %y, i8 %f
  ret i8 %s
}

; Vector types work too.

define <2 x i8> @rotr_zero_shift_guard_splat(<2 x i8> %x, <2 x i8> %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_splat(
; CHECK-NEXT:    [[F:%.*]] = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[X]], <2 x i8> [[SH:%.*]])
; CHECK-NEXT:    ret <2 x i8> [[F]]
;
  %c = icmp eq <2 x i8> %sh, zeroinitializer
  %f = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %x, <2 x i8> %sh)
  %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %f
  ret <2 x i8> %s
}

; If first two operands of funnel shift are undef, the result is undef

define i8 @fshl_ops_undef(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_undef(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 %shamt)
  ret i8 %r
}

define i9 @fshr_ops_undef(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_undef(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 %shamt)
  ret i9 %r
}

; If shift amount is undef, treat it as zero, returning operand 0 or 1

define i8 @fshl_shift_undef(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_shift_undef(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 undef)
  ret i8 %r
}

define i9 @fshr_shift_undef(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_shift_undef(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 undef)
  ret i9 %r
}

; If one of operands is poison, the result is poison
; TODO: these should be poison
define i8 @fshl_ops_poison(i8 %b, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshl.i8(i8 poison, i8 [[B:%.*]], i8 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 %b, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison2(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison2(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 undef, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison3(i8 %a, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison3(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[A:%.*]], i8 poison, i8 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison4(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison4(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison5(i8 %a, i8 %b) {
; CHECK-LABEL: @fshl_ops_poison5(
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 %b, i8 poison)
  ret i8 %r
}

define i8 @fshl_ops_poison6() {
; CHECK-LABEL: @fshl_ops_poison6(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 poison)
  ret i8 %r
}

define i9 @fshr_ops_poison(i9 %b, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison(
; CHECK-NEXT:    [[R:%.*]] = call i9 @llvm.fshr.i9(i9 poison, i9 [[B:%.*]], i9 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 %b, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison2(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison2(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 undef, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison3(i9 %a, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison3(
; CHECK-NEXT:    [[R:%.*]] = call i9 @llvm.fshr.i9(i9 [[A:%.*]], i9 poison, i9 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison4(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison4(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison5(i9 %a, i9 %b) {
; CHECK-LABEL: @fshr_ops_poison5(
; CHECK-NEXT:    ret i9 [[B:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 %b, i9 poison)
  ret i9 %r
}

define i9 @fshr_ops_poison6() {
; CHECK-LABEL: @fshr_ops_poison6(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 poison)
  ret i9 %r
}

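; Funnel-shifting an all-zeros or all-ones value yields that same value for any shift amount.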
define i8 @fshl_zero(i8 %shamt) {
; CHECK-LABEL: @fshl_zero(
; CHECK-NEXT:    ret i8 0
;
  %r = call i8 @llvm.fshl.i8(i8 0, i8 0, i8 %shamt)
  ret i8 %r
}

define <2 x i8> @fshr_zero_vec(<2 x i8> %shamt) {
; CHECK-LABEL: @fshr_zero_vec(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 undef>, <2 x i8> %shamt)
  ret <2 x i8> %r
}

define <2 x i7> @fshl_ones_vec(<2 x i7> %shamt) {
; CHECK-LABEL: @fshl_ones_vec(
; CHECK-NEXT:    ret <2 x i7> <i7 -1, i7 -1>
;
  %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> <i7 undef, i7 -1>, <2 x i7> <i7 -1, i7 undef>, <2 x i7> %shamt)
  ret <2 x i7> %r
}

define i9 @fshr_ones(i9 %shamt) {
; CHECK-LABEL: @fshr_ones(
; CHECK-NEXT:    ret i9 -1
;
  %r = call i9 @llvm.fshr.i9(i9 -1, i9 -1, i9 %shamt)
  ret i9 %r
}

declare double @llvm.fma.f64(double,double,double)
declare double @llvm.fmuladd.f64(double,double,double)

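; An undef operand folds fma/fmuladd to NaN; a poison operand folds the whole call to poison.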
define double @fma_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fma_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op0(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fma_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fma_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fma_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fma_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fma_undef_op0_poison_op1(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double poison, double %x)
  ret double %r
}

define double @fma_undef_op0_poison_op2(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double %x, double poison)
  ret double %r
}

define double @fmuladd_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fmuladd_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op0(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fmuladd_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fmuladd_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fmuladd_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fmuladd_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fmuladd_nan_op0_poison_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000000000, double poison, double %x)
  ret double %r
}

define double @fmuladd_nan_op1_poison_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff8000000000000, double poison)
  ret double %r
}

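; A constant NaN operand constant-folds the call to NaN; when several operands are NaN, the payload of the first one is kept.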
define double @fma_nan_op0(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double 0x7ff8000000000000, double %x, double %y)
  ret double %r
}

define double @fma_nan_op1(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000001
;
  %r = call double @llvm.fma.f64(double %x, double 0x7ff8000000000001, double %y)
  ret double %r
}

define double @fma_nan_op2(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000002
;
  %r = call double @llvm.fma.f64(double %x, double %y, double 0x7ff8000000000002)
  ret double %r
}

define double @fmuladd_nan_op0_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op1(
; CHECK-NEXT:    ret double 0x7FF8000000001234
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000001234, double 0x7ff800000000dead, double %x)
  ret double %r
}

define double @fmuladd_nan_op0_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op2(
; CHECK-NEXT:    ret double 0x7FF8000000005678
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000005678, double %x, double 0x7ff800000000dead)
  ret double %r
}

define double @fmuladd_nan_op1_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_op2(
; CHECK-NEXT:    ret double 0x7FF80000AAAAAAAA
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff80000aaaaaaaa, double 0x7ff800000000dead)
  ret double %r
}

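; Negative tests: inf * 0.0 and inf + (-inf) would produce NaN at run time, but these calls are not simplified.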
define double @fma_nan_multiplicand_inf_zero(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_inf_zero(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fma_nan_multiplicand_zero_inf(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_zero_inf(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fma_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fma_nan_addend_inf_neginf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = uitofp i32 [[Y:%.*]] to double
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = uitofp i32 %y to double
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fma_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fma_nan_addend_neginf_inf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fma.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

define double @fmuladd_nan_multiplicand_neginf_zero(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_neginf_zero(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double 0xFFF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double 0xfff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fmuladd_nan_multiplicand_negzero_inf(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_negzero_inf(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double -0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double -0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fmuladd_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_inf_neginf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = sitofp i32 [[Y:%.*]] to double
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = sitofp i32 %y to double
  %r = call double @llvm.fmuladd.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fmuladd_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_neginf_inf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fmuladd.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

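; copysign(x, x) is x; copysign(x, -x) is -x; and negating only the magnitude operand does not change the result.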
declare float @llvm.copysign.f32(float, float)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)

define float @copysign_same_operand(float %x) {
; CHECK-LABEL: @copysign_same_operand(
; CHECK-NEXT:    ret float [[X:%.*]]
;
  %r = call float @llvm.copysign.f32(float %x, float %x)
  ret float %r
}

define <2 x double> @copysign_same_operand_vec(<2 x double> %x) {
; CHECK-LABEL: @copysign_same_operand_vec(
; CHECK-NEXT:    ret <2 x double> [[X:%.*]]
;
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %x)
  ret <2 x double> %r
}

define float @negated_sign_arg(float %x) {
; CHECK-LABEL: @negated_sign_arg(
; CHECK-NEXT:    [[NEGX:%.*]] = fsub ninf float -0.000000e+00, [[X:%.*]]
; CHECK-NEXT:    ret float [[NEGX]]
;
  %negx = fsub ninf float -0.0, %x
  %r = call arcp float @llvm.copysign.f32(float %x, float %negx)
  ret float %r
}

define <2 x double> @negated_sign_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_sign_arg_vec(
; CHECK-NEXT:    [[NEGX:%.*]] = fneg afn <2 x double> [[X:%.*]]
; CHECK-NEXT:    ret <2 x double> [[NEGX]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %negx)
  ret <2 x double> %r
}

define float @negated_mag_arg(float %x) {
; CHECK-LABEL: @negated_mag_arg(
; CHECK-NEXT:    ret float [[X:%.*]]
;
  %negx = fneg nnan float %x
  %r = call ninf float @llvm.copysign.f32(float %negx, float %x)
  ret float %r
}

define <2 x double> @negated_mag_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_mag_arg_vec(
; CHECK-NEXT:    ret <2 x double> [[X:%.*]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %negx, <2 x double> %x)
  ret <2 x double> %r
}

; We handle the "returned" attribute only in InstCombine, because the fact
; that this simplification may replace one call with another may cause issues
; for call graph passes.

declare i32 @passthru_i32(i32 returned)
declare ptr @passthru_p8(ptr returned)

define i32 @returned_const_int_arg() {
; CHECK-LABEL: @returned_const_int_arg(
; CHECK-NEXT:    [[X:%.*]] = call i32 @passthru_i32(i32 42)
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define ptr @returned_const_ptr_arg() {
; CHECK-LABEL: @returned_const_ptr_arg(
; CHECK-NEXT:    [[X:%.*]] = call ptr @passthru_p8(ptr null)
; CHECK-NEXT:    ret ptr [[X]]
;
  %x = call ptr @passthru_p8(ptr null)
  ret ptr %x
}

define i32 @returned_var_arg(i32 %arg) {
; CHECK-LABEL: @returned_var_arg(
; CHECK-NEXT:    [[X:%.*]] = call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @returned_const_int_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_const_int_arg_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 @passthru_i32(i32 42)
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define i32 @returned_var_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_var_arg_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @call_undef_musttail() {
; CHECK-LABEL: @call_undef_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 undef()
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 undef()
  ret i32 %x
}

; This is not the builtin fmax, so we don't know anything about its behavior.

declare float @fmaxf(float, float)

define float @nobuiltin_fmax() {
; CHECK-LABEL: @nobuiltin_fmax(
; CHECK-NEXT:    [[M:%.*]] = call float @fmaxf(float 0.000000e+00, float 1.000000e+00) #[[ATTR3:[0-9]+]]
; CHECK-NEXT:    [[R:%.*]] = call float @llvm.fabs.f32(float [[M]])
; CHECK-NEXT:    ret float [[R]]
;
  %m = call float @fmaxf(float 0.0, float 1.0) #0
  %r = call float @llvm.fabs.f32(float %m)
  ret float %r
}


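; ctpop of a value known to be 0 or 1 is just that value.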
declare i32 @llvm.ctpop.i32(i32)
declare <3 x i33> @llvm.ctpop.v3i33(<3 x i33>)
declare i1 @llvm.ctpop.i1(i1)

define i32 @ctpop_lowbit(i32 %x) {
; CHECK-LABEL: @ctpop_lowbit(
; CHECK-NEXT:    [[B:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    ret i32 [[B]]
;
  %b = and i32 %x, 1
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

; Negative test - only low bit allowed
; This could be reduced by instcombine to and+shift.

define i32 @ctpop_pow2(i32 %x) {
; CHECK-LABEL: @ctpop_pow2(
; CHECK-NEXT:    [[B:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.ctpop.i32(i32 [[B]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %b = and i32 %x, 4
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

define <3 x i33> @ctpop_signbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_signbit(
; CHECK-NEXT:    [[B:%.*]] = lshr <3 x i33> [[X:%.*]], <i33 32, i33 32, i33 32>
; CHECK-NEXT:    ret <3 x i33> [[B]]
;
  %b = lshr <3 x i33> %x, <i33 32, i33 32, i33 32>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

; Negative test - only 1 bit allowed

define <3 x i33> @ctpop_notsignbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_notsignbit(
; CHECK-NEXT:    [[B:%.*]] = lshr <3 x i33> [[X:%.*]], <i33 31, i33 31, i33 31>
; CHECK-NEXT:    [[R:%.*]] = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> [[B]])
; CHECK-NEXT:    ret <3 x i33> [[R]]
;
  %b = lshr <3 x i33> %x, <i33 31, i33 31, i33 31>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

define i1 @ctpop_bool(i1 %x) {
; CHECK-LABEL: @ctpop_bool(
; CHECK-NEXT:    ret i1 [[X:%.*]]
;
  %r = tail call i1 @llvm.ctpop.i1(i1 %x)
  ret i1 %r
}

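; cttz(1 << x) folds to x and ctlz(signbit >> x) folds to x; ashr of a negative value keeps the sign bit set, so its ctlz is 0.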
declare i32 @llvm.cttz.i32(i32, i1)
declare <3 x i33> @llvm.cttz.v3i33(<3 x i33>, i1)

define i32 @cttz_shl1(i32 %x) {
; CHECK-LABEL: @cttz_shl1(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = shl i32 1, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @cttz_shl1_vec(<3 x i33> %x) {
; CHECK-LABEL: @cttz_shl1_vec(
; CHECK-NEXT:    ret <3 x i33> [[X:%.*]]
;
  %s = shl <3 x i33> <i33 1, i33 1, i33 undef>, %x
  %r = call <3 x i33> @llvm.cttz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @cttz_shl_not_low_bit(i32 %x) {
; CHECK-LABEL: @cttz_shl_not_low_bit(
; CHECK-NEXT:    [[S:%.*]] = shl i32 2, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.cttz.i32(i32 [[S]], i1 true)
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = shl i32 2, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}

declare i32 @llvm.ctlz.i32(i32, i1)
declare <3 x i33> @llvm.ctlz.v3i33(<3 x i33>, i1)

define i32 @ctlz_lshr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = lshr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_lshr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_negative(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = lshr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @ctlz_lshr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit_vec(
; CHECK-NEXT:    ret <3 x i33> [[X:%.*]]
;
  %s = lshr <3 x i33> <i33 undef, i33 4294967296, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @ctlz_lshr_not_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_not_negative(
; CHECK-NEXT:    [[S:%.*]] = lshr i32 42, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.ctlz.i32(i32 [[S]], i1 true)
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = lshr i32 42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_ashr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit(
; CHECK-NEXT:    ret i32 0
;
  %s = ashr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define i32 @ctlz_ashr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_negative(
; CHECK-NEXT:    ret i32 0
;
  %s = ashr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit_vec(
; CHECK-NEXT:    ret <3 x i33> zeroinitializer
;
  %s = ashr <3 x i33> <i33 4294967296, i33 undef, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 true)
  ret <3 x i33> %r
}

declare ptr @llvm.ptrmask.p0.i64(ptr , i64)

define i1 @capture_vs_recurse(i64 %mask) {
; CHECK-LABEL: @capture_vs_recurse(
; CHECK-NEXT:    [[A:%.*]] = call noalias ptr @malloc(i64 8)
; CHECK-NEXT:    [[B:%.*]] = call nonnull ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 [[MASK:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[A]], [[B]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = call noalias ptr @malloc(i64 8)
  %b = call nonnull ptr @llvm.ptrmask.p0.i64(ptr %a, i64 %mask)
  %cmp = icmp eq ptr %a, %b
  ret i1 %cmp
}


attributes #0 = { nobuiltin readnone }