; RUN: opt -mtriple=amdgcn-amd-amdhsa -amdgpu-codegenprepare -verify -S %s -o - | FileCheck %s

declare i1 @llvm.amdgcn.class.f32(float, i32) nounwind readnone
declare i1 @llvm.amdgcn.class.f64(double, i32) nounwind readnone
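; Note: llvm.amdgcn.class tests its first operand against the IEEE
; floating-point classes selected by the second operand, a 10-bit test mask
; (one bit per class).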

; Trivial case: the xor should be removed and the second argument
; of the intrinsic call bitwise-negated.
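; With the 10-bit class mask, negating 5 (0b0000000101) gives
; 0x3ff ^ 0x5 = 1018, so the pair
;   %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 5)
;   %2 = xor i1 %1, -1
; is expected to fold to
;   %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 1018)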
; CHECK: @fold_negate_intrinsic_test_mask
; CHECK: %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 1018)
define i1 @fold_negate_intrinsic_test_mask(float %x) nounwind {
  %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 5)
  %2 = xor i1 %1, -1
  ret i1 %2
}

; Trivial case: the xor should be removed and the second argument
; of the intrinsic call bitwise-negated.
; CHECK: @fold_negate_intrinsic_test_mask_dbl
; CHECK: %1 = call i1 @llvm.amdgcn.class.f64(double %x, i32 1018)
define i1 @fold_negate_intrinsic_test_mask_dbl(double %x) nounwind {
  %1 = call i1 @llvm.amdgcn.class.f64(double %x, i32 5)
  %2 = xor i1 %1, -1
  ret i1 %2
}

; Negative test: should not transform for variable test masks
; CHECK: @fold_negate_intrinsic_test_mask_neg_var
; CHECK: %[[X0:.*]] = alloca i32
; CHECK: %[[X1:.*]] = load i32, i32* %[[X0]]
; CHECK: call i1 @llvm.amdgcn.class.f32(float %x, i32 %[[X1]])
; CHECK: xor
define i1 @fold_negate_intrinsic_test_mask_neg_var(float %x) nounwind {
  %1 = alloca i32
  store i32 7, i32* %1
  %2 = load i32, i32* %1
  %3 = call i1 @llvm.amdgcn.class.f32(float %x, i32 %2)
  %4 = xor i1 %3, -1
  ret i1 %4
}

; Negative test: should not transform when the intrinsic's result has
;   multiple uses
; CHECK: @fold_negate_intrinsic_test_mask_neg_multiple_uses
; CHECK: %[[X1:.*]] = call i1 @llvm.amdgcn.class.f32(float %x, i32 7)
; CHECK: store i1 %[[X1]]
; CHECK: %[[X2:.*]] = xor i1 %[[X1]]
define i1 @fold_negate_intrinsic_test_mask_neg_multiple_uses(float %x) nounwind {
  %y = alloca i1
  %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 7)
  %2 = xor i1 %1, -1
  store i1 %1, i1* %y
  %3 = xor i1 %1, -1
  ret i1 %2
}

; Negative test: should not transform an xor with no operand equal to -1
; CHECK: @fold_negate_intrinsic_test_mask_neg_one
; CHECK: %[[X0:.*]] = call i1 @llvm.amdgcn.class.f32(float %x, i32 7)
; CHECK: xor i1 %[[X0]], false
define i1 @fold_negate_intrinsic_test_mask_neg_one(float %x) nounwind {
  %1 = call i1 @llvm.amdgcn.class.f32(float %x, i32 7)
  %2 = xor i1 %1, false
  ret i1 %2
}