; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-mesa3d < %s | FileCheck %s

define i1 @test_srem_odd(i29 %X) nounwind {
; CHECK-LABEL: test_srem_odd:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_mov_b32 s4, 0x1f5a814b
; CHECK-NEXT:    s_mov_b32 s5, 0x52bf5b
; CHECK-NEXT:    v_mul_lo_u32 v0, v0, s4
; CHECK-NEXT:    v_add_i32_e32 v0, vcc, 0x295fad, v0
; CHECK-NEXT:    v_and_b32_e32 v0, 0x1fffffff, v0
; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, s5, v0
; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %srem = srem i29 %X, 99
  %cmp = icmp eq i29 %srem, 0
  ret i1 %cmp
}

define i1 @test_srem_even(i4 %X) nounwind {
; CHECK-LABEL: test_srem_even:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_bfe_i32 v1, v0, 0, 4
; CHECK-NEXT:    v_mul_i32_i24_e32 v1, 3, v1
; CHECK-NEXT:    v_lshrrev_b32_e32 v2, 4, v1
; CHECK-NEXT:    v_bfe_u32 v1, v1, 7, 1
; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
; CHECK-NEXT:    v_and_b32_e32 v1, 15, v1
; CHECK-NEXT:    v_mul_u32_u24_e32 v1, 6, v1
; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
; CHECK-NEXT:    v_and_b32_e32 v0, 15, v0
; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %srem = srem i4 %X, 6
  %cmp = icmp eq i4 %srem, 1
  ret i1 %cmp
}

define i1 @test_srem_pow2_setne(i6 %X) nounwind {
; CHECK-LABEL: test_srem_pow2_setne:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_bfe_i32 v1, v0, 0, 6
; CHECK-NEXT:    v_bfe_u32 v1, v1, 9, 2
; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v0, v1
; CHECK-NEXT:    v_and_b32_e32 v1, 60, v1
; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
; CHECK-NEXT:    v_and_b32_e32 v0, 63, v0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %srem = srem i6 %X, 4
  %cmp = icmp ne i6 %srem, 0
  ret i1 %cmp
}

define <3 x i1> @test_srem_vec(<3 x i31> %X) nounwind {
; CHECK-LABEL: test_srem_vec:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_bfe_i32 v3, v2, 0, 31
; CHECK-NEXT:    v_bfe_i32 v4, v1, 0, 31
; CHECK-NEXT:    v_bfe_i32 v5, v0, 0, 31
; CHECK-NEXT:    s_mov_b32 s4, 0x38e38e39
; CHECK-NEXT:    s_mov_b32 s5, 0xc71c71c7
; CHECK-NEXT:    s_mov_b32 s6, 0x7ffffffd
; CHECK-NEXT:    v_mul_hi_i32 v5, v5, s4
; CHECK-NEXT:    v_mul_hi_i32 v4, v4, s4
; CHECK-NEXT:    v_mul_hi_i32 v3, v3, s5
; CHECK-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
; CHECK-NEXT:    v_lshrrev_b32_e32 v5, 1, v5
; CHECK-NEXT:    v_lshrrev_b32_e32 v7, 31, v4
; CHECK-NEXT:    v_lshrrev_b32_e32 v4, 1, v4
; CHECK-NEXT:    v_lshrrev_b32_e32 v8, 31, v3
; CHECK-NEXT:    v_lshrrev_b32_e32 v3, 1, v3
; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
; CHECK-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v3, v8
; CHECK-NEXT:    v_mul_lo_u32 v5, v5, 9
; CHECK-NEXT:    v_mul_lo_u32 v4, v4, 9
; CHECK-NEXT:    v_mul_lo_u32 v3, v3, -9
; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, v1, v4
; CHECK-NEXT:    v_sub_i32_e32 v2, vcc, v2, v3
; CHECK-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v2
; CHECK-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v1
; CHECK-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 3, v0
; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, s6, v1
; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 3, v2
; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %srem = srem <3 x i31> %X, <i31 9, i31 9, i31 -9>
  %cmp = icmp ne <3 x i31> %srem, <i31 3, i31 -3, i31 3>
  ret <3 x i1> %cmp
}
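
; Informal note on the magic constants expected in test_srem_odd (an editorial
; sketch, not part of the autogenerated checks): 0x1f5a814b is the
; multiplicative inverse of 99 modulo 2^29 (99 * 0x1f5a814b = 97 * 2^29 + 1),
; 0x295fad = 2711469 = floor(2^28 / 99) is the count of negative multiples of
; 99 in the signed i29 range, and 0x52bf5b = 2 * 0x295fad + 1 is the total
; count of i29 multiples of 99. X is divisible by 99 exactly when
; ((X * 0x1f5a814b + 0x295fad) & 0x1fffffff) < 0x52bf5b, which is the
; mul/add/and/cmp sequence the CHECK lines above expect. For example, with
; X = 198 the left-hand side is 0x295faf = 2711471, below the bound, so the
; compare (and the srem == 0 test) comes out true.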