; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s
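
; Check codegen differences between wave32 and wave64: GFX1032 lines cover
; wave32 (32-bit exec/vcc masks, the *_lo register names), GFX1064 lines cover
; wave64 (64-bit masks), and GCN lines are common to both. The last RUN line
; uses the default wave size for gfx1010 (GFX10DEFWAVE), which matters for the
; per-function wave size attribute tests near the end of the file.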

; GCN-LABEL: {{^}}test_vopc_i32:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}}
define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %sel = select i1 %cmp, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_f32:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}}
define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %sel = select i1 %cmp, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_vcmp:
; GFX1032: v_cmp_nle_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
define amdgpu_ps void @test_vopc_vcmp(float %x) {
  %cmp = fcmp oge float %x, 0.0
  call void @llvm.amdgcn.kill(i1 %cmp)
  ret void
}

; GCN-LABEL: {{^}}test_vopc_2xf16:
; GFX1032: v_cmp_le_f16_sdwa [[SC:vcc_lo|s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
; GFX1064: v_cmp_le_f16_sdwa [[SC:vcc|s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid
  %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4
  %elt = extractelement <2 x half> %load, i32 1
  %cmp = fcmp ugt half %elt, 0.0
  %sel = select i1 %cmp, <2 x half> <half 1.0, half 1.0>, <2 x half> %load
  store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4
  ret void
}
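
; 0x204 is the v_cmp_class mask for +/-infinity (bit 2 = -inf, bit 9 = +inf),
; so the fcmp of |x| against +infinity below folds to a single class compare.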
; GCN-LABEL: {{^}}test_vopc_class:
; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}}
define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x)
  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}
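
; In the f16 checks below, 0x7c00 is +infinity and 0x3c00 is 1.0 in half
; precision, matching the fcmp and select constants in the IR.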
; GCN-LABEL: {{^}}test_vcmp_vcnd_f16:
; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]

; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}}
define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 {
  %cmp = fcmp oeq half %x, 0x7FF0000000000000
  %sel = select i1 %cmp, half 1.0, half %x
  store half %sel, half addrspace(1)* %out, align 2
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}}
; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}}
; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %cmp2 = fcmp ult float %load, 1.0
  %and = and i1 %cmp, %cmp2
  %sel = select i1 %and, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}}
; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}}
; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %cmp2 = icmp slt i32 %load, 1
  %xor = xor i1 %cmp, %cmp2
  %sel = select i1 %xor, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or:
; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}}
; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}}
; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %load, 3
  %cmp2 = icmp ult i32 %load, 2
  %or = or i1 %cmp, %cmp2
  %sel = select i1 %or, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_mask_if:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %cmp = icmp ugt i32 %lid, 10
  br i1 %cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %arg, align 4
  br label %endif

endif:
  ret void
}

; GCN-LABEL: {{^}}test_loop_with_if:
; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}]
; GCN: s_cbranch_execz
; GCN: .LBB{{.*}}:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
; GCN: ; %bb.{{[0-9]+}}:
; GCN: .LBB{{.*}}:
; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}]
; GCN: ; %bb.{{[0-9]+}}:
; GCN: ; %bb.{{[0-9]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}]
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
; GCN: s_cbranch_execz .LBB
; GCN: ; %bb.{{[0-9]+}}:
; GCN: .LBB{{.*}}:
; GCN: s_endpgm
define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb2

bb1:
  ret void

bb2:
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ]
  %tmp4 = icmp slt i32 %tmp3, %tmp
  br i1 %tmp4, label %bb5, label %bb11

bb5:
  %tmp6 = sext i32 %tmp3 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  %tmp9 = icmp sgt i32 %tmp8, 10
  br i1 %tmp9, label %bb10, label %bb11

bb10:
  store i32 %tmp, i32 addrspace(1)* %tmp7, align 4
  br label %bb13

bb11:
  %tmp12 = sdiv i32 %tmp3, 2
  br label %bb13

bb13:
  %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ]
  %tmp15 = add nsw i32 %tmp14, 1
  %tmp16 = icmp slt i32 %tmp14, 255
  br i1 %tmp16, label %bb2, label %bb1
}

; GCN-LABEL: {{^}}test_loop_with_if_else_break:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
; GCN: ; %bb.{{[0-9]+}}: ; %.preheader
; GCN: .LBB{{.*}}:

; GCN: global_store_dword
; GFX1032: s_or_b32 [[MASK0:s[0-9]+]], [[MASK0]], vcc_lo
; GFX1064: s_or_b64 [[MASK0:s\[[0-9:]+\]]], [[MASK0]], vcc
; GFX1032: s_andn2_b32 [[MASK1:s[0-9]+]], [[MASK1]], exec_lo
; GFX1064: s_andn2_b64 [[MASK1:s\[[0-9:]+\]]], [[MASK1]], exec
; GFX1032: s_and_b32 [[MASK0]], [[MASK0]], exec_lo
; GFX1064: s_and_b64 [[MASK0]], [[MASK0]], exec
; GFX1032: s_or_b32 [[MASK1]], [[MASK1]], [[MASK0]]
; GFX1064: s_or_b64 [[MASK1]], [[MASK1]], [[MASK0]]
; GCN: .LBB{{.*}}: ; %Flow
; GFX1032: s_and_b32 [[TMP0:s[0-9]+]], exec_lo, [[MASK1]]
; GFX1064: s_and_b64 [[TMP0:s\[[0-9:]+\]]], exec, [[MASK1]]
; GFX1032: s_or_b32 [[ACC:s[0-9]+]], [[TMP0]], [[ACC]]
; GFX1064: s_or_b64 [[ACC:s\[[0-9:]+\]]], [[TMP0]], [[ACC]]
; GFX1032: s_andn2_b32 exec_lo, exec_lo, [[ACC]]
; GFX1064: s_andn2_b64 exec, exec, [[ACC]]
; GCN: s_cbranch_execz
; GCN: .LBB{{.*}}:

; GFX1032-DAG: s_or_b32 [[MASK1]], [[MASK1]], exec_lo
; GFX1064-DAG: s_or_b64 [[MASK1]], [[MASK1]], exec
; GCN-DAG: global_load_dword [[LOAD:v[0-9]+]]
; GFX1032: v_cmp_gt_i32_e32 vcc_lo, 11, [[LOAD]]
; GFX1064: v_cmp_gt_i32_e32 vcc, 11, [[LOAD]]
define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = icmp eq i32 %tmp, 0
  br i1 %tmp1, label %.loopexit, label %.preheader

.preheader:
  br label %bb2

bb2:
  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  %tmp7 = icmp sgt i32 %tmp6, 10
  br i1 %tmp7, label %bb8, label %.loopexit

bb8:
  store i32 %tmp, i32 addrspace(1)* %tmp5, align 4
  %tmp9 = add nuw nsw i32 %tmp3, 1
  %tmp10 = icmp ult i32 %tmp9, 256
  %tmp11 = icmp ult i32 %tmp9, %tmp
  %tmp12 = and i1 %tmp10, %tmp11
  br i1 %tmp12, label %bb2, label %.loopexit

.loopexit:
  ret void
}

; GCN-LABEL: {{^}}test_addc_vop2b:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = add nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subbrev_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subb_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %arg1, %tmp4
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_udiv64:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
bb:
  %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1
  %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8
  %tmp2 = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = udiv i64 %tmp1, %tmp2
  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2
  store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8
  ret void
}

; GCN-LABEL: {{^}}test_div_scale_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_scale_f64:
; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load volatile double, double addrspace(1)* %gep.0, align 8
  %b = load volatile double, double addrspace(1)* %gep.1, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}test_mad_i64_i32:
; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %sext0 = sext i32 %arg0 to i64
  %sext1 = sext i32 %arg1 to i64
  %mul = mul i64 %sext0, %sext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}

; GCN-LABEL: {{^}}test_mad_u64_u32:
; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %zext0 = zext i32 %arg0 to i64
  %zext1 = zext i32 %arg1 to i64
  %mul = mul i64 %zext0, %zext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}
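
; v_div_fmas_f32/f64 read VCC as an implicit operand, so the i1 argument is
; materialized straight into vcc_lo (wave32) or vcc (wave64) by s_cselect.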
; GCN-LABEL: {{^}}test_div_fmas_f32:
; GCN: s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN: v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f64:
; GCN: s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
  %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
  store double %result, double addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}}
; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}}
; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}}
; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}}

; GCN: load_dword [[LOAD:v[0-9]+]]
; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]]

; GCN: .LBB{{[0-9_]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]]
; GFX1064: s_or_b64 exec, exec, [[SAVE]]
; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
  %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2

  %a = load float, float addrspace(1)* %gep.a
  %b = load float, float addrspace(1)* %gep.b
  %c = load float, float addrspace(1)* %gep.c

  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %bb, label %exit

bb:
  %val = load volatile i32, i32 addrspace(1)* %dummy
  %cmp1 = icmp ne i32 %val, 0
  br label %exit

exit:
  %cond = phi i1 [false, %entry], [%cmp1, %bb]
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
  store float %result, float addrspace(1)* %gep.out, align 4
  ret void
}

; GCN-LABEL: {{^}}fdiv_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}

; GCN-NOT: vcc
; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
  %fdiv = fdiv float %a, %b
  store float %fdiv, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_br_cc_f16:
; GFX1032: v_cmp_nlt_f16_e32 vcc_lo,
; GFX1032: s_and_b32 vcc_lo, exec_lo, vcc_lo
; GFX1064: v_cmp_nlt_f16_e32 vcc,
; GFX1064: s_and_b64 vcc, exec, vcc{{$}}
; GCN-NEXT: s_cbranch_vccnz
define amdgpu_kernel void @test_br_cc_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %fcmp = fcmp olt half %a.val, %b.val
  br i1 %fcmp, label %one, label %two

one:
  store half %a.val, half addrspace(1)* %r
  ret void

two:
  store half %b.val, half addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}test_brcc_i1:
; GCN: s_bitcmp0_b32 s{{[0-9]+}}, 0
; GCN-NEXT: s_cbranch_scc1
define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
  %cmp0 = icmp ne i1 %val, 0
  br i1 %cmp0, label %store, label %end

store:
  store i32 222, i32 addrspace(1)* %out
  ret void

end:
  ret void
}

; GCN-LABEL: {{^}}test_preserve_condition_undef_flag:
; GFX1032-DAG: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032-DAG: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0
; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}}
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]]
; GFX1064-DAG: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064-DAG: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0
; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}]
; GFX1064: s_and_b64 vcc, exec, [[OR2]]
; GCN: s_cbranch_vccnz
define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 {
bb0:
  %tmp = icmp sgt i32 %arg1, 4
  %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
  %tmp4 = select i1 %undef, float %arg, float 1.000000e+00
  %tmp5 = fcmp ogt float %arg2, 0.000000e+00
  %tmp6 = fcmp olt float %arg2, 1.000000e+00
  %tmp7 = fcmp olt float %arg, %tmp4
  %tmp8 = and i1 %tmp5, %tmp6
  %tmp9 = and i1 %tmp8, %tmp7
  br i1 %tmp9, label %bb1, label %bb2

bb1:
  store volatile i32 0, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop:
; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1
; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1
; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:                                              ; preds = %Flow, %bb
  %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  br i1 %cmp0, label %bb4, label %Flow

bb4:                                              ; preds = %bb1
  %load = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp sge i32 %tmp, %load
  br label %Flow

Flow:                                             ; preds = %bb4, %bb1
  %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
  %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
  br i1 %tmp3, label %bb1, label %bb9

bb9:                                              ; preds = %Flow
  store volatile i32 7, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo
; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
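
; set.inactive writes its second operand into the inactive lanes, which is
; done by temporarily inverting exec around the v_mov.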
; GCN-LABEL: {{^}}test_set_inactive:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 {
  %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42)
  store i32 %tmp, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_set_inactive_64:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 {
  %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0)
  store i64 %tmp, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_kill_i1_terminator_float:
; GFX1032: s_mov_b32 exec_lo, 0
; GFX1064: s_mov_b64 exec, 0
define amdgpu_ps void @test_kill_i1_terminator_float() #0 {
  call void @llvm.amdgcn.kill(i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_kill_i1_terminator_i1:
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_or_b32 [[OR:s[0-9]+]],
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[OR]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]],
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[OR]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
  %c1 = icmp slt i32 %a, %b
  %c2 = icmp slt i32 %c, %d
  %x = or i1 %c1, %c2
  call void @llvm.amdgcn.kill(i1 %x)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_loop_vcc:
; GFX1032: v_cmp_lt_f32_e32 vcc_lo,
; GFX1064: v_cmp_lt_f32_e32 vcc,
; GCN: s_cbranch_vccz
define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
entry:
  br label %loop

loop:
  %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ]
  %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ]
  %cc = fcmp ogt float %ctr.iv, 7.0
  br i1 %cc, label %break, label %body

body:
  %c.iv0 = extractelement <4 x float> %c.iv, i32 0
  %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
  %ctr.next = fadd float %ctr.iv, 2.0
  br label %loop

break:
  ret <4 x float> %c.iv
}

; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
; GCN-LABEL: {{^}}test_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 32
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}

; GCN-LABEL: {{^}}test_strict_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_strict_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_strict_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_strict_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 32
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}
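
; For WQM, exec is saved, switched to whole quad mode for the interpolation
; and sampling code, and then narrowed back to the original live mask.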
; GCN-LABEL: {{^}}test_wqm1:
; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]]
; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, [[ORIG]]
define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 {
main_body:
  %inst23 = extractelement <2 x float> %pos, i32 0
  %inst24 = extractelement <2 x float> %pos, i32 1
  %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0)
  %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0)
  %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0)
  %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0)
  %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0)
  ret <4 x float> %tex
}

; GCN-LABEL: {{^}}test_wqm2:
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}]
define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 {
main_body:
  %src0 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i32 0, i32 0)
  %src1 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i32 0, i32 0)
  %out = fadd float %src0, %src1
  %out.0 = bitcast float %out to i32
  %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0)
  %out.2 = bitcast i32 %out.1 to float
  ret float %out.2
}
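
; The i64 variants of the fcmp/icmp ballot-like intrinsics still return a
; 64-bit mask in wave32; the checks expect the high 32 bits to be zeroed.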
; GCN-LABEL: {{^}}test_intr_fcmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN: store_dwordx2 v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]]
; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN: store_dwordx2 v{{[0-9]+}}, v{{\[}}[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) {
  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_fcmp_i32:
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GCN: store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i32:
; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GCN: store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) {
  %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_wqm_vote:
; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[WQM]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: v_cmp_neq_f32_e32 vcc, 0
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[WQM]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_ps void @test_wqm_vote(float %a) {
  %c1 = fcmp une float %a, 0.0
  %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
  call void @llvm.amdgcn.kill(i1 %c2)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_branch_true:
; GFX1032: s_mov_b32 vcc_lo, exec_lo
; GFX1064: s_mov_b64 vcc, exec
define amdgpu_kernel void @test_branch_true() #2 {
entry:
  br i1 true, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body, %for.body.lr.ph
  br i1 undef, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; GCN-LABEL: {{^}}test_ps_live:
; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo
; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
define amdgpu_ps float @test_ps_live() #0 {
  %live = call i1 @llvm.amdgcn.ps.live()
  %live.32 = zext i1 %live to i32
  %r = bitcast i32 %live.32 to float
  ret float %r
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]]
; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0
; GFX1064: s_and_b64 vcc, exec, [[C]]
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}
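
; NOTE: the VGPRBlocks values below differ because wave32 allocates VGPRs in
; larger granules than wave64 on gfx10 (8 vs. 4 registers) and the field
; encodes granules minus one: 12 VGPRs -> 2 granules (1) in wave32 and
; 3 granules (2) in wave64.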
; GCN-LABEL: {{^}}test_vgprblocks_w32_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 1
define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e,
                                                 float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}test_vgprblocks_w64_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 2
define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e,
                                                 float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}icmp64:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i64 %icmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp64:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp64(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i64 %fcmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}icmp32:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp32(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i32 %icmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp32:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp32(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i32 %fcmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

declare void @external_void_func_void() #1

; Test save/restore of VGPR needed for SGPR spilling.
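; The CSR VGPR (v40) holding the spilled SGPRs is saved and reloaded with exec
; forced to all ones (s_or_saveexec ..., -1) so that lanes which are inactive
; at the call site keep their values across the call.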

; GCN-LABEL: {{^}}callee_no_stack_with_call:
; GCN: s_waitcnt
; GCN-NEXT: s_waitcnt_vscnt

; GFX1064-NEXT: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032-NEXT: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]]

; GCN-NEXT: v_writelane_b32 v40, s33, 2
; GCN: s_mov_b32 s33, s32
; GFX1064: s_addk_i32 s32, 0x400
; GFX1032: s_addk_i32 s32, 0x200


; GCN-DAG: v_writelane_b32 v40, s30, 0
; GCN-DAG: v_writelane_b32 v40, s31, 1
; GCN: s_swappc_b64
; GCN-DAG: v_readlane_b32 s4, v40, 0
; GCN-DAG: v_readlane_b32 s5, v40, 1


; GFX1064: s_addk_i32 s32, 0xfc00
; GFX1032: s_addk_i32 s32, 0xfe00
; GCN: v_readlane_b32 s33, v40, 2
; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack_with_call() #1 {
  call void @external_void_func_void()
  ret void
}


declare i32 @llvm.amdgcn.workitem.id.x()
declare float @llvm.fabs.f32(float)
declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)
declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1)
declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1)
declare i1 @llvm.amdgcn.class.f32(float, i32)
declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare float @llvm.amdgcn.strict.wwm.f32(float)
declare float @llvm.amdgcn.wwm.f32(float)
declare i32 @llvm.amdgcn.wqm.i32(i32)
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32)
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32)
declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32)
declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32)
declare void @llvm.amdgcn.kill(i1)
declare i1 @llvm.amdgcn.wqm.vote(i1)
declare i1 @llvm.amdgcn.ps.live()
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #5

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone optnone noinline }
attributes #3 = { "target-features"="+wavefrontsize32" }
attributes #4 = { "target-features"="+wavefrontsize64" }
attributes #5 = { inaccessiblememonly nounwind }