1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK
3
4
; Natural mapping: %val in v0, %vindex/%voffset in v[1:2], %rsrc copied from
; s[2:5] into s[8:11], %soffset in s6. Both idxen and offen are set because a
; VGPR vindex and a VGPR voffset are supplied.
5define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
6; CHECK-LABEL: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
7; CHECK:       ; %bb.0:
8; CHECK-NEXT:    s_mov_b32 s11, s5
9; CHECK-NEXT:    s_mov_b32 s10, s4
10; CHECK-NEXT:    s_mov_b32 s9, s3
11; CHECK-NEXT:    s_mov_b32 s8, s2
12; CHECK-NEXT:    buffer_atomic_add_f32 v0, v[1:2], s[8:11], s6 idxen offen
13; CHECK-NEXT:    s_endpgm
; The intrinsic result is discarded (noret form); the function returns void.
14  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
15  ret void
16}
17
18; Natural mapping, no voffset
19define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 inreg %soffset) {
20; CHECK-LABEL: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
21; CHECK:       ; %bb.0:
22; CHECK-NEXT:    s_mov_b32 s11, s5
23; CHECK-NEXT:    s_mov_b32 s10, s4
24; CHECK-NEXT:    s_mov_b32 s9, s3
25; CHECK-NEXT:    s_mov_b32 s8, s2
26; CHECK-NEXT:    buffer_atomic_add_f32 v0, v1, s[8:11], s6 idxen
27; CHECK-NEXT:    s_endpgm
; Constant-zero voffset: only idxen (no offen) is emitted and a single VGPR
; (v1, the vindex) is used for the address instead of a v[1:2] pair.
28  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
29  ret void
30}
31
; Same natural mapping as the base f32 case, but the cachepolicy immediate is
; 2, which is reflected as the slc modifier on the generated instruction.
32define amdgpu_ps void @struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
33; CHECK-LABEL: struct_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
34; CHECK:       ; %bb.0:
35; CHECK-NEXT:    s_mov_b32 s11, s5
36; CHECK-NEXT:    s_mov_b32 s10, s4
37; CHECK-NEXT:    s_mov_b32 s9, s3
38; CHECK-NEXT:    s_mov_b32 s8, s2
39; CHECK-NEXT:    buffer_atomic_add_f32 v0, v[1:2], s[8:11], s6 idxen offen slc
40; CHECK-NEXT:    s_endpgm
41  %ret = call float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
42  ret void
43}
44
; <2 x half> payload: the packed-f16 form (buffer_atomic_pk_add_f16) is
; selected instead of buffer_atomic_add_f32; register mapping is otherwise the
; same as the f32 natural-mapping case.
45define amdgpu_ps void @struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
46; CHECK-LABEL: struct_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
47; CHECK:       ; %bb.0:
48; CHECK-NEXT:    s_mov_b32 s11, s5
49; CHECK-NEXT:    s_mov_b32 s10, s4
50; CHECK-NEXT:    s_mov_b32 s9, s3
51; CHECK-NEXT:    s_mov_b32 s8, s2
52; CHECK-NEXT:    buffer_atomic_pk_add_f16 v0, v[1:2], s[8:11], s6 idxen offen
53; CHECK-NEXT:    s_endpgm
54  %ret = call <2 x half> @llvm.amdgcn.struct.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
55  ret void
56}
57
58declare float @llvm.amdgcn.struct.buffer.atomic.fadd.f32(float, <4 x i32>, i32, i32, i32, i32 immarg) #0
59declare <2 x half> @llvm.amdgcn.struct.buffer.atomic.fadd.v2f16(<2 x half>, <4 x i32>, i32, i32, i32, i32 immarg) #0
60
61attributes #0 = { nounwind }
62