1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
2; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefixes=CHECK,AKF_CHECK %s
3; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-attributor < %s | FileCheck -check-prefixes=CHECK,ATTRIBUTOR_CHECK %s
4
5declare i32 @llvm.r600.read.tgid.x() #0
6declare i32 @llvm.r600.read.tgid.y() #0
7declare i32 @llvm.r600.read.tgid.z() #0
8
9declare i32 @llvm.r600.read.tidig.x() #0
10declare i32 @llvm.r600.read.tidig.y() #0
11declare i32 @llvm.r600.read.tidig.z() #0
12
13declare i32 @llvm.r600.read.local.size.x() #0
14declare i32 @llvm.r600.read.local.size.y() #0
15declare i32 @llvm.r600.read.local.size.z() #0
16
; A lone tgid.x read keeps the kernel in the baseline attribute group
; #[[ATTR1]] -- per the AKF_CHECK groups below, no amdgpu-work-group-id-x
; attribute is added (x is presumably always enabled, so none is needed).
define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tgid.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
28
; Reading tgid.y moves the kernel into a new attribute group #[[ATTR2]]
; (AKF: adds "amdgpu-work-group-id-y"; Attributor: drops
; "amdgpu-no-workgroup-id-y" -- see the check sections at the bottom).
define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tgid.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
40
; Two tgid.y reads must not produce a new attribute group: #[[ATTR2]] here is
; a back-reference (no ":[0-9]+" binding), i.e. the same group as @use_tgid_y.
; The volatile stores presumably keep both calls/stores from being merged.
define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
56
; Combining tgid.x with tgid.y yields the same group as y alone
; (back-reference #[[ATTR2]]): the x read contributes no attribute.
define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
72
; Reading tgid.z creates attribute group #[[ATTR3]]
; (AKF: "amdgpu-work-group-id-z"; Attributor: no "amdgpu-no-workgroup-id-z").
define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tgid.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
84
; tgid.x + tgid.z collapses to the z-only group (back-reference #[[ATTR3]]);
; again the x read adds nothing.
define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
100
; tgid.y + tgid.z requires both work-group-id attributes: new group #[[ATTR4]]
; (AKF: "amdgpu-work-group-id-y" "amdgpu-work-group-id-z").
define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
116
; All three tgid reads land in the same group as y+z (back-reference
; #[[ATTR4]]) because the x read contributes no attribute of its own.
define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  %val2 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}
136
; Like tgid.x, a tidig.x (work-item id x) read stays in the baseline group
; (back-reference #[[ATTR1]]) -- no work-item attribute is added for x.
define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tidig.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
148
; tidig.y introduces group #[[ATTR5]]
; (AKF: "amdgpu-work-item-id-y"; Attributor: no "amdgpu-no-workitem-id-y").
define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tidig.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
160
; tidig.z introduces group #[[ATTR6]]
; (AKF: "amdgpu-work-item-id-z"; Attributor: no "amdgpu-no-workitem-id-z").
define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.tidig.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
172
; Both x-dimension ids together still add nothing: baseline group
; (back-reference #[[ATTR1]]).
define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tgid.x()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
188
; Mixing work-item y and work-group y yields the combined group #[[ATTR7]]
; (AKF: "amdgpu-work-group-id-y" "amdgpu-work-item-id-y").
define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.y()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
204
; All three work-item ids: only y and z contribute, giving group #[[ATTR8]]
; (AKF: "amdgpu-work-item-id-y" "amdgpu-work-item-id-z").
define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}
224
; All six id intrinsics at once: the union of the non-x attributes, group
; #[[ATTR9]] (AKF: work-group-id-y/z + work-item-id-y/z; the x reads are
; still free).
define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_all_workitems
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT:    [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT:    [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT:    [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  %val3 = call i32 @llvm.r600.read.tgid.x()
  %val4 = call i32 @llvm.r600.read.tgid.y()
  %val5 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  store volatile i32 %val3, i32 addrspace(1)* %ptr
  store volatile i32 %val4, i32 addrspace(1)* %ptr
  store volatile i32 %val5, i32 addrspace(1)* %ptr
  ret void
}
256
; The r600 local.size.x read adds no attribute on this (non-HSA) triple:
; the kernel stays in the baseline group (back-reference #[[ATTR1]]).
define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.x()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.local.size.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
268
; Same as local.size.x: no attribute change, baseline group
; (back-reference #[[ATTR1]]).
define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.y()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.local.size.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
280
; Same as local.size.x/y: no attribute change, baseline group
; (back-reference #[[ATTR1]]).
define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.z()
; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT:    ret void
;
  %val = call i32 @llvm.r600.read.local.size.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
292
293attributes #0 = { nounwind readnone }
294attributes #1 = { nounwind }
295
; NOTE: stale "ALL:"/"NOHSA:" attribute-check lines removed -- neither prefix
; appears in any RUN line above, so FileCheck never evaluated them; the live,
; autogenerated checks are the AKF_CHECK/ATTRIBUTOR_CHECK sections below.
316;.
317; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
318; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
319; AKF_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" }
320; AKF_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" }
321; AKF_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" }
322; AKF_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" }
323; AKF_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" }
324; AKF_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" }
325; AKF_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
326; AKF_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
327;.
328; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
329; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
330; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
331; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
332; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
333; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
334; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
335; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
336; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
337; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
338;.
339