; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; After structurizing, there are 3 levels of loops. The i1 phi
; conditions mutually depend on each other, so it isn't safe to delete
; the condition that appears to have no uses until the loop is
; completely processed.
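;
; For illustration only (a hand-written sketch, not part of the checked
; output; the block and value names below are made up): the problematic
; shape is a pair of i1 phis that feed each other across the loop, so a
; phi that looks dead in isolation still has a use through the other phi
; until the whole loop has been rewritten:
;
;   header:
;     %cond.a = phi i1 [ true, %entry ], [ %cond.b, %latch ]
;     br i1 %cond.a, label %body, label %latch
;   latch:
;     %cond.b = phi i1 [ false, %body ], [ %cond.a, %header ]
;     br i1 %cond.b, label %header, label %exit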

define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* nocapture %arg) #0 {
; GCN-LABEL: reduced_nested_loop_conditions:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dword s0, s[0:1], 0x9
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    s_mov_b32 m0, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
; GCN-NEXT:    ds_read_b64 v[0:1], v0
; GCN-NEXT:    s_mov_b32 s0, 0
; GCN-NEXT:    s_and_b64 vcc, exec, 0
; GCN-NEXT:  BB0_1: ; %bb5
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_cmp_lg_u32 s0, 1
; GCN-NEXT:    s_cbranch_scc0 BB0_3
; GCN-NEXT:  ; %bb.2: ; %bb10
; GCN-NEXT:    ; in Loop: Header=BB0_1 Depth=1
; GCN-NEXT:    ; implicit-def: $sgpr0
; GCN-NEXT:    s_cbranch_vccnz BB0_1
; GCN-NEXT:    s_branch BB0_5
; GCN-NEXT:  BB0_3: ; %bb8
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    ds_read_b32 v0, v0
; GCN-NEXT:    s_and_b64 vcc, exec, 0
; GCN-NEXT:  BB0_4: ; %bb9
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_cbranch_vccz BB0_4
; GCN-NEXT:  BB0_5: ; %DummyReturnBlock
; GCN-NEXT:    s_endpgm
; IR-LABEL: @reduced_nested_loop_conditions(
; IR-NEXT:  bb:
; IR-NEXT:    [[MY_TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() #4
; IR-NEXT:    [[MY_TMP1:%.*]] = getelementptr inbounds i64, i64 addrspace(3)* [[ARG:%.*]], i32 [[MY_TMP]]
; IR-NEXT:    [[MY_TMP2:%.*]] = load volatile i64, i64 addrspace(3)* [[MY_TMP1]]
; IR-NEXT:    br label [[BB5:%.*]]
; IR:       bb3:
; IR-NEXT:    br i1 true, label [[BB4:%.*]], label [[BB13:%.*]]
; IR:       bb4:
; IR-NEXT:    br label [[FLOW:%.*]]
; IR:       bb5:
; IR-NEXT:    [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP6:%.*]], [[BB10:%.*]] ], [ 0, [[BB:%.*]] ]
; IR-NEXT:    [[MY_TMP6:%.*]] = phi i32 [ 0, [[BB]] ], [ [[TMP5:%.*]], [[BB10]] ]
; IR-NEXT:    [[MY_TMP7:%.*]] = icmp eq i32 [[MY_TMP6]], 1
; IR-NEXT:    [[TMP0:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[MY_TMP7]])
; IR-NEXT:    [[TMP1:%.*]] = extractvalue { i1, i64 } [[TMP0]], 0
; IR-NEXT:    [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP0]], 1
; IR-NEXT:    br i1 [[TMP1]], label [[BB8:%.*]], label [[FLOW]]
; IR:       bb8:
; IR-NEXT:    br label [[BB13]]
; IR:       bb9:
; IR-NEXT:    br i1 false, label [[BB3:%.*]], label [[BB9:%.*]]
; IR:       bb10:
; IR-NEXT:    [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP6]])
; IR-NEXT:    br i1 [[TMP3]], label [[BB23:%.*]], label [[BB5]]
; IR:       Flow:
; IR-NEXT:    [[TMP4:%.*]] = phi i1 [ [[MY_TMP22:%.*]], [[BB4]] ], [ true, [[BB5]] ]
; IR-NEXT:    [[TMP5]] = phi i32 [ [[MY_TMP21:%.*]], [[BB4]] ], [ undef, [[BB5]] ]
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; IR-NEXT:    [[TMP6]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP4]], i64 [[PHI_BROKEN]])
; IR-NEXT:    br label [[BB10]]
; IR:       bb13:
; IR-NEXT:    [[MY_TMP14:%.*]] = phi i1 [ [[MY_TMP22]], [[BB3]] ], [ true, [[BB8]] ]
; IR-NEXT:    [[MY_TMP15:%.*]] = bitcast i64 [[MY_TMP2]] to <2 x i32>
; IR-NEXT:    br i1 [[MY_TMP14]], label [[BB16:%.*]], label [[BB20:%.*]]
; IR:       bb16:
; IR-NEXT:    [[MY_TMP17:%.*]] = extractelement <2 x i32> [[MY_TMP15]], i64 1
; IR-NEXT:    [[MY_TMP18:%.*]] = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 [[MY_TMP17]]
; IR-NEXT:    [[MY_TMP19:%.*]] = load volatile i32, i32 addrspace(3)* [[MY_TMP18]]
; IR-NEXT:    br label [[BB20]]
; IR:       bb20:
; IR-NEXT:    [[MY_TMP21]] = phi i32 [ [[MY_TMP19]], [[BB16]] ], [ 0, [[BB13]] ]
; IR-NEXT:    [[MY_TMP22]] = phi i1 [ false, [[BB16]] ], [ [[MY_TMP14]], [[BB13]] ]
; IR-NEXT:    br label [[BB9]]
; IR:       bb23:
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP6]])
; IR-NEXT:    ret void
;
bb:
  %my.tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %my.tmp1 = getelementptr inbounds i64, i64 addrspace(3)* %arg, i32 %my.tmp
  %my.tmp2 = load volatile i64, i64 addrspace(3)* %my.tmp1
  br label %bb5

bb3:                                              ; preds = %bb9
  br i1 true, label %bb4, label %bb13

bb4:                                              ; preds = %bb3
  br label %bb10

bb5:                                              ; preds = %bb10, %bb
  %my.tmp6 = phi i32 [ 0, %bb ], [ %my.tmp11, %bb10 ]
  %my.tmp7 = icmp eq i32 %my.tmp6, 1
  br i1 %my.tmp7, label %bb8, label %bb10

bb8:                                              ; preds = %bb5
  br label %bb13

bb9:                                              ; preds = %bb20, %bb9
  br i1 false, label %bb3, label %bb9

bb10:                                             ; preds = %bb5, %bb4
  %my.tmp11 = phi i32 [ %my.tmp21, %bb4 ], [ undef, %bb5 ]
  %my.tmp12 = phi i1 [ %my.tmp22, %bb4 ], [ true, %bb5 ]
  br i1 %my.tmp12, label %bb23, label %bb5

bb13:                                             ; preds = %bb8, %bb3
  %my.tmp14 = phi i1 [ %my.tmp22, %bb3 ], [ true, %bb8 ]
  %my.tmp15 = bitcast i64 %my.tmp2 to <2 x i32>
  br i1 %my.tmp14, label %bb16, label %bb20

bb16:                                             ; preds = %bb13
  %my.tmp17 = extractelement <2 x i32> %my.tmp15, i64 1
  %my.tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %my.tmp17
  %my.tmp19 = load volatile i32, i32 addrspace(3)* %my.tmp18
  br label %bb20

bb20:                                             ; preds = %bb16, %bb13
  %my.tmp21 = phi i32 [ %my.tmp19, %bb16 ], [ 0, %bb13 ]
  %my.tmp22 = phi i1 [ false, %bb16 ], [ %my.tmp14, %bb13 ]
  br label %bb9

bb23:                                             ; preds = %bb10
  ret void
}

; Earlier version of above, before a run of the structurizer.

define amdgpu_kernel void @nested_loop_conditions(i64 addrspace(1)* nocapture %arg) #0 {
; GCN-LABEL: nested_loop_conditions:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_lt_i32_e32 vcc, 8, v0
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    s_cbranch_vccnz BB1_6
; GCN-NEXT:  ; %bb.1: ; %bb14.lr.ph
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_branch BB1_3
; GCN-NEXT:  BB1_2: ; in Loop: Header=BB1_3 Depth=1
; GCN-NEXT:    s_mov_b64 s[0:1], -1
; GCN-NEXT:    ; implicit-def: $vgpr0
; GCN-NEXT:    s_cbranch_execnz BB1_6
; GCN-NEXT:  BB1_3: ; %bb14
; GCN-NEXT:    ; =>This Loop Header: Depth=1
; GCN-NEXT:    ; Child Loop BB1_4 Depth 2
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    s_cbranch_vccnz BB1_2
; GCN-NEXT:  BB1_4: ; %bb18
; GCN-NEXT:    ; Parent Loop BB1_3 Depth=1
; GCN-NEXT:    ; => This Inner Loop Header: Depth=2
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_lt_i32_e32 vcc, 8, v0
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    s_cbranch_vccnz BB1_4
; GCN-NEXT:  ; %bb.5: ; %bb21
; GCN-NEXT:    ; in Loop: Header=BB1_3 Depth=1
; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_lt_i32_e64 s[0:1], 8, v1
; GCN-NEXT:    s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT:    s_cbranch_vccz BB1_3
; GCN-NEXT:  BB1_6: ; %bb31
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
; IR-LABEL: @nested_loop_conditions(
; IR-NEXT:  bb:
; IR-NEXT:    [[MY_TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() #4
; IR-NEXT:    [[MY_TMP1:%.*]] = zext i32 [[MY_TMP]] to i64
; IR-NEXT:    [[MY_TMP2:%.*]] = getelementptr inbounds i64, i64 addrspace(1)* [[ARG:%.*]], i64 [[MY_TMP1]]
; IR-NEXT:    [[MY_TMP3:%.*]] = load i64, i64 addrspace(1)* [[MY_TMP2]], align 16
; IR-NEXT:    [[MY_TMP932:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
; IR-NEXT:    [[MY_TMP1033:%.*]] = extractelement <4 x i32> [[MY_TMP932]], i64 0
; IR-NEXT:    [[MY_TMP1134:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT:    [[MY_TMP1235:%.*]] = icmp slt i32 [[MY_TMP1134]], 9
; IR-NEXT:    br i1 [[MY_TMP1235]], label [[BB14_LR_PH:%.*]], label [[FLOW:%.*]]
; IR:       bb14.lr.ph:
; IR-NEXT:    br label [[BB14:%.*]]
; IR:       Flow3:
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP21:%.*]])
; IR-NEXT:    [[TMP0:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP14:%.*]])
; IR-NEXT:    [[TMP1:%.*]] = extractvalue { i1, i64 } [[TMP0]], 0
; IR-NEXT:    [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP0]], 1
; IR-NEXT:    br i1 [[TMP1]], label [[BB4_BB13_CRIT_EDGE:%.*]], label [[FLOW4:%.*]]
; IR:       bb4.bb13_crit_edge:
; IR-NEXT:    br label [[FLOW4]]
; IR:       Flow4:
; IR-NEXT:    [[TMP3:%.*]] = phi i1 [ true, [[BB4_BB13_CRIT_EDGE]] ], [ false, [[FLOW3:%.*]] ]
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP2]])
; IR-NEXT:    br label [[FLOW]]
; IR:       bb13:
; IR-NEXT:    br label [[BB31:%.*]]
; IR:       Flow:
; IR-NEXT:    [[TMP4:%.*]] = phi i1 [ [[TMP3]], [[FLOW4]] ], [ true, [[BB:%.*]] ]
; IR-NEXT:    [[TMP5:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP4]])
; IR-NEXT:    [[TMP6:%.*]] = extractvalue { i1, i64 } [[TMP5]], 0
; IR-NEXT:    [[TMP7:%.*]] = extractvalue { i1, i64 } [[TMP5]], 1
; IR-NEXT:    br i1 [[TMP6]], label [[BB13:%.*]], label [[BB31]]
; IR:       bb14:
; IR-NEXT:    [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP16:%.*]], [[FLOW1:%.*]] ], [ 0, [[BB14_LR_PH]] ]
; IR-NEXT:    [[MY_TMP1037:%.*]] = phi i32 [ [[MY_TMP1033]], [[BB14_LR_PH]] ], [ [[TMP12:%.*]], [[FLOW1]] ]
; IR-NEXT:    [[MY_TMP936:%.*]] = phi <4 x i32> [ [[MY_TMP932]], [[BB14_LR_PH]] ], [ [[TMP11:%.*]], [[FLOW1]] ]
; IR-NEXT:    [[MY_TMP15:%.*]] = icmp eq i32 [[MY_TMP1037]], 1
; IR-NEXT:    [[TMP8:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[MY_TMP15]])
; IR-NEXT:    [[TMP9:%.*]] = extractvalue { i1, i64 } [[TMP8]], 0
; IR-NEXT:    [[TMP10:%.*]] = extractvalue { i1, i64 } [[TMP8]], 1
; IR-NEXT:    br i1 [[TMP9]], label [[BB16:%.*]], label [[FLOW1]]
; IR:       bb16:
; IR-NEXT:    [[MY_TMP17:%.*]] = bitcast i64 [[MY_TMP3]] to <2 x i32>
; IR-NEXT:    br label [[BB18:%.*]]
; IR:       Flow1:
; IR-NEXT:    [[TMP11]] = phi <4 x i32> [ [[MY_TMP9:%.*]], [[BB21:%.*]] ], [ undef, [[BB14]] ]
; IR-NEXT:    [[TMP12]] = phi i32 [ [[MY_TMP10:%.*]], [[BB21]] ], [ undef, [[BB14]] ]
; IR-NEXT:    [[TMP13:%.*]] = phi i1 [ [[TMP18:%.*]], [[BB21]] ], [ true, [[BB14]] ]
; IR-NEXT:    [[TMP14]] = phi i1 [ [[TMP18]], [[BB21]] ], [ false, [[BB14]] ]
; IR-NEXT:    [[TMP15:%.*]] = phi i1 [ false, [[BB21]] ], [ true, [[BB14]] ]
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP10]])
; IR-NEXT:    [[TMP16]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP13]], i64 [[PHI_BROKEN]])
; IR-NEXT:    [[TMP17:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP16]])
; IR-NEXT:    br i1 [[TMP17]], label [[FLOW2:%.*]], label [[BB14]]
; IR:       bb18:
; IR-NEXT:    [[MY_TMP19:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT:    [[MY_TMP20:%.*]] = icmp slt i32 [[MY_TMP19]], 9
; IR-NEXT:    br i1 [[MY_TMP20]], label [[BB21]], label [[BB18]]
; IR:       bb21:
; IR-NEXT:    [[MY_TMP22:%.*]] = extractelement <2 x i32> [[MY_TMP17]], i64 1
; IR-NEXT:    [[MY_TMP23:%.*]] = lshr i32 [[MY_TMP22]], 16
; IR-NEXT:    [[MY_TMP24:%.*]] = select i1 undef, i32 undef, i32 [[MY_TMP23]]
; IR-NEXT:    [[MY_TMP25:%.*]] = uitofp i32 [[MY_TMP24]] to float
; IR-NEXT:    [[MY_TMP26:%.*]] = fmul float [[MY_TMP25]], 0x3EF0001000000000
; IR-NEXT:    [[MY_TMP27:%.*]] = fsub float [[MY_TMP26]], undef
; IR-NEXT:    [[MY_TMP28:%.*]] = fcmp olt float [[MY_TMP27]], 5.000000e-01
; IR-NEXT:    [[MY_TMP29:%.*]] = select i1 [[MY_TMP28]], i64 1, i64 2
; IR-NEXT:    [[MY_TMP30:%.*]] = extractelement <4 x i32> [[MY_TMP936]], i64 [[MY_TMP29]]
; IR-NEXT:    [[MY_TMP7:%.*]] = zext i32 [[MY_TMP30]] to i64
; IR-NEXT:    [[MY_TMP8:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 [[MY_TMP7]]
; IR-NEXT:    [[MY_TMP9]] = load <4 x i32>, <4 x i32> addrspace(1)* [[MY_TMP8]], align 16
; IR-NEXT:    [[MY_TMP10]] = extractelement <4 x i32> [[MY_TMP9]], i64 0
; IR-NEXT:    [[MY_TMP11:%.*]] = load volatile i32, i32 addrspace(1)* undef
; IR-NEXT:    [[MY_TMP12:%.*]] = icmp slt i32 [[MY_TMP11]], 9
; IR-NEXT:    [[TMP18]] = xor i1 [[MY_TMP12]], true
; IR-NEXT:    br label [[FLOW1]]
; IR:       Flow2:
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP16]])
; IR-NEXT:    [[TMP19:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP15]])
; IR-NEXT:    [[TMP20:%.*]] = extractvalue { i1, i64 } [[TMP19]], 0
; IR-NEXT:    [[TMP21]] = extractvalue { i1, i64 } [[TMP19]], 1
; IR-NEXT:    br i1 [[TMP20]], label [[BB31_LOOPEXIT:%.*]], label [[FLOW3]]
; IR:       bb31.loopexit:
; IR-NEXT:    br label [[FLOW3]]
; IR:       bb31:
; IR-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]])
; IR-NEXT:    store volatile i32 0, i32 addrspace(1)* undef
; IR-NEXT:    ret void
;
bb:
  %my.tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %my.tmp1 = zext i32 %my.tmp to i64
  %my.tmp2 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %my.tmp1
  %my.tmp3 = load i64, i64 addrspace(1)* %my.tmp2, align 16
  %my.tmp932 = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
  %my.tmp1033 = extractelement <4 x i32> %my.tmp932, i64 0
  %my.tmp1134 = load volatile i32, i32 addrspace(1)* undef
  %my.tmp1235 = icmp slt i32 %my.tmp1134, 9
  br i1 %my.tmp1235, label %bb14.lr.ph, label %bb13

bb14.lr.ph:                                       ; preds = %bb
  br label %bb14

bb4.bb13_crit_edge:                               ; preds = %bb21
  br label %bb13

bb13:                                             ; preds = %bb4.bb13_crit_edge, %bb
  br label %bb31

bb14:                                             ; preds = %bb21, %bb14.lr.ph
  %my.tmp1037 = phi i32 [ %my.tmp1033, %bb14.lr.ph ], [ %my.tmp10, %bb21 ]
  %my.tmp936 = phi <4 x i32> [ %my.tmp932, %bb14.lr.ph ], [ %my.tmp9, %bb21 ]
  %my.tmp15 = icmp eq i32 %my.tmp1037, 1
  br i1 %my.tmp15, label %bb16, label %bb31.loopexit

bb16:                                             ; preds = %bb14
  %my.tmp17 = bitcast i64 %my.tmp3 to <2 x i32>
  br label %bb18

bb18:                                             ; preds = %bb18, %bb16
  %my.tmp19 = load volatile i32, i32 addrspace(1)* undef
  %my.tmp20 = icmp slt i32 %my.tmp19, 9
  br i1 %my.tmp20, label %bb21, label %bb18

bb21:                                             ; preds = %bb18
  %my.tmp22 = extractelement <2 x i32> %my.tmp17, i64 1
  %my.tmp23 = lshr i32 %my.tmp22, 16
  %my.tmp24 = select i1 undef, i32 undef, i32 %my.tmp23
  %my.tmp25 = uitofp i32 %my.tmp24 to float
  %my.tmp26 = fmul float %my.tmp25, 0x3EF0001000000000
  %my.tmp27 = fsub float %my.tmp26, undef
  %my.tmp28 = fcmp olt float %my.tmp27, 5.000000e-01
  %my.tmp29 = select i1 %my.tmp28, i64 1, i64 2
  %my.tmp30 = extractelement <4 x i32> %my.tmp936, i64 %my.tmp29
  %my.tmp7 = zext i32 %my.tmp30 to i64
  %my.tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %my.tmp7
  %my.tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %my.tmp8, align 16
  %my.tmp10 = extractelement <4 x i32> %my.tmp9, i64 0
  %my.tmp11 = load volatile i32, i32 addrspace(1)* undef
  %my.tmp12 = icmp slt i32 %my.tmp11, 9
  br i1 %my.tmp12, label %bb14, label %bb4.bb13_crit_edge

bb31.loopexit:                                    ; preds = %bb14
  br label %bb31

bb31:                                             ; preds = %bb31.loopexit, %bb13
  store volatile i32 0, i32 addrspace(1)* undef
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }