; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Ensure there are two if.break calls, one for the inner loop and one for the outer loop.
; FIXME: duplicate comparison
define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
; OPT-LABEL: @multi_else_break(
; OPT-NEXT:  main_body:
; OPT-NEXT:    br label [[LOOP_OUTER:%.*]]
; OPT:       LOOP.outer:
; OPT-NEXT:    [[PHI_BROKEN2:%.*]] = phi i64 [ [[TMP9:%.*]], [[FLOW1:%.*]] ], [ 0, [[MAIN_BODY:%.*]] ]
; OPT-NEXT:    [[TMP43:%.*]] = phi i32 [ 0, [[MAIN_BODY]] ], [ [[TMP4:%.*]], [[FLOW1]] ]
; OPT-NEXT:    br label [[LOOP:%.*]]
; OPT:       LOOP:
; OPT-NEXT:    [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP7:%.*]], [[FLOW:%.*]] ], [ 0, [[LOOP_OUTER]] ]
; OPT-NEXT:    [[TMP0:%.*]] = phi i32 [ undef, [[LOOP_OUTER]] ], [ [[TMP4]], [[FLOW]] ]
; OPT-NEXT:    [[TMP45:%.*]] = phi i32 [ [[TMP43]], [[LOOP_OUTER]] ], [ [[TMP47:%.*]], [[FLOW]] ]
; OPT-NEXT:    [[TMP47]] = add i32 [[TMP45]], 1
; OPT-NEXT:    [[TMP48:%.*]] = icmp slt i32 [[TMP45]], [[UB:%.*]]
; OPT-NEXT:    [[TMP1:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP48]])
; OPT-NEXT:    [[TMP2:%.*]] = extractvalue { i1, i64 } [[TMP1]], 0
; OPT-NEXT:    [[TMP3:%.*]] = extractvalue { i1, i64 } [[TMP1]], 1
; OPT-NEXT:    br i1 [[TMP2]], label [[ENDIF:%.*]], label [[FLOW]]
; OPT:       Flow:
; OPT-NEXT:    [[TMP4]] = phi i32 [ [[TMP47]], [[ENDIF]] ], [ [[TMP0]], [[LOOP]] ]
; OPT-NEXT:    [[TMP5:%.*]] = phi i1 [ [[TMP51:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
; OPT-NEXT:    [[TMP6:%.*]] = phi i1 [ [[TMP51_INV:%.*]], [[ENDIF]] ], [ true, [[LOOP]] ]
; OPT-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP3]])
; OPT-NEXT:    [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]])
; OPT-NEXT:    [[TMP8:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP7]])
; OPT-NEXT:    [[TMP9]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP5]], i64 [[PHI_BROKEN2]])
; OPT-NEXT:    br i1 [[TMP8]], label [[FLOW1]], label [[LOOP]]
; OPT:       Flow1:
; OPT-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]])
; OPT-NEXT:    [[TMP10:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP9]])
; OPT-NEXT:    br i1 [[TMP10]], label [[IF:%.*]], label [[LOOP_OUTER]]
; OPT:       IF:
; OPT-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP9]])
; OPT-NEXT:    ret void
; OPT:       ENDIF:
; OPT-NEXT:    [[TMP51]] = icmp eq i32 [[TMP47]], [[CONT:%.*]]
; OPT-NEXT:    [[TMP51_INV]] = xor i1 [[TMP51]], true
; OPT-NEXT:    br label [[FLOW]]
;
; GCN-LABEL: multi_else_break:
; GCN:       ; %bb.0: ; %main_body
; GCN-NEXT:    s_mov_b64 s[0:1], 0
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_branch .LBB0_2
; GCN-NEXT:  .LBB0_1: ; %loop.exit.guard
; GCN-NEXT:    ; in Loop: Header=BB0_2 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT:    s_cbranch_execz .LBB0_6
; GCN-NEXT:  .LBB0_2: ; %LOOP.outer
; GCN-NEXT:    ; =>This Loop Header: Depth=1
; GCN-NEXT:    ; Child Loop BB0_4 Depth 2
; GCN-NEXT:    ; implicit-def: $sgpr6_sgpr7
; GCN-NEXT:    ; implicit-def: $sgpr2_sgpr3
; GCN-NEXT:    s_mov_b64 s[4:5], 0
; GCN-NEXT:    s_branch .LBB0_4
; GCN-NEXT:  .LBB0_3: ; %Flow
; GCN-NEXT:    ; in Loop: Header=BB0_4 Depth=2
; GCN-NEXT:    s_or_b64 exec, exec, s[8:9]
; GCN-NEXT:    s_and_b64 s[8:9], exec, s[6:7]
; GCN-NEXT:    s_or_b64 s[4:5], s[8:9], s[4:5]
; GCN-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_cbranch_execz .LBB0_1
; GCN-NEXT:  .LBB0_4: ; %LOOP
; GCN-NEXT:    ; Parent Loop BB0_2 Depth=1
; GCN-NEXT:    ; => This Inner Loop Header: Depth=2
; GCN-NEXT:    v_cmp_lt_i32_e32 vcc, v0, v4
; GCN-NEXT:    s_or_b64 s[2:3], s[2:3], exec
; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], exec
; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
; GCN-NEXT:    s_cbranch_execz .LBB0_3
; GCN-NEXT:  ; %bb.5: ; %ENDIF
; GCN-NEXT:    ; in Loop: Header=BB0_4 Depth=2
; GCN-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
; GCN-NEXT:    s_andn2_b64 s[2:3], s[2:3], exec
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, v5, v0
; GCN-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec
; GCN-NEXT:    s_and_b64 s[10:11], vcc, exec
; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[10:11]
; GCN-NEXT:    s_branch .LBB0_3
; GCN-NEXT:  .LBB0_6: ; %IF
; GCN-NEXT:    s_endpgm
main_body:
  br label %LOOP.outer

LOOP.outer:                                       ; preds = %ENDIF, %main_body
  %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
  br label %LOOP

LOOP:                                             ; preds = %ENDIF, %LOOP.outer
  %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
  %tmp47 = add i32 %tmp45, 1
  %tmp48 = icmp slt i32 %tmp45, %ub
  br i1 %tmp48, label %ENDIF, label %IF

IF:                                               ; preds = %LOOP
  ret void

ENDIF:                                            ; preds = %LOOP
  %tmp51 = icmp eq i32 %tmp47, %cont
  br i1 %tmp51, label %LOOP, label %LOOP.outer
}

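; Break out of a single loop from the switch default and from both switch
; cases. The per-case break conditions are merged through phis in the Flow
; blocks and feed a single if.break call for the loop.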
define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
; OPT-LABEL: @multi_if_break_loop(
; OPT-NEXT:  bb:
; OPT-NEXT:    [[ID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; OPT-NEXT:    [[TMP:%.*]] = sub i32 [[ID]], [[ARG:%.*]]
; OPT-NEXT:    br label [[BB1:%.*]]
; OPT:       bb1:
; OPT-NEXT:    [[PHI_BROKEN:%.*]] = phi i64 [ [[TMP4:%.*]], [[FLOW4:%.*]] ], [ 0, [[BB:%.*]] ]
; OPT-NEXT:    [[LSR_IV:%.*]] = phi i32 [ undef, [[BB]] ], [ [[LSR_IV_NEXT:%.*]], [[FLOW4]] ]
; OPT-NEXT:    [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], 1
; OPT-NEXT:    [[CMP0:%.*]] = icmp slt i32 [[LSR_IV_NEXT]], 0
; OPT-NEXT:    [[LOAD0:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT:    br label [[NODEBLOCK:%.*]]
; OPT:       NodeBlock:
; OPT-NEXT:    [[PIVOT:%.*]] = icmp slt i32 [[LOAD0]], 1
; OPT-NEXT:    [[PIVOT_INV:%.*]] = xor i1 [[PIVOT]], true
; OPT-NEXT:    br i1 [[PIVOT_INV]], label [[LEAFBLOCK1:%.*]], label [[FLOW:%.*]]
; OPT:       LeafBlock1:
; OPT-NEXT:    [[SWITCHLEAF2:%.*]] = icmp eq i32 [[LOAD0]], 1
; OPT-NEXT:    br i1 [[SWITCHLEAF2]], label [[CASE1:%.*]], label [[FLOW3:%.*]]
; OPT:       Flow3:
; OPT-NEXT:    [[TMP0:%.*]] = phi i1 [ [[CMP2_INV:%.*]], [[CASE1]] ], [ true, [[LEAFBLOCK1]] ]
; OPT-NEXT:    [[TMP1:%.*]] = phi i1 [ false, [[CASE1]] ], [ true, [[LEAFBLOCK1]] ]
; OPT-NEXT:    br label [[FLOW]]
; OPT:       LeafBlock:
; OPT-NEXT:    [[SWITCHLEAF:%.*]] = icmp eq i32 [[LOAD0]], 0
; OPT-NEXT:    br i1 [[SWITCHLEAF]], label [[CASE0:%.*]], label [[FLOW5:%.*]]
; OPT:       Flow4:
; OPT-NEXT:    [[TMP2:%.*]] = phi i1 [ [[TMP9:%.*]], [[FLOW5]] ], [ [[TMP6:%.*]], [[FLOW]] ]
; OPT-NEXT:    [[TMP3:%.*]] = phi i1 [ [[TMP10:%.*]], [[FLOW5]] ], [ [[TMP7:%.*]], [[FLOW]] ]
; OPT-NEXT:    [[TMP4]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP2]], i64 [[PHI_BROKEN]])
; OPT-NEXT:    [[TMP5:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP4]])
; OPT-NEXT:    br i1 [[TMP5]], label [[FLOW6:%.*]], label [[BB1]]
; OPT:       case0:
; OPT-NEXT:    [[LOAD1:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[TMP]], [[LOAD1]]
; OPT-NEXT:    [[CMP1_INV:%.*]] = xor i1 [[CMP1]], true
; OPT-NEXT:    br label [[FLOW5]]
; OPT:       Flow:
; OPT-NEXT:    [[TMP6]] = phi i1 [ [[TMP0]], [[FLOW3]] ], [ true, [[NODEBLOCK]] ]
; OPT-NEXT:    [[TMP7]] = phi i1 [ [[TMP1]], [[FLOW3]] ], [ false, [[NODEBLOCK]] ]
; OPT-NEXT:    [[TMP8:%.*]] = phi i1 [ false, [[FLOW3]] ], [ true, [[NODEBLOCK]] ]
; OPT-NEXT:    br i1 [[TMP8]], label [[LEAFBLOCK:%.*]], label [[FLOW4]]
; OPT:       case1:
; OPT-NEXT:    [[LOAD2:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP]], [[LOAD2]]
; OPT-NEXT:    [[CMP2_INV]] = xor i1 [[CMP2]], true
; OPT-NEXT:    br label [[FLOW3]]
; OPT:       Flow5:
; OPT-NEXT:    [[TMP9]] = phi i1 [ [[CMP1_INV]], [[CASE0]] ], [ [[TMP6]], [[LEAFBLOCK]] ]
; OPT-NEXT:    [[TMP10]] = phi i1 [ false, [[CASE0]] ], [ true, [[LEAFBLOCK]] ]
; OPT-NEXT:    br label [[FLOW4]]
; OPT:       Flow6:
; OPT-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP4]])
; OPT-NEXT:    [[TMP11:%.*]] = call { i1, i64 } @llvm.amdgcn.if.i64(i1 [[TMP3]])
; OPT-NEXT:    [[TMP12:%.*]] = extractvalue { i1, i64 } [[TMP11]], 0
; OPT-NEXT:    [[TMP13:%.*]] = extractvalue { i1, i64 } [[TMP11]], 1
; OPT-NEXT:    br i1 [[TMP12]], label [[NEWDEFAULT:%.*]], label [[BB9:%.*]]
; OPT:       NewDefault:
; OPT-NEXT:    br label [[BB9]]
; OPT:       bb9:
; OPT-NEXT:    call void @llvm.amdgcn.end.cf.i64(i64 [[TMP13]])
; OPT-NEXT:    ret void
;
; GCN-LABEL: multi_if_break_loop:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dword s2, s[0:1], 0x9
; GCN-NEXT:    s_mov_b64 s[0:1], 0
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    ; implicit-def: $sgpr4_sgpr5
; GCN-NEXT:    s_branch .LBB1_2
; GCN-NEXT:  .LBB1_1: ; %Flow4
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    s_and_b64 s[6:7], exec, s[6:7]
; GCN-NEXT:    s_or_b64 s[0:1], s[6:7], s[0:1]
; GCN-NEXT:    s_andn2_b64 s[4:5], s[4:5], exec
; GCN-NEXT:    s_and_b64 s[6:7], s[8:9], exec
; GCN-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-NEXT:    s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT:    s_cbranch_execz .LBB1_9
; GCN-NEXT:  .LBB1_2: ; %bb1
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], 0 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_gt_i32_e32 vcc, 1, v1
; GCN-NEXT:    s_mov_b64 s[6:7], -1
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    ; implicit-def: $sgpr8_sgpr9
; GCN-NEXT:    s_mov_b64 s[10:11], -1
; GCN-NEXT:    s_cbranch_vccnz .LBB1_6
; GCN-NEXT:  ; %bb.3: ; %LeafBlock1
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    s_mov_b64 s[6:7], -1
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    s_mov_b64 s[8:9], -1
; GCN-NEXT:    s_cbranch_vccz .LBB1_5
; GCN-NEXT:  ; %bb.4: ; %case1
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    buffer_load_dword v2, off, s[0:3], 0 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_ge_i32_e32 vcc, v0, v2
; GCN-NEXT:    s_mov_b64 s[8:9], 0
; GCN-NEXT:    s_orn2_b64 s[6:7], vcc, exec
; GCN-NEXT:  .LBB1_5: ; %Flow3
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    s_mov_b64 s[10:11], 0
; GCN-NEXT:  .LBB1_6: ; %Flow
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    s_and_b64 vcc, exec, s[10:11]
; GCN-NEXT:    s_cbranch_vccz .LBB1_1
; GCN-NEXT:  ; %bb.7: ; %LeafBlock
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
; GCN-NEXT:    s_and_b64 vcc, exec, vcc
; GCN-NEXT:    s_mov_b64 s[8:9], -1
; GCN-NEXT:    s_cbranch_vccz .LBB1_1
; GCN-NEXT:  ; %bb.8: ; %case0
; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], 0 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_mov_b64 s[8:9], 0
; GCN-NEXT:    v_cmp_ge_i32_e32 vcc, v0, v1
; GCN-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec
; GCN-NEXT:    s_and_b64 s[10:11], vcc, exec
; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[10:11]
; GCN-NEXT:    s_branch .LBB1_1
; GCN-NEXT:  .LBB1_9: ; %loop.exit.guard
; GCN-NEXT:    s_or_b64 exec, exec, s[0:1]
; GCN-NEXT:    s_and_saveexec_b64 s[0:1], s[4:5]
; GCN-NEXT:    s_xor_b64 s[0:1], exec, s[0:1]
; GCN-NEXT:    s_endpgm
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:
  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
  switch i32 %load0, label %bb9 [
  i32 0, label %case0
  i32 1, label %case1
  ]

case0:
  %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp slt i32 %tmp, %load1
  br i1 %cmp1, label %bb1, label %bb9

case1:
  %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp2 = icmp slt i32 %tmp, %load2
  br i1 %cmp2, label %bb1, label %bb9

bb9:
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }