1; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
2; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
3
4; OPT-LABEL: {{^}}define amdgpu_vs void @multi_else_break(
5; OPT: main_body:
6; OPT: LOOP.outer:
7; OPT: LOOP:
8; OPT:     [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if(
9; OPT:     [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
10;
11; OPT: Flow:
12;
13; Ensure two else.break calls, for both the inner and outer loops
14
15; OPT:        call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
16; OPT-NEXT:   call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
17; OPT-NEXT:   call void @llvm.amdgcn.end.cf
18;
19; OPT: Flow1:
20
21; GCN-LABEL: {{^}}multi_else_break:
22
23; GCN: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP.outer{{$}}
24
25; GCN: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP{{$}}
26; GCN: s_and_saveexec_b64 [[SAVE_BREAK:s\[[0-9]+:[0-9]+\]]], vcc
27
28; GCN: BB{{[0-9]+}}_{{[0-9]+}}: ; %Flow{{$}}
29; GCN-NEXT: ; in Loop: Header=[[INNER_LOOP]] Depth=2
30
; Ensure the extra 'or' is eliminated
32; GCN-NEXT: s_or_b64 exec, exec, [[SAVE_BREAK]]
33; GCN-NEXT: s_or_b64 [[OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
34; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]]
35; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]]
36
37; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}}
38; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1
39
40; Ensure copy is eliminated
41; GCN-NEXT: s_or_b64 exec, exec, [[OR_BREAK]]
42; GCN-NEXT: s_or_b64 [[OUTER_OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
43; GCN-NEXT: s_andn2_b64 exec, exec, [[OUTER_OR_BREAK]]
44; GCN-NEXT: s_cbranch_execnz [[OUTER_LOOP]]
; Nested loop where the inner loop can break out of both levels at once:
; from %LOOP control either continues the inner loop, restarts the outer
; loop, or leaves the function via %IF. This exercises multi-level break
; handling in StructurizeCFG / SIAnnotateControlFlow (the two
; llvm.amdgcn.else.break calls checked above).
define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
main_body:
  br label %LOOP.outer

LOOP.outer:                                       ; preds = %ENDIF, %main_body
  ; Counter carried across outer-loop iterations; resumes from the value
  ; the inner loop reached when %ENDIF branched back here.
  %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
  br label %LOOP

LOOP:                                             ; preds = %ENDIF, %LOOP.outer
  %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
  %tmp47 = add i32 %tmp45, 1
  ; Once the counter reaches the bound %ub, exit the function through %IF.
  %tmp48 = icmp slt i32 %tmp45, %ub
  br i1 %tmp48, label %ENDIF, label %IF

IF:                                               ; preds = %LOOP
  ret void

ENDIF:                                            ; preds = %LOOP
  ; Keep iterating the inner loop while the incremented counter equals
  ; %cont; otherwise break to the outer loop header.
  %tmp51 = icmp eq i32 %tmp47, %cont
  br i1 %tmp51, label %LOOP, label %LOOP.outer
}
66
67; OPT-LABEL: define void @multi_if_break_loop(
68; OPT: llvm.amdgcn.break
69; OPT: llvm.amdgcn.loop
70; OPT: llvm.amdgcn.if.break
71; OPT: llvm.amdgcn.if.break
72; OPT: llvm.amdgcn.end.cf
73
74; GCN-LABEL: {{^}}multi_if_break_loop:
75; GCN: s_mov_b64 [[BREAK_REG:s\[[0-9]+:[0-9]+\]]], 0{{$}}
76
77; GCN: [[LOOP:BB[0-9]+_[0-9]+]]: ; %bb1{{$}}
78
; Uses a copy instead of an or
80; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
81; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
; Loop with multiple divergent exits: the switch default and both case
; blocks can all branch to %bb9, so several llvm.amdgcn.if.break values
; must be combined into the single mask consumed by llvm.amdgcn.loop
; (checked above as BREAK_REG updated via a copy plus s_or_b64).
define void @multi_if_break_loop(i32 %arg) #0 {
bb:
  ; Divergent value: per-lane workitem id minus a uniform argument.
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:
  %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  ; %cmp0 is computed but unused; it is kept only to match the original
  ; reduced test case.
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  ; Volatile load keeps the switch condition opaque/divergent.
  %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
  switch i32 %load0, label %bb9 [
    i32 0, label %case0
    i32 1, label %case1
  ]

case0:
  ; First divergent break condition feeding the loop mask.
  %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp slt i32 %tmp, %load1
  br i1 %cmp1, label %bb1, label %bb9

case1:
  ; Second, independent break condition; together with %case0 and the
  ; switch default this gives the multiple if.break calls checked above.
  %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp2 = icmp slt i32 %tmp, %load2
  br i1 %cmp2, label %bb1, label %bb9

bb9:
  ret void
}
111
112declare i32 @llvm.amdgcn.workitem.id.x() #1
113
114attributes #0 = { nounwind }
115attributes #1 = { nounwind readnone }
116