; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -prefer-inloop-reductions -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

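; An i32 add reduction of a single loaded value: each vector of loads is
; reduced with llvm.vector.reduce.add and accumulated into a scalar phi.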
define i32 @reduction_sum_single(i32* noalias nocapture %A) {
; CHECK-LABEL: @reduction_sum_single(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP3]] = add i32 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l7, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l7 = add i32 %sum.02, %l3
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l7, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

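; An add reduction with three operands per iteration (the truncated induction
; variable plus loads from %A and %B); each vector is reduced in-loop and the
; scalar results are chained onto the accumulator.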
define i32 @reduction_sum(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_sum(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND1:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND1]])
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD3]])
; CHECK-NEXT:    [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l9, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = trunc i64 %indvars.iv to i32
  %l7 = add i32 %sum.02, %l6
  %l8 = add i32 %l7, %l3
  %l9 = add i32 %l8, %l5
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

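; An add reduction where a constant (3) is also added each iteration; the
; loads are reduced in-loop and the constant contribution becomes a single
; add of 12 (4 lanes x 3) per vector iteration.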
define i32 @reduction_sum_const(i32* noalias nocapture %A) {
; CHECK-LABEL: @reduction_sum_const(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP4]] = add i32 [[TMP3]], 12
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l9, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l7 = add i32 %sum.02, %l3
  %l9 = add i32 %l7, 3
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

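; A mul reduction, starting at 1, over the truncated induction variable and
; two loads; each vector is reduced with llvm.vector.reduce.mul in-loop.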
define i32 @reduction_prod(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_prod(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 1, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND1:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_IND1]])
; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP7:%.*]] = mul i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD3]])
; CHECK-NEXT:    [[TMP9]] = mul i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[PROD_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[PROD_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %prod.02 = phi i32 [ %l9, %.lr.ph ], [ 1, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = trunc i64 %indvars.iv to i32
  %l7 = mul i32 %prod.02, %l6
  %l8 = mul i32 %l7, %l3
  %l9 = mul i32 %l8, %l5
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %prod.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %prod.0.lcssa
}

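; A mix of widened and in-loop operations: the element-wise mul of the two
; loads stays vectorized, and both it and the induction values are reduced
; with llvm.vector.reduce.add inside the loop.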
define i32 @reduction_mix(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_mix(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND1:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD3]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND1]])
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[TMP8]] = add i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l9, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = mul nsw i32 %l5, %l3
  %l7 = trunc i64 %indvars.iv to i32
  %l8 = add i32 %sum.02, %l7
  %l9 = add i32 %l8, %l6
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

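; A mul reduction with a non-unit start value (19), kept in the scalar phi
; and multiplied by the in-loop llvm.vector.reduce.mul results.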
define i32 @reduction_mul(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_mul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 19, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
; CHECK-NEXT:    [[TMP7]] = mul i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l7, %.lr.ph ], [ 19, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = mul i32 %sum.02, %l3
  %l7 = mul i32 %l6, %l5
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l7, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

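; An add reduction of a product, with a non-zero start value (120); the mul
; stays widened and only the add is performed in-loop.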
define i32 @start_at_non_zero(i32* nocapture %in, i32* nocapture %coeff, i32* nocapture %out) {
; CHECK-LABEL: @start_at_non_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 120, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[COEFF:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[TMP6]] = add i32 [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %sum.09 = phi i32 [ %add, %for.body ], [ 120, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %coeff, i64 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %mul = mul nsw i32 %l1, %l0
  %add = add nsw i32 %mul, %sum.09
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %sum.0.lcssa = phi i32 [ %add, %for.body ]
  ret i32 %sum.0.lcssa
}

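; An and reduction starting at -1; both loaded vectors are reduced with
; llvm.vector.reduce.and and combined with the scalar accumulator.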
define i32 @reduction_and(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_and(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ -1, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD1]])
; CHECK-NEXT:    [[TMP7]] = and i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %and, %for.body ], [ -1, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = and i32 %result.08, %l0
  %and = and i32 %add, %l1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %and, %for.body ]
  ret i32 %result.0.lcssa
}

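; An or reduction of a widened add; the vector add stays in the loop and its
; result is reduced with llvm.vector.reduce.or.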
define i32 @reduction_or(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_or(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[TMP6]] = or i32 [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %or, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %l1, %l0
  %or = or i32 %add, %result.08
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %or, %for.body ]
  ret i32 %result.0.lcssa
}

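; An xor reduction of a widened add, analogous to reduction_or above.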
define i32 @reduction_xor(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_xor(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[TMP6]] = xor i32 [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %l1, %l0
  %xor = xor i32 %add, %result.08
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %xor, %for.body ]
  ret i32 %result.0.lcssa
}

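; A fast-math fadd reduction performed in-loop; llvm.vector.reduce.fadd takes
; the running scalar value as its start operand.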
define float @reduction_fadd(float* nocapture %A, float* nocapture %B) {
; CHECK-LABEL: @reduction_fadd(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float [[VEC_PHI]], <4 x float> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP5]] = call fast float @llvm.vector.reduce.fadd.v4f32(float [[TMP4]], <4 x float> [[WIDE_LOAD1]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi float [ undef, [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret float [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi float [ %fadd, %for.body ], [ 0.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %l1 = load float, float* %arrayidx2, align 4
  %add = fadd fast float %result.08, %l0
  %fadd = fadd fast float %add, %l1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi float [ %fadd, %for.body ]
  ret float %result.0.lcssa
}

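; A fast-math fmul reduction performed in-loop; each vector is reduced with
; llvm.vector.reduce.fmul (start value 1.0) and multiplied into the phi.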
define float @reduction_fmul(float* nocapture %A, float* nocapture %B) {
; CHECK-LABEL: @reduction_fmul(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast float [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD1]])
; CHECK-NEXT:    [[TMP7]] = fmul fast float [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi float [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret float [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi float [ %fmul, %for.body ], [ 0.0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %l1 = load float, float* %arrayidx2, align 4
  %add = fmul fast float %result.08, %l0
  %fmul = fmul fast float %add, %l1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi float [ %fmul, %for.body ]
  ret float %result.0.lcssa
}

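; A signed min reduction: llvm.vector.reduce.smin in the loop, followed by a
; scalar icmp/select against the accumulator.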
define i32 @reduction_min(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_min(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp slt i32 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT:    [[RDX_MINMAX_SELECT]] = select i1 [[RDX_MINMAX_CMP]], i32 [[TMP2]], i32 [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %v0, %for.body ], [ 1000, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %c0 = icmp slt i32 %result.08, %l0
  %v0 = select i1 %c0, i32 %result.08, i32 %l0
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %v0, %for.body ]
  ret i32 %result.0.lcssa
}

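; An unsigned max reduction: llvm.vector.reduce.umax in the loop, followed by
; a scalar icmp/select against the accumulator.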
define i32 @reduction_max(i32* nocapture %A, i32* nocapture %B) {
; CHECK-LABEL: @reduction_max(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 1000, [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp ugt i32 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT:    [[RDX_MINMAX_SELECT]] = select i1 [[RDX_MINMAX_CMP]], i32 [[TMP2]], i32 [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[RESULT_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[RESULT_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %result.08 = phi i32 [ %v0, %for.body ], [ 1000, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %c0 = icmp ugt i32 %result.08, %l0
  %v0 = select i1 %c0, i32 %result.08, i32 %l0
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %result.0.lcssa = phi i32 [ %v0, %for.body ]
  ret i32 %result.0.lcssa
}

; For sub we can create a reduction, but not an in-loop one: the sub stays
; widened and is only reduced with llvm.vector.reduce.add in the middle block.
define i32 @reduction_sub_lhs(i32* noalias nocapture %A) {
; CHECK-LABEL: @reduction_sub_lhs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2]] = sub <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[X_0_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[X_0_LCSSA]]
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l0 = load i32, i32* %arrayidx, align 4
  %sub = sub nsw i32 %x.05, %l0
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  %x.0.lcssa = phi i32 [ %sub, %for.body ]
  ret i32 %x.0.lcssa
}

; Conditional reductions with multi-input phis.
define float @reduction_conditional(float* %A, float* %B, float* %C, float %S) {
; CHECK-LABEL: @reduction_conditional(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x float> <float poison, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float [[S:%.*]], i32 0
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x float> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[TMP1]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[TMP3]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x float>, <4 x float>* [[TMP4]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT:    [[TMP6:%.*]] = fcmp ule <4 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP7:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
; CHECK-NEXT:    [[TMP8:%.*]] = and <4 x i1> [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[TMP9:%.*]] = and <4 x i1> [[TMP8]], [[TMP7]]
; CHECK-NEXT:    [[TMP10:%.*]] = xor <4 x i1> [[TMP7]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    [[TMP11:%.*]] = and <4 x i1> [[TMP8]], [[TMP10]]
; CHECK-NEXT:    [[TMP12:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT:    [[PREDPHI_V:%.*]] = select <4 x i1> [[TMP9]], <4 x float> [[WIDE_LOAD1]], <4 x float> [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP13:%.*]] = select <4 x i1> [[TMP12]], <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[TMP11]]
; CHECK-NEXT:    [[PREDPHI2:%.*]] = select <4 x i1> [[TMP13]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[PREDPHI_V]]
; CHECK-NEXT:    [[PREDPHI3]] = fadd fast <4 x float> [[VEC_PHI]], [[PREDPHI2]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; CHECK-NEXT:    br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP15:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI3]])
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 undef, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    br i1 undef, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]]
; CHECK:       if.then8:
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       if.else:
; CHECK-NEXT:    br i1 undef, label [[IF_THEN16:%.*]], label [[FOR_INC]]
; CHECK:       if.then16:
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    br i1 undef, label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[SUM_1_LCSSA:%.*]] = phi float [ undef, [[FOR_INC]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret float [[SUM_1_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
  %sum.033 = phi float [ %S, %entry ], [ %sum.1, %for.inc ]
  %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %l0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %l1 = load float, float* %arrayidx2, align 4
  %cmp3 = fcmp ogt float %l0, %l1
  br i1 %cmp3, label %if.then, label %for.inc

if.then:
  %cmp6 = fcmp ogt float %l1, 1.000000e+00
  br i1 %cmp6, label %if.then8, label %if.else

if.then8:
  %add = fadd fast float %sum.033, %l0
  br label %for.inc

if.else:
  %cmp14 = fcmp ogt float %l0, 2.000000e+00
  br i1 %cmp14, label %if.then16, label %for.inc

if.then16:
  %add19 = fadd fast float %sum.033, %l1
  br label %for.inc

for.inc:
  %sum.1 = phi float [ %add, %if.then8 ], [ %add19, %if.then16 ], [ %sum.033, %if.else ], [ %sum.033, %for.body ]
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, 128
  br i1 %exitcond, label %for.body, label %for.end

for.end:
  %sum.1.lcssa = phi float [ %sum.1, %for.inc ]
  ret float %sum.1.lcssa
}

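; %sum.02 has a use outside the reduction chain (%l10 adds it back in), so no
; reduction is formed and the loop is left unvectorized.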
define i32 @reduction_sum_multiuse(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_sum_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], [[DOTLR_PH]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    [[L2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[L3:%.*]] = load i32, i32* [[L2]], align 4
; CHECK-NEXT:    [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT:    [[L7:%.*]] = add i32 [[SUM_02]], [[L6]]
; CHECK-NEXT:    [[L8:%.*]] = add i32 [[L7]], [[L3]]
; CHECK-NEXT:    [[L10]] = add i32 [[L8]], [[SUM_02]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[END:%.*]], label [[DOTLR_PH]]
; CHECK:       end:
; CHECK-NEXT:    ret i32 [[L10]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l10, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = trunc i64 %indvars.iv to i32
  %l7 = add i32 %sum.02, %l6
  %l8 = add i32 %l7, %l3
  %l9 = add i32 %l8, %l5
  %l10 = add i32 %l8, %sum.02
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %end, label %.lr.ph

end:
  %f1 = phi i32 [ %l10, %.lr.ph ]
  ret i32 %f1
}

; Predicated loop, cannot (yet) use in-loop reductions.
define i32 @reduction_predicated(i32* noalias nocapture %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @reduction_predicated(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND1:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND1]])
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD3]])
; CHECK-NEXT:    [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ undef, [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02 = phi i32 [ %l9, %.lr.ph ], [ 0, %entry ]
  %l2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %l3 = load i32, i32* %l2, align 4
  %l4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %l5 = load i32, i32* %l4, align 4
  %l6 = trunc i64 %indvars.iv to i32
  %l7 = add i32 %sum.02, %l6
  %l8 = add i32 %l7, %l3
  %l9 = add i32 %l8, %l5
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph, !llvm.loop !6

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  ret i32 %sum.0.lcssa
}

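; The i32 add reduction is narrowed to i8 (only the low 8 bits are live); the
; add stays widened as <4 x i8> and is reduced in the middle block rather than
; in-loop.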
define i8 @reduction_add_trunc(i8* noalias nocapture %A) {
; CHECK-LABEL: @reduction_add_trunc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i8> [ <i8 -1, i8 0, i8 0, i8 0>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i8>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP3]] = add <4 x i8> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[TMP3]])
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP37:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i8 [ undef, [[DOTLR_PH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i8 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02p = phi i32 [ %l9, %.lr.ph ], [ 255, %entry ]
  %sum.02 = and i32 %sum.02p, 255
  %l2 = getelementptr inbounds i8, i8* %A, i32 %indvars.iv
  %l3 = load i8, i8* %l2, align 4
  %l3e = zext i8 %l3 to i32
  %l9 = add i32 %sum.02, %l3e
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  %ret = trunc i32 %sum.0.lcssa to i8
  ret i8 %ret
}

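; Same pattern with an 'and' reduction: the accumulator is narrowed to
; <4 x i8> (starting from all-ones) and reduced to a scalar after the loop.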
define i8 @reduction_and_trunc(i8* noalias nocapture %A) {
; CHECK-LABEL: @reduction_and_trunc(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i8> [ <i8 -1, i8 -1, i8 -1, i8 -1>, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[INDEX]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to <4 x i8>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP3]] = and <4 x i8> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP5:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[TMP3]])
; CHECK-NEXT:    br i1 true, label [[DOT_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    br i1 undef, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP39:![0-9]+]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i8 [ undef, [[DOTLR_PH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i8 [[SUM_0_LCSSA]]
;
entry:
  br label %.lr.ph

.lr.ph:                                           ; preds = %entry, %.lr.ph
  %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %entry ]
  %sum.02p = phi i32 [ %l9, %.lr.ph ], [ 255, %entry ]
  %sum.02 = and i32 %sum.02p, 255
  %l2 = getelementptr inbounds i8, i8* %A, i32 %indvars.iv
  %l3 = load i8, i8* %l2, align 4
  %l3e = zext i8 %l3 to i32
  %l9 = and i32 %sum.02, %l3e
  %indvars.iv.next = add i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, 256
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph
  %sum.0.lcssa = phi i32 [ %l9, %.lr.ph ]
  %ret = trunc i32 %sum.0.lcssa to i8
  ret i8 %ret
}

; Test case when the loop has a call to the llvm.fmuladd intrinsic.
define float @reduction_fmuladd(float* %a, float* %b, i64 %n) {
; CHECK-LABEL: @reduction_fmuladd(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[N]], -4
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[TMP0]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[TMP2]] to <4 x float>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP4]])
; CHECK-NEXT:    [[TMP6]] = fadd float [[TMP5]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[IV]]
; CHECK-NEXT:    [[TMP8:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[IV]]
; CHECK-NEXT:    [[TMP9:%.*]] = load float, float* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[SUM_07]])
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret float [[MULADD_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
  %0 = load float, float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, float* %b, i64 %iv
  %1 = load float, float* %arrayidx2, align 4
  %muladd = tail call float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret float %muladd
}

declare float @llvm.fmuladd.f32(float, float, float)

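; Loop metadata referenced by the scalar loop above: !7 requests predicated
; (tail-folded) vectorization and !8 enables vectorization for that loop.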
!6 = distinct !{!6, !7, !8}
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!8 = !{!"llvm.loop.vectorize.enable", i1 true}