; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=IND
; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=UNROLL

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Make sure that we can handle multiple integer induction variables.
; CHECK-LABEL: @multi_int_induction(
; CHECK: vector.body:
; CHECK:  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK:  %[[VAR:.*]] = trunc i64 %index to i32
; CHECK:  %offset.idx = add i32 190, %[[VAR]]
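; For reference, a rough C analogue of the loop below (a sketch, not part of
; the original test source):
;
;   void multi_int_induction(int *A, int N) {
;     int count = 190;
;     long i = 0;
;     do {
;       A[i] = count++;
;       ++i;
;     } while ((int)i != N);
;   }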
define void @multi_int_induction(i32* %A, i32 %N) {
for.body.lr.ph:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %count.09 = phi i32 [ 190, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %count.09, i32* %arrayidx2, align 4
  %inc = add nsw i32 %count.09, 1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, %N
  br i1 %exitcond, label %for.body, label %for.end

for.end:
  ret void
}

; Make sure we remove unneeded vectorization of induction variables.
; In order for instcombine to clean up the vectorized induction variables that
; we create in the loop vectorizer, we need to perform some form of redundancy
; elimination to get rid of multiple uses.

; IND-LABEL: scalar_use

; IND:     br label %vector.body
; IND:     vector.body:
;   Vectorized induction variable.
; IND-NOT:  insertelement <2 x i64>
; IND-NOT:  shufflevector <2 x i64>
; IND:     br {{.*}}, label %vector.body

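; For reference, a rough C analogue of the loop below (a sketch, not part of
; the original test source):
;
;   void scalar_use(float *a, float b, long offset, long offset2, long n) {
;     long i = 0;
;     do {
;       a[i + offset] += b * a[i + offset2];
;       ++i;
;     } while (i != n);
;   }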
define void @scalar_use(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %ind.sum = add i64 %iv, %offset
  %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
  %l1 = load float, float* %arr.idx, align 4
  %ind.sum2 = add i64 %iv, %offset2
  %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
  %l2 = load float, float* %arr.idx2, align 4
  %m = fmul fast float %b, %l2
  %ad = fadd fast float %l1, %m
  store float %ad, float* %arr.idx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %loopexit, label %for.body

loopexit:
  ret void
}

; Make sure that the loop exit count computation does not overflow for i8 and
; i16. The exit count of these loops is i8/i16 max + 1 (256 and 65536,
; respectively). If we don't cast the induction variable to a bigger type, the
; exit count computation will overflow to 0.
; PR17532

; CHECK-LABEL: i8_loop
; CHECK: icmp eq i32 {{.*}}, 256
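; For reference, a rough C analogue of the loop below (a sketch, not part of
; the original test source); the i8 counter starts at 0 and is decremented, so
; it takes 256 iterations before it reaches 0 again:
;
;   int i8_loop() {
;     int a = 1;
;     unsigned char b = 0;
;     do {
;       a &= 4;
;       --b;
;     } while (b != 0);
;     return a;
;   }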
define i32 @i8_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i8 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i8 %b.0, -1
  %4 = icmp eq i8 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; CHECK-LABEL: i16_loop
; CHECK: icmp eq i32 {{.*}}, 65536

define i32 @i16_loop() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i16 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i16 %b.0, -1
  %4 = icmp eq i16 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; This loop has a backedge-taken count of i32 max. We need to check for this
; condition and branch directly to the scalar loop.

; CHECK-LABEL: max_i32_backedgetaken
; CHECK:  br i1 true, label %scalar.ph, label %min.iters.checked

; CHECK: scalar.ph:
; CHECK:  %bc.resume.val = phi i32 [ 0, %middle.block ], [ 0, %0 ]
; CHECK:  %bc.merge.rdx = phi i32 [ 1, %0 ], [ 1, %min.iters.checked ], [ %5, %middle.block ]

define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {
  br label %1

; <label>:1                                       ; preds = %1, %0
  %a.0 = phi i32 [ 1, %0 ], [ %2, %1 ]
  %b.0 = phi i32 [ 0, %0 ], [ %3, %1 ]
  %2 = and i32 %a.0, 4
  %3 = add i32 %b.0, -1
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %5, label %1

; <label>:5                                       ; preds = %1
  ret i32 %2
}

; When generating the overflow check, we must make sure that the induction
; start value is defined before the branch to the scalar preheader.

; CHECK-LABEL: testoverflowcheck
; CHECK: entry
; CHECK: %[[LOAD:.*]] = load i8
; CHECK: br

; CHECK: scalar.ph
; CHECK: phi i8 [ %{{.*}}, %middle.block ], [ %[[LOAD]], %entry ]

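; For reference, a rough C analogue of the function below (a sketch, not part
; of the original test source); note that the induction start value is loaded
; from @e in the entry block:
;
;   unsigned char e = 1;
;   int d, c;
;   int testoverflowcheck() {
;     unsigned char i = e;
;     int a = c;
;     do {
;       a &= d;
;       ++i;
;     } while (i != 0);
;     return a;
;   }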
@e = global i8 1, align 1
@d = common global i32 0, align 4
@c = common global i32 0, align 4
define i32 @testoverflowcheck() {
entry:
  %.pr.i = load i8, i8* @e, align 1
  %0 = load i32, i32* @d, align 4
  %c.promoted.i = load i32, i32* @c, align 4
  br label %cond.end.i

cond.end.i:
  %inc4.i = phi i8 [ %.pr.i, %entry ], [ %inc.i, %cond.end.i ]
  %and3.i = phi i32 [ %c.promoted.i, %entry ], [ %and.i, %cond.end.i ]
  %and.i = and i32 %0, %and3.i
  %inc.i = add i8 %inc4.i, 1
  %tobool.i = icmp eq i8 %inc.i, 0
  br i1 %tobool.i, label %loopexit, label %cond.end.i

loopexit:
  ret i32 %and.i
}

; The SCEV expression of %sphi is (zext i8 {%t,+,1}<%loop> to i32).
; In order to recognize %sphi as an induction PHI and vectorize this loop,
; we need to convert the SCEV expression into an AddRecExpr.
; The expression gets converted to {zext i8 %t to i32,+,1}.

; CHECK-LABEL: wrappingindvars1
; CHECK-LABEL: vector.scevcheck
; CHECK-LABEL: vector.body
; CHECK: add <2 x i32> {{%[^ ]*}}, <i32 0, i32 1>
define void @wrappingindvars1(i8 %t, i32 %len, i32 *%A) {
entry:
  %st = zext i8 %t to i16
  %ext = zext i8 %t to i32
  %ecmp = icmp ult i16 %st, 42
  br i1 %ecmp, label %loop, label %exit

loop:
  %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
  %idx.b = phi i32 [ 0, %entry ], [ %idx.b.inc, %loop ]
  %sphi = phi i32 [ %ext, %entry ], [ %idx.inc.ext, %loop ]

  %ptr = getelementptr inbounds i32, i32* %A, i8 %idx
  store i32 %sphi, i32* %ptr

  %idx.inc = add i8 %idx, 1
  %idx.inc.ext = zext i8 %idx.inc to i32
  %idx.b.inc = add nuw nsw i32 %idx.b, 1

  %c = icmp ult i32 %idx.b, %len
  br i1 %c, label %loop, label %exit

exit:
  ret void
}

; The SCEV expression of %sphi is (4 * (zext i8 {%t,+,1}<%loop> to i32)).
; In order to recognize %sphi as an induction PHI and vectorize this loop,
; we need to convert the SCEV expression into an AddRecExpr.
; The expression gets converted to {4 * (zext i8 %t to i32),+,4}.

; CHECK-LABEL: wrappingindvars2
; CHECK-LABEL: vector.scevcheck
; CHECK-LABEL: vector.body
; CHECK: add <2 x i32> {{%[^ ]*}}, <i32 0, i32 4>
define void @wrappingindvars2(i8 %t, i32 %len, i32 *%A) {
entry:
  %st = zext i8 %t to i16
  %ext = zext i8 %t to i32
  %ext.mul = mul i32 %ext, 4

  %ecmp = icmp ult i16 %st, 42
  br i1 %ecmp, label %loop, label %exit

loop:
  %idx = phi i8 [ %t, %entry ], [ %idx.inc, %loop ]
  %sphi = phi i32 [ %ext.mul, %entry ], [ %mul, %loop ]
  %idx.b = phi i32 [ 0, %entry ], [ %idx.b.inc, %loop ]

  %ptr = getelementptr inbounds i32, i32* %A, i8 %idx
  store i32 %sphi, i32* %ptr

  %idx.inc = add i8 %idx, 1
  %idx.inc.ext = zext i8 %idx.inc to i32
  %mul = mul i32 %idx.inc.ext, 4
  %idx.b.inc = add nuw nsw i32 %idx.b, 1

  %c = icmp ult i32 %idx.b, %len
  br i1 %c, label %loop, label %exit

exit:
  ret void
}

; Check that we generate vectorized IVs in the pre-header
; instead of widening the scalar IV inside the loop, when
; we know how to do that.
; IND-LABEL: veciv
; IND: vector.body:
; IND: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; IND: %vec.ind = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %step.add, %vector.body ]
; IND: %step.add = add <2 x i32> %vec.ind, <i32 2, i32 2>
; IND: %index.next = add i32 %index, 2
; IND: %[[CMP:.*]] = icmp eq i32 %index.next
; IND: br i1 %[[CMP]]
; UNROLL-LABEL: veciv
; UNROLL: vector.body:
; UNROLL: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; UNROLL: %vec.ind = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %step.add1, %vector.body ]
; UNROLL: %step.add = add <2 x i32> %vec.ind, <i32 2, i32 2>
; UNROLL: %step.add1 = add <2 x i32> %vec.ind, <i32 4, i32 4>
; UNROLL: %index.next = add i32 %index, 4
; UNROLL: %[[CMP:.*]] = icmp eq i32 %index.next
; UNROLL: br i1 %[[CMP]]
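; For reference, a rough C analogue of the loop below (a sketch, not part of
; the original test source; %start is unused in the IR):
;
;   void veciv(int *a, int start, int k) {
;     int i = 0;
;     do {
;       a[i] = i;
;       ++i;
;     } while (i != k);
;   }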
define void @veciv(i32* nocapture %a, i32 %start, i32 %k) {
for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %indvars.iv
  store i32 %indvars.iv, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
  %exitcond = icmp eq i32 %indvars.iv.next, %k
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}

; IND-LABEL: trunciv
; IND: vector.body:
; IND: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; IND: %[[VECIND:.*]] = phi <2 x i32> [ <i32 0, i32 1>, %vector.ph ], [ %[[STEPADD:.*]], %vector.body ]
; IND: %[[STEPADD]] = add <2 x i32> %[[VECIND]], <i32 2, i32 2>
; IND: %index.next = add i64 %index, 2
; IND: %[[CMP:.*]] = icmp eq i64 %index.next
; IND: br i1 %[[CMP]]
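; For reference, a rough C analogue of the loop below (a sketch, not part of
; the original test source; %start is unused in the IR):
;
;   void trunciv(int *a, int start, long k) {
;     long i = 0;
;     do {
;       a[(int)i] = (int)i;
;       ++i;
;     } while (i != k);
;   }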
define void @trunciv(i32* nocapture %a, i32 %start, i64 %k) {
for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %trunc.iv = trunc i64 %indvars.iv to i32
  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %trunc.iv
  store i32 %trunc.iv, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %k
  br i1 %exitcond, label %exit, label %for.body

exit:
  ret void
}