; This test verifies that the loop vectorizer will NOT produce a tail loop
; when the optimize-for-size (optsize) or minimize-size (minsize) attributes
; are present.
; REQUIRES: asserts
; RUN: opt < %s -loop-vectorize -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -pgso -S | FileCheck %s -check-prefix=PGSO
; RUN: opt < %s -loop-vectorize -pgso=false -S | FileCheck %s -check-prefix=NPGSO
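
; The plain RUN line exercises the attribute-based checks (CHECK prefix). The
; -pgso RUN line enables profile-guided size optimization (PGSO prefix), which
; should treat the profile-cold functions below like optsize functions, while
; -pgso=false disables it (NPGSO prefix) so those functions can still be
; vectorized.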

target datalayout = "E-m:e-p:32:32-i64:32-f64:32:64-a:0:32-n32-S128"

@tab = common global [32 x i8] zeroinitializer, align 1

define i32 @foo_optsize() #0 {
; CHECK-LABEL: @foo_optsize(
; CHECK-NOT: <2 x i8>
; CHECK-NOT: <4 x i8>

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

attributes #0 = { optsize }

define i32 @foo_minsize() #1 {
; CHECK-LABEL: @foo_minsize(
; CHECK-NOT: <2 x i8>
; CHECK-NOT: <4 x i8>
; CHECK-LABEL: @foo_pgso(

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

attributes #1 = { minsize }

define i32 @foo_pgso() !prof !14 {
; PGSO-LABEL: @foo_pgso(
; PGSO-NOT: <{{[0-9]+}} x i8>
; NPGSO-LABEL: @foo_pgso(
; NPGSO: <{{[0-9]+}} x i8>

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

; PR43371: don't run into an assert due to emitting SCEV runtime checks
; with OptForSize.
;
@cm_array = external global [2592 x i16], align 1

define void @pr43371() optsize {
;
; CHECK-LABEL: @pr43371
; CHECK-NOT:   vector.scevcheck
;
; We do not want to generate SCEV predicates when optimising for size, because
; that will lead to extra code generation such as the SCEV overflow runtime
; checks. Not generating SCEV predicates can still result in vectorisation as
; the non-consecutive loads/stores can be scalarized:
;
; CHECK: vector.body:
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: br i1 {{.*}}, label %vector.body
;
entry:
  br label %for.body29

for.cond.cleanup28:
  unreachable

for.body29:
  %i24.0170 = phi i16 [ 0, %entry ], [ %inc37, %for.body29 ]
  %add33 = add i16 undef, %i24.0170
  %idxprom34 = zext i16 %add33 to i32
  %arrayidx35 = getelementptr [2592 x i16], [2592 x i16]* @cm_array, i32 0, i32 %idxprom34
  store i16 0, i16* %arrayidx35, align 1
  %inc37 = add i16 %i24.0170, 1
  %cmp26 = icmp ult i16 %inc37, 756
  br i1 %cmp26, label %for.body29, label %for.cond.cleanup28
}

define void @pr43371_pgso() !prof !14 {
;
; CHECK-LABEL: @pr43371_pgso
; CHECK-NOT:   vector.scevcheck
;
; We do not want to generate SCEV predicates when optimising for size, because
; that will lead to extra code generation such as the SCEV overflow runtime
; checks. Not generating SCEV predicates can still result in vectorisation as
; the non-consecutive loads/stores can be scalarized:
;
; CHECK: vector.body:
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: br i1 {{.*}}, label %vector.body
;
entry:
  br label %for.body29

for.cond.cleanup28:
  unreachable

for.body29:
  %i24.0170 = phi i16 [ 0, %entry ], [ %inc37, %for.body29 ]
  %add33 = add i16 undef, %i24.0170
  %idxprom34 = zext i16 %add33 to i32
  %arrayidx35 = getelementptr [2592 x i16], [2592 x i16]* @cm_array, i32 0, i32 %idxprom34
  store i16 0, i16* %arrayidx35, align 1
  %inc37 = add i16 %i24.0170, 1
  %cmp26 = icmp ult i16 %inc37, 756
  br i1 %cmp26, label %for.body29, label %for.cond.cleanup28
}

; PR45526: don't vectorize with fold-tail if first-order-recurrence is live-out.
;
define i32 @pr45526() optsize {
;
; CHECK-LABEL: @pr45526
; CHECK-NEXT: entry:
; CHECK-NEXT:   br label %loop
; CHECK-EMPTY:
; CHECK-NEXT: loop:
; CHECK-NEXT:   %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %pivPlus1 = add nuw nsw i32 %piv, 1
; CHECK-NEXT:   %cond = icmp ult i32 %piv, 510
; CHECK-NEXT:   br i1 %cond, label %loop, label %exit
; CHECK-EMPTY:
; CHECK-NEXT: exit:
; CHECK-NEXT:   %for.lcssa = phi i32 [ %for, %loop ]
; CHECK-NEXT:   ret i32 %for.lcssa
;
entry:
  br label %loop

loop:
  %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
  %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
  %pivPlus1 = add nuw nsw i32 %piv, 1
  %cond = icmp ult i32 %piv, 510
  br i1 %cond, label %loop, label %exit

exit:
  ret i32 %for
}

define i32 @pr45526_pgso() !prof !14 {
;
; CHECK-LABEL: @pr45526_pgso
; CHECK-NEXT: entry:
; CHECK-NEXT:   br label %loop
; CHECK-EMPTY:
; CHECK-NEXT: loop:
; CHECK-NEXT:   %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %pivPlus1 = add nuw nsw i32 %piv, 1
; CHECK-NEXT:   %cond = icmp ult i32 %piv, 510
; CHECK-NEXT:   br i1 %cond, label %loop, label %exit
; CHECK-EMPTY:
; CHECK-NEXT: exit:
; CHECK-NEXT:   %for.lcssa = phi i32 [ %for, %loop ]
; CHECK-NEXT:   ret i32 %for.lcssa
;
entry:
  br label %loop

loop:
  %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
  %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
  %pivPlus1 = add nuw nsw i32 %piv, 1
  %cond = icmp ult i32 %piv, 510
  br i1 %cond, label %loop, label %exit

exit:
  ret i32 %for
}

; PR46228: Vectorize without versioning for unit stride under optsize when
; vectorization is explicitly enabled.

; NOTE: Some assertions have been autogenerated by utils/update_test_checks.py
define void @stride1(i16* noalias %B, i32 %BStride) optsize {
; CHECK-LABEL: @stride1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> undef, i32 [[BSTRIDE:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> undef, <2 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE2]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = mul nsw <2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule <2 x i32> [[VEC_IND]], <i32 1024, i32 1024>
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[TMP3]]
; CHECK-NEXT:    store i16 42, i16* [[TMP4]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
; CHECK-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK:       pred.store.if1:
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[TMP6]]
; CHECK-NEXT:    store i16 42, i16* [[TMP7]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; CHECK:       pred.store.continue2:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1026
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !21
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
; PGSO-LABEL: @stride1(
; PGSO-NEXT:  entry:
; PGSO-NEXT:    br i1 false, label %scalar.ph, label %vector.ph
;
; NPGSO-LABEL: @stride1(
; NPGSO-NEXT:  entry:
; NPGSO-NEXT:    br i1 false, label %scalar.ph, label %vector.ph

entry:
  br label %for.body

for.body:
  %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
  %mulB = mul nsw i32 %iv, %BStride
  %gepOfB = getelementptr inbounds i16, i16* %B, i32 %mulB
  store i16 42, i16* %gepOfB, align 4
  %iv.next = add nuw nsw i32 %iv, 1
  %exitcond = icmp eq i32 %iv.next, 1025
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15

for.end:
  ret void
}

; Vectorize with versioning for unit stride under PGSO when vectorization is
; explicitly enabled.
;
define void @stride1_pgso(i16* noalias %B, i32 %BStride) !prof !14 {
; CHECK-LABEL: @stride1_pgso(
; CHECK: vector.body
;
; PGSO-LABEL: @stride1_pgso(
; PGSO: vector.body
;
; NPGSO-LABEL: @stride1_pgso(
; NPGSO: vector.body

entry:
  br label %for.body

for.body:
  %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
  %mulB = mul nsw i32 %iv, %BStride
  %gepOfB = getelementptr inbounds i16, i16* %B, i32 %mulB
  store i16 42, i16* %gepOfB, align 4
  %iv.next = add nuw nsw i32 %iv, 1
  %exitcond = icmp eq i32 %iv.next, 1025
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15

for.end:
  ret void
}

; PR46652: Check that the need for a stride==1 runtime check prevents
; vectorizing a loop with a tiny trip count when compiling without -Os/-Oz.
; CHECK-LABEL: @pr46652
; CHECK-NOT: vector.scevcheck
; CHECK-NOT: vector.body
; CHECK-LABEL: for.body

@g = external global [1 x i16], align 1

define void @pr46652(i16 %stride) {
entry:
  br label %for.body

for.body:                                        ; preds = %for.body, %entry
  %l1.02 = phi i16 [ 1, %entry ], [ %inc9, %for.body ]
  %mul = mul nsw i16 %l1.02, %stride
  %arrayidx6 = getelementptr inbounds [1 x i16], [1 x i16]* @g, i16 0, i16 %mul
  %0 = load i16, i16* %arrayidx6, align 1
  %inc9 = add nuw nsw i16 %l1.02, 1
  %exitcond.not = icmp eq i16 %inc9, 16
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                        ; preds = %for.body
  ret void
}
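
; The profile summary below marks functions annotated with !prof !14 (entry
; count 0) as cold, which is what should let PGSO treat them as if they were
; optimized for size; !15/!16 attach the llvm.loop.vectorize.enable hint used
; by the stride tests.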
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"ProfileSummary", !1}
!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
!2 = !{!"ProfileFormat", !"InstrProf"}
!3 = !{!"TotalCount", i64 10000}
!4 = !{!"MaxCount", i64 10}
!5 = !{!"MaxInternalCount", i64 1}
!6 = !{!"MaxFunctionCount", i64 1000}
!7 = !{!"NumCounts", i64 3}
!8 = !{!"NumFunctions", i64 3}
!9 = !{!"DetailedSummary", !10}
!10 = !{!11, !12, !13}
!11 = !{i32 10000, i64 100, i32 1}
!12 = !{i32 999000, i64 100, i32 1}
!13 = !{i32 999999, i64 1, i32 2}
!14 = !{!"function_entry_count", i64 0}
!15 = distinct !{!15, !16}
!16 = !{!"llvm.loop.vectorize.enable", i1 true}