; This test verifies that the loop vectorizer will NOT produce a tail loop
; when the optimize-for-size (optsize) or minimize-size (minsize) attribute
; is present.
; REQUIRES: asserts
; RUN: opt < %s -enable-new-pm=0 -loop-vectorize -S | FileCheck %s
; RUN: opt < %s -enable-new-pm=0 -loop-vectorize -pgso -S | FileCheck %s -check-prefix=PGSO
; RUN: opt < %s -enable-new-pm=0 -loop-vectorize -pgso=false -S | FileCheck %s -check-prefix=NPGSO
; RUN: opt < %s -passes='require<profile-summary>,loop-vectorize' -S | FileCheck %s
; RUN: opt < %s -passes='require<profile-summary>,loop-vectorize' -pgso -S | FileCheck %s -check-prefix=PGSO
; RUN: opt < %s -passes='require<profile-summary>,loop-vectorize' -pgso=false -S | FileCheck %s -check-prefix=NPGSO

target datalayout = "E-m:e-p:32:32-i64:32-f64:32:64-a:0:32-n32-S128"

@tab = common global [32 x i8] zeroinitializer, align 1

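; All three foo_* functions below contain the same scalar loop; a rough C
; sketch, for illustration only (not part of the test input):
;
;   for (int i = 0; i <= 202; i++)
;     tab[i] = (tab[i] == 0) ? 2 : 1;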
define i32 @foo_optsize() #0 {
; CHECK-LABEL: @foo_optsize(
; CHECK-NOT: <2 x i8>
; CHECK-NOT: <4 x i8>

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

attributes #0 = { optsize }

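; Same loop as @foo_optsize, but with the minsize attribute instead of optsize;
; again no <2 x i8> or <4 x i8> vector types are expected in the output.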
define i32 @foo_minsize() #1 {
; CHECK-LABEL: @foo_minsize(
; CHECK-NOT: <2 x i8>
; CHECK-NOT: <4 x i8>
; CHECK-LABEL: @foo_pgso(

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

attributes #1 = { minsize }

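; Same loop again, but with no size attribute; the function is only marked cold
; via !prof !14 (an entry count of 0). Under -pgso this counts as optimizing for
; size and vectorization is suppressed, while with -pgso=false the loop is
; vectorized as usual.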
define i32 @foo_pgso() !prof !14 {
; PGSO-LABEL: @foo_pgso(
; PGSO-NOT: <{{[0-9]+}} x i8>
; NPGSO-LABEL: @foo_pgso(
; NPGSO: <{{[0-9]+}} x i8>

entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
  %0 = load i8, i8* %arrayidx, align 1
  %cmp1 = icmp eq i8 %0, 0
  %. = select i1 %cmp1, i8 2, i8 1
  store i8 %., i8* %arrayidx, align 1
  %inc = add nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %i.08, 202
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret i32 0
}

; PR43371: don't run into an assert due to emitting SCEV runtime checks
; with OptForSize.
;
@cm_array = external global [2592 x i16], align 1

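; Roughly, the loop below stores 0 to cm_array[(i16)(base + i)] for i = 0..755,
; where the 16-bit index addition may wrap; proving that it does not is what
; would normally require the SCEV overflow runtime check mentioned above
; (illustration only; the base operand is undef in the IR).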
define void @pr43371() optsize {
;
; CHECK-LABEL: @pr43371
; CHECK-NOT:   vector.scevcheck
;
; We do not want to generate SCEV predicates when optimising for size, because
; that will lead to extra code generation such as the SCEV overflow runtime
; checks. Not generating SCEV predicates can still result in vectorisation as
; the non-consecutive loads/stores can be scalarized:
;
; CHECK: vector.body:
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: br i1 {{.*}}, label %vector.body
;
entry:
  br label %for.body29

for.cond.cleanup28:
  unreachable

for.body29:
  %i24.0170 = phi i16 [ 0, %entry], [ %inc37, %for.body29]
  %add33 = add i16 undef, %i24.0170
  %idxprom34 = zext i16 %add33 to i32
  %arrayidx35 = getelementptr [2592 x i16], [2592 x i16] * @cm_array, i32 0, i32 %idxprom34
  store i16 0, i16 * %arrayidx35, align 1
  %inc37 = add i16 %i24.0170, 1
  %cmp26 = icmp ult i16 %inc37, 756
  br i1 %cmp26, label %for.body29, label %for.cond.cleanup28
}

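; Same loop as @pr43371, but size optimization is driven by profile data
; (!prof !14, a zero entry count) rather than the optsize attribute.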
define void @pr43371_pgso() !prof !14 {
;
; CHECK-LABEL: @pr43371_pgso
; CHECK-NOT:   vector.scevcheck
;
; We do not want to generate SCEV predicates when optimising for size, because
; that will lead to extra code generation such as the SCEV overflow runtime
; checks. Not generating SCEV predicates can still result in vectorisation as
; the non-consecutive loads/stores can be scalarized:
;
; CHECK: vector.body:
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: store i16 0, i16* %{{.*}}, align 1
; CHECK: br i1 {{.*}}, label %vector.body
;
entry:
  br label %for.body29

for.cond.cleanup28:
  unreachable

for.body29:
  %i24.0170 = phi i16 [ 0, %entry], [ %inc37, %for.body29]
  %add33 = add i16 undef, %i24.0170
  %idxprom34 = zext i16 %add33 to i32
  %arrayidx35 = getelementptr [2592 x i16], [2592 x i16] * @cm_array, i32 0, i32 %idxprom34
  store i16 0, i16 * %arrayidx35, align 1
  %inc37 = add i16 %i24.0170, 1
  %cmp26 = icmp ult i16 %inc37, 756
  br i1 %cmp26, label %for.body29, label %for.cond.cleanup28
}

; PR45526: don't vectorize with fold-tail if a first-order recurrence is live-out.
;
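; %for is a first-order recurrence (it takes the previous iteration's
; %pivPlus1) and its scalar value is used in the exit block, so folding the
; tail by masking cannot easily produce the required live-out value; the loop
; is therefore expected to stay scalar, as the CHECK lines below spell out.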
define i32 @pr45526() optsize {
;
; CHECK-LABEL: @pr45526
; CHECK-NEXT: entry:
; CHECK-NEXT:   br label %loop
; CHECK-EMPTY:
; CHECK-NEXT: loop:
; CHECK-NEXT:   %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %pivPlus1 = add nuw nsw i32 %piv, 1
; CHECK-NEXT:   %cond = icmp ult i32 %piv, 510
; CHECK-NEXT:   br i1 %cond, label %loop, label %exit
; CHECK-EMPTY:
; CHECK-NEXT: exit:
; CHECK-NEXT:   %for.lcssa = phi i32 [ %for, %loop ]
; CHECK-NEXT:   ret i32 %for.lcssa
;
entry:
  br label %loop

loop:
  %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
  %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
  %pivPlus1 = add nuw nsw i32 %piv, 1
  %cond = icmp ult i32 %piv, 510
  br i1 %cond, label %loop, label %exit

exit:
  ret i32 %for
}

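; Same as @pr45526, but the size optimization comes from profile data
; (!prof !14) instead of the optsize attribute.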
define i32 @pr45526_pgso() !prof !14 {
;
; CHECK-LABEL: @pr45526_pgso
; CHECK-NEXT: entry:
; CHECK-NEXT:   br label %loop
; CHECK-EMPTY:
; CHECK-NEXT: loop:
; CHECK-NEXT:   %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
; CHECK-NEXT:   %pivPlus1 = add nuw nsw i32 %piv, 1
; CHECK-NEXT:   %cond = icmp ult i32 %piv, 510
; CHECK-NEXT:   br i1 %cond, label %loop, label %exit
; CHECK-EMPTY:
; CHECK-NEXT: exit:
; CHECK-NEXT:   %for.lcssa = phi i32 [ %for, %loop ]
; CHECK-NEXT:   ret i32 %for.lcssa
;
entry:
  br label %loop

loop:
  %piv = phi i32 [ 0, %entry ], [ %pivPlus1, %loop ]
  %for = phi i32 [ 5, %entry ], [ %pivPlus1, %loop ]
  %pivPlus1 = add nuw nsw i32 %piv, 1
  %cond = icmp ult i32 %piv, 510
  br i1 %cond, label %loop, label %exit

exit:
  ret i32 %for
}

; PR46228: Vectorize without versioning for unit stride under optsize when
; vectorization is explicitly enabled.

; NOTE: Some assertions have been autogenerated by utils/update_test_checks.py
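; With optsize the vectorizer may not version the loop on BStride == 1, so the
; stride stays a splatted runtime value and the stores are predicated and
; scalarized in the vector body checked below.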
define void @stride1(i16* noalias %B, i32 %BStride) optsize {
; CHECK-LABEL: @stride1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[BSTRIDE:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE2]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule <2 x i32> [[VEC_IND]], <i32 1024, i32 1024>
; CHECK-NEXT:    [[TMP0:%.*]] = mul nsw <2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[TMP3]]
; CHECK-NEXT:    store i16 42, i16* [[TMP4]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
; CHECK-NEXT:    br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; CHECK:       pred.store.if1:
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[B]], i32 [[TMP6]]
; CHECK-NEXT:    store i16 42, i16* [[TMP7]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; CHECK:       pred.store.continue2:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 2, i32 2>
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1026
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !21
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
; PGSO-LABEL: @stride1(
; PGSO-NEXT:  entry:
; PGSO-NEXT:    br i1 false, label %scalar.ph, label %vector.ph
;
; NPGSO-LABEL: @stride1(
; NPGSO-NEXT:  entry:
; NPGSO-NEXT:    br i1 false, label %scalar.ph, label %vector.ph

entry:
  br label %for.body

for.body:
  %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
  %mulB = mul nsw i32 %iv, %BStride
  %gepOfB = getelementptr inbounds i16, i16* %B, i32 %mulB
  store i16 42, i16* %gepOfB, align 4
  %iv.next = add nuw nsw i32 %iv, 1
  %exitcond = icmp eq i32 %iv.next, 1025
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15

for.end:
  ret void
}

; Vectorize with versioning for unit stride under PGSO when vectorization is
; explicitly enabled.
;
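; The same loop as @stride1, but only profile-driven size optimization applies
; (!prof !14); all three run configurations still produce a vector body.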
define void @stride1_pgso(i16* noalias %B, i32 %BStride) !prof !14 {
; CHECK-LABEL: @stride1_pgso(
; CHECK: vector.body
;
; PGSO-LABEL: @stride1_pgso(
; PGSO: vector.body
;
; NPGSO-LABEL: @stride1_pgso(
; NPGSO: vector.body

entry:
  br label %for.body

for.body:
  %iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
  %mulB = mul nsw i32 %iv, %BStride
  %gepOfB = getelementptr inbounds i16, i16* %B, i32 %mulB
  store i16 42, i16* %gepOfB, align 4
  %iv.next = add nuw nsw i32 %iv, 1
  %exitcond = icmp eq i32 %iv.next, 1025
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15

for.end:
  ret void
}

; PR46652: Check that the need for a stride==1 runtime check prevents
; vectorizing a loop with a tiny trip count when compiling without -Os/-Oz.
; CHECK-LABEL: @pr46652
; CHECK-NOT: vector.scevcheck
; CHECK-NOT: vector.body
; CHECK-LABEL: for.body

@g = external global [1 x i16], align 1

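; The loop below runs only 15 iterations (%l1.02 counts from 1 to 15), so
; emitting a stride==1 runtime check is not worthwhile and the loop is expected
; to remain scalar.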
define void @pr46652(i16 %stride) {
entry:
  br label %for.body

for.body:                                        ; preds = %for.body, %entry
  %l1.02 = phi i16 [ 1, %entry ], [ %inc9, %for.body ]
  %mul = mul nsw i16 %l1.02, %stride
  %arrayidx6 = getelementptr inbounds [1 x i16], [1 x i16]* @g, i16 0, i16 %mul
  %0 = load i16, i16* %arrayidx6, align 1
  %inc9 = add nuw nsw i16 %l1.02, 1
  %exitcond.not = icmp eq i16 %inc9, 16
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:                                        ; preds = %for.body
  ret void
}

; Make sure we do not crash while building the VPlan for the loop with the
; select below.
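; The select result %cond is both the next value of %i.014 and the live-out
; returned from %exit; the test only requires that VPlan construction does not
; crash and that no vector body is produced under optsize.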
define i32 @PR48142(i32* %ptr.start, i32* %ptr.end) optsize {
; CHECK-LABEL: PR48142
; CHECK-NOT: vector.body
entry:
  br label %for.body

for.body:
  %i.014 = phi i32 [ 20, %entry ], [ %cond, %for.body ]
  %ptr.iv = phi i32* [ %ptr.start, %entry ], [ %ptr.next, %for.body ]
  %cmp4 = icmp slt i32 %i.014, 99
  %cond = select i1 %cmp4, i32 99, i32 %i.014
  store i32 0, i32* %ptr.iv
  %ptr.next = getelementptr inbounds i32, i32* %ptr.iv, i64 1
  %cmp.not = icmp eq i32* %ptr.next, %ptr.end
  br i1 %cmp.not, label %exit, label %for.body

exit:
  %res = phi i32 [ %cond, %for.body ]
  ret i32 %res
}

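; !14 attaches an entry count of 0 to the *_pgso functions; combined with the
; profile summary below, PGSO treats them as size-optimized. !15/!16 attach
; llvm.loop.vectorize.enable to the stride1 loops.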
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"ProfileSummary", !1}
!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
!2 = !{!"ProfileFormat", !"InstrProf"}
!3 = !{!"TotalCount", i64 10000}
!4 = !{!"MaxCount", i64 10}
!5 = !{!"MaxInternalCount", i64 1}
!6 = !{!"MaxFunctionCount", i64 1000}
!7 = !{!"NumCounts", i64 3}
!8 = !{!"NumFunctions", i64 3}
!9 = !{!"DetailedSummary", !10}
!10 = !{!11, !12, !13}
!11 = !{i32 10000, i64 100, i32 1}
!12 = !{i32 999000, i64 100, i32 1}
!13 = !{i32 999999, i64 1, i32 2}
!14 = !{!"function_entry_count", i64 0}
!15 = distinct !{!15, !16}
!16 = !{!"llvm.loop.vectorize.enable", i1 true}