; RUN: opt < %s -loop-vectorize -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -prefer-predicate-over-epilog -S | FileCheck -check-prefix=PREDFLAG %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

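; The two functions below implement the same loop, storing B[i] + C[i] to
; A[i] over a trip count of 430, which is not a multiple of the vector
; width of 8. A rough C equivalent, reconstructed from the IR rather than
; taken from an original source file:
;
; void tail_folding_enabled(int *restrict A, int *restrict B, int *restrict C) {
;   for (int i = 0; i < 430; ++i)
;     A[i] = B[i] + C[i];
; }
;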
define dso_local void @tail_folding_enabled(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) local_unnamed_addr #0 {
; CHECK-LABEL: tail_folding_enabled(
; CHECK:  vector.body:
; CHECK:  %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; CHECK:  %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; CHECK:  %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
; CHECK:  call void @llvm.masked.store.v8i32.p0v8i32(
; CHECK:  %index.next = add i64 %index, 8
; CHECK:  %12 = icmp eq i64 %index.next, 432
; CHECK:  br i1 %12, label %middle.block, label %vector.body, !llvm.loop !0
; PREDFLAG-LABEL: tail_folding_enabled(
; PREDFLAG:  vector.body:
; PREDFLAG:  %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; PREDFLAG:  %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; PREDFLAG:  %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
; PREDFLAG:  call void @llvm.masked.store.v8i32.p0v8i32(
; PREDFLAG:  %index.next = add i64 %index, 8
; PREDFLAG:  %12 = icmp eq i64 %index.next, 432
; PREDFLAG:  br i1 %12, label %middle.block, label %vector.body, !llvm.loop !0
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
}

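; Same loop body as above, but the loop metadata (!10) sets
; llvm.loop.vectorize.predicate.enable to false, so the default run is
; expected to produce an unpredicated vector body (no masked loads/stores)
; plus a scalar remainder loop, while the -prefer-predicate-over-epilog
; run line overrides that hint and folds the tail anyway.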
define dso_local void @tail_folding_disabled(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) local_unnamed_addr #0 {
; CHECK-LABEL: tail_folding_disabled(
; CHECK:      vector.body:
; CHECK-NOT:  @llvm.masked.load.v8i32.p0v8i32(
; CHECK-NOT:  @llvm.masked.store.v8i32.p0v8i32(
; CHECK:      br i1 %44, label {{.*}}, label %vector.body
; PREDFLAG-LABEL: tail_folding_disabled(
; PREDFLAG:  vector.body:
; PREDFLAG:  %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; PREDFLAG:  %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
; PREDFLAG:  %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
; PREDFLAG:  call void @llvm.masked.store.v8i32.p0v8i32(
; PREDFLAG:  %index.next = add i64 %index, 8
; PREDFLAG:  %12 = icmp eq i64 %index.next, 432
; PREDFLAG:  br i1 %12, label %middle.block, label %vector.body, !llvm.loop !4
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %0
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  store i32 %add, i32* %arrayidx4, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 430
  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !10
}

; Check that folding the tail under optsize passes the reduction live-out
; value through a select.
; int reduction_i32(int *A, int *B, int N) {
;   int sum = 0;
;   for (int i = 0; i < N; ++i)
;     sum += (A[i] + B[i]);
;   return sum;
; }
;
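; With the tail folded, inactive lanes of the masked loads feed undef into
; the accumulator, so the reduction live-out is a select under the icmp ule
; lane mask between the updated and the previous accumulator. middle.block
; then reduces the <8 x i32> accumulator to a scalar with log2(8) = 3
; shufflevector/add steps.
;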
define i32 @reduction_i32(i32* nocapture readonly %A, i32* nocapture readonly %B, i32 %N) #0 {
; CHECK-LABEL: @reduction_i32(
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; CHECK-NEXT:    [[ACCUM_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ [[ACCUM:%.*]], %vector.body ]
; CHECK:         [[ICMPULE:%.*]] = icmp ule <8 x i64>
; CHECK:         [[LOAD1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* {{.*}}, i32 4, <8 x i1> [[ICMPULE]], <8 x i32> undef)
; CHECK:         [[LOAD2:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* {{.*}}, i32 4, <8 x i1> [[ICMPULE]], <8 x i32> undef)
; CHECK-NEXT:    [[ADD:%.*]] = add nsw <8 x i32> [[LOAD2]], [[LOAD1]]
; CHECK-NEXT:    [[ACCUM]] = add nuw nsw <8 x i32> [[ADD]], [[ACCUM_PHI]]
; CHECK:         [[LIVEOUT:%.*]] = select <8 x i1> [[ICMPULE]], <8 x i32> [[ACCUM]], <8 x i32> [[ACCUM_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; CHECK:       middle.block:
; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[LIVEOUT]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[LIVEOUT]], [[RDX_SHUF]]
; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX5:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF4]]
; CHECK-NEXT:    [[RDX_SHUF6:%.*]] = shufflevector <8 x i32> [[BIN_RDX5]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX7:%.*]] = add <8 x i32> [[BIN_RDX5]], [[RDX_SHUF6]]
; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <8 x i32> [[BIN_RDX7]], i32 0
; CHECK-NEXT:    br i1 true, label %for.cond.cleanup, label %scalar.ph
; CHECK:       scalar.ph:
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[SUM_1_LCSSA:%.*]] = phi i32 [ {{.*}}, %for.body ], [ [[TMP17]], %middle.block ]
; CHECK-NEXT:    ret i32 [[SUM_1_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %sum.0 = phi i32 [ %sum.1, %for.body ], [ 0, %entry ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidxA = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = load i32, i32* %arrayidxA, align 4
  %arrayidxB = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
  %1 = load i32, i32* %arrayidxB, align 4
  %add = add nsw i32 %1, %0
  %sum.1 = add nuw nsw i32 %add, %sum.0
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %N
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:
  ret i32 %sum.1
}

; CHECK:      !0 = distinct !{!0, !1}
; CHECK-NEXT: !1 = !{!"llvm.loop.isvectorized", i32 1}
; CHECK-NEXT: !2 = distinct !{!2, !3, !1}
; CHECK-NEXT: !3 = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-NEXT: !4 = distinct !{!4, !1}
; CHECK-NEXT: !5 = distinct !{!5, !3, !1}

attributes #0 = { nounwind optsize uwtable "target-cpu"="core-avx2" "target-features"="+avx,+avx2" }

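; !6/!7/!8 enable both vectorization and predication (tail folding) for
; tail_folding_enabled; !10/!11/!12 enable vectorization but explicitly
; disable predication for tail_folding_disabled.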
!6 = distinct !{!6, !7, !8}
!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
!8 = !{!"llvm.loop.vectorize.enable", i1 true}

!10 = distinct !{!10, !11, !12}
!11 = !{!"llvm.loop.vectorize.predicate.enable", i1 false}
!12 = !{!"llvm.loop.vectorize.enable", i1 true}