; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s  -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s  -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s

; These tests check that the fold-tail procedure produces correct scalar code when
; loop vectorization only interleaves (unrolls) the loop but does not vectorize it.
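; With a trip count of 15 and an interleave count of 4, the rounded-up vector trip
; count is 16; the per-lane 'icmp ule <lane>, 14' compares in the checks below form
; the fold-tail mask that disables the out-of-range lane in the final chunk.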

; CHECK-REMARKS:      remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT:  remark: {{.*}} vectorized loop

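; Trivial counted loop with a trip count of 15, roughly the C loop
; 'for (long i = 0; i != 15; ++i) ;'; as the name suggests, this exercises the
; VPlan execution path when the chosen VF is 1 (interleave-only).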
define void @VF1-VPlanExe() {
; CHECK-LABEL: @VF1-VPlanExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !2
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 15
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

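; Same trip count, but driven by a pointer induction variable walking from %ptr1
; toward %ptr2 = %ptr1 + 15; roughly the C loop
; 'double *p = ptr1; do ++p; while (p != ptr1 + 15);'. As the name suggests, this
; exercises the scalar (VF=1) lowering of VPWidenCanonicalIVRecipe.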
define void @VF1-VPWidenCanonicalIVRecipeExe(double* %ptr1) {
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, double* [[PTR1:%.*]], i64 15
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PTR1]], i64 16
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP3]]
; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[VEC_IV]], 14
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[VEC_IV4]], 14
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[VEC_IV5]], 14
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PTR2]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !4
;
entry:
  %ptr2 = getelementptr inbounds double, double* %ptr1, i64 15
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %addr = phi double* [ %ptr, %for.body ], [ %ptr1, %entry ]
  %ptr = getelementptr inbounds double, double* %addr, i64 1
  %cond = icmp eq double* %ptr, %ptr2
  br i1 %cond, label %for.cond.cleanup, label %for.body
}