; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s

; These tests check that the fold-tail procedure produces correct scalar code when
; loop vectorization only unrolls (interleaves) the loop but does not vectorize it.
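; Both loops below have a trip count of 15. With an interleave count of 4 the
; vector trip count is rounded up to 16, so each unrolled copy of the loop body
; is predicated on an active-lane check (icmp ule <iv>, 14) that folds the tail.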

; CHECK-REMARKS:      remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT:  remark: {{.*}} vectorized loop

define void @VF1-VPlanExe(i32* %dst) {
; CHECK-LABEL: @VF1-VPlanExe(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE9:%.*]] ]
; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
; CHECK-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[SUNK_IND0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 [[SUNK_IND0]]
; CHECK-NEXT:    store i32 0, i32* [[TMP4]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
; CHECK:       pred.store.if4:
; CHECK-NEXT:    [[SUNK_IND1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 [[SUNK_IND1]]
; CHECK-NEXT:    store i32 0, i32* [[TMP5]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE5]]
; CHECK:       pred.store.continue5:
; CHECK-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
; CHECK:       pred.store.if6:
; CHECK-NEXT:    [[SUNK_IND2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 [[SUNK_IND2]]
; CHECK-NEXT:    store i32 0, i32* [[TMP6]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE7]]
; CHECK:       pred.store.continue7:
; CHECK-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]]
; CHECK:       pred.store.if8:
; CHECK-NEXT:    [[SUNK_IND3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 [[SUNK_IND3]]
; CHECK-NEXT:    store i32 0, i32* [[TMP7]], align 4
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE9]]
; CHECK:       pred.store.continue9:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[DST_PTR:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 0, i32* [[DST_PTR]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %dst.ptr = getelementptr inbounds i32, i32* %dst, i64 %indvars.iv
  store i32 0, i32* %dst.ptr
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 15
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

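; Same tail folding as above, but here the loop's only induction is a pointer,
; so the lane masks are built from the widened canonical IV (the scalar
; %vec.iv = add i64 %index, <lane> values) rather than from an integer primary
; induction variable.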
define void @VF1-VPWidenCanonicalIVRecipeExe(double* %ptr1) {
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, double* [[PTR1:%.*]], i64 15
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PTR1]], i64 16
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE12:%.*]] ]
; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[VEC_IV]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[VEC_IV4]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[VEC_IV5]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK:       pred.store.if:
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP4]]
; CHECK-NEXT:    store double 0.000000e+00, double* [[NEXT_GEP]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK:       pred.store.continue:
; CHECK-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK:       pred.store.if7:
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP5]]
; CHECK-NEXT:    store double 0.000000e+00, double* [[NEXT_GEP1]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE8]]
; CHECK:       pred.store.continue8:
; CHECK-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
; CHECK:       pred.store.if9:
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP6]]
; CHECK-NEXT:    store double 0.000000e+00, double* [[NEXT_GEP2]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE10]]
; CHECK:       pred.store.continue10:
; CHECK-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12]]
; CHECK:       pred.store.if11:
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP7]]
; CHECK-NEXT:    store double 0.000000e+00, double* [[NEXT_GEP3]], align 8
; CHECK-NEXT:    br label [[PRED_STORE_CONTINUE12]]
; CHECK:       pred.store.continue12:
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    store double 0.000000e+00, double* [[ADDR]], align 8
; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PTR2]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
;
entry:
  %ptr2 = getelementptr inbounds double, double* %ptr1, i64 15
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %addr = phi double* [ %ptr, %for.body ], [ %ptr1, %entry ]
  store double 0.0, double* %addr
  %ptr = getelementptr inbounds double, double* %addr, i64 1
  %cond = icmp eq double* %ptr, %ptr2
  br i1 %cond, label %for.cond.cleanup, label %for.body
}
