; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-interleave=4 -S | FileCheck %s --check-prefix=CHECK-VF1

; These tests check that the fold-tail procedure produces correct scalar code
; when loop vectorization is only unrolling and not vectorizing.

; CHECK-REMARKS: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT: remark: {{.*}} vectorized loop

define void @VF1-VPlanExe() {
; CHECK-LABEL: @VF1-VPlanExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !2
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 15
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
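
; The pointer-induction loop below exercises VPWidenCanonicalIVRecipe: with
; VF=1 the canonical induction is expanded into one scalar copy per
; interleaved lane (VEC_IV through VEC_IV6 in the checks), each compared
; ule against trip-count - 1 (here 14) to form the fold-tail mask.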
define void @VF1-VPWidenCanonicalIVRecipeExe(double* %ptr1) {
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, double* [[PTR1:%.*]], i64 15
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PTR1]], i64 16
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP3]]
; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[VEC_IV]], 14
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[VEC_IV4]], 14
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[VEC_IV5]], 14
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PTR2]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !4
;
entry:
  %ptr2 = getelementptr inbounds double, double* %ptr1, i64 15
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %addr = phi double* [ %ptr, %for.body ], [ %ptr1, %entry ]
  %ptr = getelementptr inbounds double, double* %addr, i64 1
  %cond = icmp eq double* %ptr, %ptr2
  br i1 %cond, label %for.cond.cleanup, label %for.body
}

; The following test case is extended from the test in https://reviews.llvm.org/D80085.
; Like the two tests above, it checks that the fold-tail procedure produces
; correct scalar code when loop vectorization is only unrolling and not
; vectorizing.
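; Since the loop is only unrolled (VF=1), the conditional store is scalarized:
; the checks expect a separate pred.store.if/pred.store.continue block pair
; per lane, each guarded by that lane's fold-tail mask bit.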
define void @pr45679(i32* %A) optsize {
; CHECK-VF1-LABEL: @pr45679
; CHECK-VF1-NEXT:  entry:
; CHECK-VF1-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-VF1:       vector.ph:
; CHECK-VF1-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-VF1:       vector.body:
; CHECK-VF1-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE9:%.*]] ]
; CHECK-VF1-NEXT:    [[INDUCTION:%.*]] = add i32 [[INDEX]], 0
; CHECK-VF1-NEXT:    [[INDUCTION1:%.*]] = add i32 [[INDEX]], 1
; CHECK-VF1-NEXT:    [[INDUCTION2:%.*]] = add i32 [[INDEX]], 2
; CHECK-VF1-NEXT:    [[INDUCTION3:%.*]] = add i32 [[INDEX]], 3
; CHECK-VF1-NEXT:    [[TMP0:%.*]] = icmp ule i32 [[INDUCTION]], 13
; CHECK-VF1-NEXT:    [[TMP1:%.*]] = icmp ule i32 [[INDUCTION1]], 13
; CHECK-VF1-NEXT:    [[TMP2:%.*]] = icmp ule i32 [[INDUCTION2]], 13
; CHECK-VF1-NEXT:    [[TMP3:%.*]] = icmp ule i32 [[INDUCTION3]], 13
; CHECK-VF1-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK-VF1:       pred.store.if:
; CHECK-VF1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDUCTION]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP4]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK-VF1:       pred.store.continue:
; CHECK-VF1-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
; CHECK-VF1:       pred.store.if4:
; CHECK-VF1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION1]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP5]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE5]]
; CHECK-VF1:       pred.store.continue5:
; CHECK-VF1-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
; CHECK-VF1:       pred.store.if6:
; CHECK-VF1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION2]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP6]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE7]]
; CHECK-VF1:       pred.store.continue7:
; CHECK-VF1-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]]
; CHECK-VF1:       pred.store.if8:
; CHECK-VF1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION3]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP7]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE9]]
; CHECK-VF1:       pred.store.continue9:
; CHECK-VF1-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-VF1-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-VF1-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK-VF1:       middle.block:
; CHECK-VF1-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-VF1:       scalar.ph:
; CHECK-VF1-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-VF1-NEXT:    br label [[LOOP:%.*]]
; CHECK-VF1:       loop:
; CHECK-VF1-NEXT:    [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; CHECK-VF1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[RIV]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[ARRAYIDX]], align 1
; CHECK-VF1-NEXT:    [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
; CHECK-VF1-NEXT:    [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
; CHECK-VF1-NEXT:    br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; CHECK-VF1:       exit:
; CHECK-VF1-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %riv = phi i32 [ 0, %entry ], [ %rivPlus1, %loop ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %riv
  store i32 13, i32* %arrayidx, align 1
  %rivPlus1 = add nuw nsw i32 %riv, 1
  %cond = icmp eq i32 %rivPlus1, 14
  br i1 %cond, label %exit, label %loop

exit:
  ret void
}