; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-interleave=4 -S | FileCheck %s --check-prefix=CHECK-VF1

; These tests check that the fold-tail procedure produces correct scalar code
; when loop vectorization only unrolls (interleaves) the loop but does not
; vectorize it.

; CHECK-REMARKS: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT: remark: {{.*}} vectorized loop

define void @VF1-VPlanExe() {
; CHECK-LABEL: @VF1-VPlanExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !2
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 15
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

define void @VF1-VPWidenCanonicalIVRecipeExe(double* %ptr1) {
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[PTR2:%.*]] = getelementptr inbounds double, double* [[PTR1:%.*]], i64 15
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PTR1]], i64 16
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PTR1]], i64 [[TMP3]]
; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[VEC_IV]], 14
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[VEC_IV4]], 14
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[VEC_IV5]], 14
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !3
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PTR1]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PTR2]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !4
;
entry:
  %ptr2 = getelementptr inbounds double, double* %ptr1, i64 15
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %addr = phi double* [ %ptr, %for.body ], [ %ptr1, %entry ]
  %ptr = getelementptr inbounds double, double* %addr, i64 1
  %cond = icmp eq double* %ptr, %ptr2
  br i1 %cond, label %for.cond.cleanup, label %for.body
}

; The following test case is extended from the test in https://reviews.llvm.org/D80085.
; Like the two tests above, it checks that the fold-tail procedure produces
; correct scalar code when loop vectorization only unrolls the loop but does
; not vectorize it.
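; Because @pr45679 is marked optsize, the vectorizer may not emit a scalar
; remainder loop; the tail is folded instead, so each unrolled iteration's
; store is guarded by its own compare and branch (the pred.store.if /
; pred.store.continue blocks checked below). For reference, the loop is
; roughly equivalent to the following C source (a reconstruction from the IR,
; not taken from the original test):
;
;   void pr45679(int *A) {
;     for (int i = 0; i < 14; i++)
;       A[i] = 13; /* i32 store, align 1 */
;   }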

define void @pr45679(i32* %A) optsize {
; CHECK-VF1-LABEL: @pr45679
; CHECK-VF1-NEXT:  entry:
; CHECK-VF1-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-VF1:       vector.ph:
; CHECK-VF1-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK-VF1:       vector.body:
; CHECK-VF1-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE9:%.*]] ]
; CHECK-VF1-NEXT:    [[INDUCTION:%.*]] = add i32 [[INDEX]], 0
; CHECK-VF1-NEXT:    [[INDUCTION1:%.*]] = add i32 [[INDEX]], 1
; CHECK-VF1-NEXT:    [[INDUCTION2:%.*]] = add i32 [[INDEX]], 2
; CHECK-VF1-NEXT:    [[INDUCTION3:%.*]] = add i32 [[INDEX]], 3
; CHECK-VF1-NEXT:    [[TMP0:%.*]] = icmp ule i32 [[INDUCTION]], 13
; CHECK-VF1-NEXT:    [[TMP1:%.*]] = icmp ule i32 [[INDUCTION1]], 13
; CHECK-VF1-NEXT:    [[TMP2:%.*]] = icmp ule i32 [[INDUCTION2]], 13
; CHECK-VF1-NEXT:    [[TMP3:%.*]] = icmp ule i32 [[INDUCTION3]], 13
; CHECK-VF1-NEXT:    br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK-VF1:       pred.store.if:
; CHECK-VF1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 [[INDUCTION]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP4]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE]]
; CHECK-VF1:       pred.store.continue:
; CHECK-VF1-NEXT:    br i1 [[TMP1]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
; CHECK-VF1:       pred.store.if4:
; CHECK-VF1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION1]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP5]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE5]]
; CHECK-VF1:       pred.store.continue5:
; CHECK-VF1-NEXT:    br i1 [[TMP2]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
; CHECK-VF1:       pred.store.if6:
; CHECK-VF1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION2]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP6]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE7]]
; CHECK-VF1:       pred.store.continue7:
; CHECK-VF1-NEXT:    br i1 [[TMP3]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]]
; CHECK-VF1:       pred.store.if8:
; CHECK-VF1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[INDUCTION3]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[TMP7]], align 1
; CHECK-VF1-NEXT:    br label [[PRED_STORE_CONTINUE9]]
; CHECK-VF1:       pred.store.continue9:
; CHECK-VF1-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-VF1-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16
; CHECK-VF1-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK-VF1:       middle.block:
; CHECK-VF1-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-VF1:       scalar.ph:
; CHECK-VF1-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-VF1-NEXT:    br label [[LOOP:%.*]]
; CHECK-VF1:       loop:
; CHECK-VF1-NEXT:    [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; CHECK-VF1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 [[RIV]]
; CHECK-VF1-NEXT:    store i32 13, i32* [[ARRAYIDX]], align 1
; CHECK-VF1-NEXT:    [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
; CHECK-VF1-NEXT:    [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14
; CHECK-VF1-NEXT:    br i1 [[COND]], label [[EXIT]], label [[LOOP]]
; CHECK-VF1:       exit:
; CHECK-VF1-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %riv = phi i32 [ 0, %entry ], [ %rivPlus1, %loop ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i32 %riv
  store i32 13, i32* %arrayidx, align 1
  %rivPlus1 = add nuw nsw i32 %riv, 1
  %cond = icmp eq i32 %rivPlus1, 14
  br i1 %cond, label %exit, label %loop

exit:
  ret void
}
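
; Note: the CHECK/CHECK-VF1 assertions in this file were autogenerated (see
; the NOTE at the top). Assuming a built 'opt' binary is reachable, they can
; typically be regenerated after changing the IR with an invocation along the
; lines of:
;
;   utils/update_test_checks.py <path-to-this-test>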