; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This is the C++ loop being vectorized in this file with
; experimental.vector.reverse:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;

; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -S \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck %s
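; Note: the vectorizer reverses both the loaded and the stored vector for this
; loop; with -instcombine in the RUN line the two reversals cancel each other,
; which is presumably why the checks below expect plain wide loads and stores
; at negative offsets rather than calls to llvm.experimental.vector.reverse.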
define void @vector_reverse_f64(i64 %N, double* %a, double* %b) #0 {
; CHECK-LABEL: @vector_reverse_f64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint double* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint double* [[B:%.*]] to i64
; CHECK-NEXT:    [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 6
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[N]], 3
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[B1]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[A2]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP3]]
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast double* [[TMP10]] to <vscale x 8 x double>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, <vscale x 8 x double>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i32 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds double, double* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast double* [[TMP17]] to <vscale x 8 x double>*
; CHECK-NEXT:    store <vscale x 8 x double> [[TMP13]], <vscale x 8 x double>* [[TMP18]], align 8
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_08]] = add nsw i64 [[I_08_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP22]], 1.000000e+00
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store double [[ADD]], double* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_08_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]]
;
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, double* %b, i64 %i.08
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, double* %a, i64 %i.08
  store double %add, double* %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

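; Same test as above, but with 64-bit integer elements. By analogy with the
; loop at the top of the file, the C++ source would be roughly:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (long i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1;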
define void @vector_reverse_i64(i64 %N, i64* %a, i64* %b) #0 {
; CHECK-LABEL: @vector_reverse_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint i64* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint i64* [[B:%.*]] to i64
; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 6
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[N]], 3
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[B1]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[A2]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP3]]
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i64* [[TMP10]] to <vscale x 8 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, <vscale x 8 x i64>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i64, i64* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i64* [[TMP17]] to <vscale x 8 x i64>*
; CHECK-NEXT:    store <vscale x 8 x i64> [[TMP13]], <vscale x 8 x i64>* [[TMP18]], align 8
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_09_IN:%.*]] = phi i64 [ [[I_09:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_09]] = add nsw i64 [[I_09_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_09]]
; CHECK-NEXT:    [[TMP22:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[TMP22]], 1
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_09]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_09_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP15:![0-9]+]]
;
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.09
  %0 = load i64, i64* %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, i64* %a, i64 %i.09
  store i64 %add, i64* %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
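; Note: !0-!4 above correspond to what "#pragma clang loop
; vectorize_width(8, scalable)" lowers to: a vectorization width of 8 (!2)
; with scalable vectors enabled (!3) and vectorization forced on (!4).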