; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This is the C++ loop being vectorized in this file with
; experimental.vector.reverse:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;

; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s
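; In the CHECK lines below, the vectorizer replaces the decrementing
; induction variable with an increasing one: the reversed element index
; is computed as N - 1 - index (emitted as an 'xor' with -1 followed by
; an 'add' of N), and each wide access is then rebased by
; (1 - 8 * vscale) elements so that a single <vscale x 8 x ...> load or
; store covers the 8 * vscale elements ending at index N - 1 - index.
; Because the load and the store are reversed with the same permutation,
; the explicit reverse shuffles cancel out under -instcombine, so no
; call to experimental.vector.reverse is expected in the checked output.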

define void @vector_reverse_f64(i64 %N, double* %a, double* %b) #0 {
; CHECK-LABEL: @vector_reverse_f64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint double* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint double* [[B:%.*]] to i64
; CHECK-NEXT:    [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 6
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[N]], 3
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[B1]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[A2]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP3]]
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast double* [[TMP10]] to <vscale x 8 x double>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, <vscale x 8 x double>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i32 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds double, double* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast double* [[TMP17]] to <vscale x 8 x double>*
; CHECK-NEXT:    store <vscale x 8 x double> [[TMP13]], <vscale x 8 x double>* [[TMP18]], align 8
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_08]] = add nsw i64 [[I_08_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP22]], 1.000000e+00
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store double [[ADD]], double* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_08_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]]
;
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, double* %b, i64 %i.08
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, double* %a, i64 %i.08
  store double %add, double* %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
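
; The function below is the integer analogue of the loop above; in C++
; terms it roughly corresponds to (hypothetical source, same pragma):
;   #pragma clang loop vectorize_width(8, scalable)
;   for (long i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1;
; Its CHECK lines mirror @vector_reverse_f64, with an integer 'add' on
; <vscale x 8 x i64> elements in place of the 'fadd'.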

define void @vector_reverse_i64(i64 %N, i64* %a, i64* %b) #0 {
; CHECK-LABEL: @vector_reverse_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint i64* [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint i64* [[B:%.*]] to i64
; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 6
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[N]], 3
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[B1]]
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[A2]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP3]]
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP9]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i64* [[TMP10]] to <vscale x 8 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, <vscale x 8 x i64>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i64, i64* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i64* [[TMP17]] to <vscale x 8 x i64>*
; CHECK-NEXT:    store <vscale x 8 x i64> [[TMP13]], <vscale x 8 x i64>* [[TMP18]], align 8
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_09_IN:%.*]] = phi i64 [ [[I_09:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_09]] = add nsw i64 [[I_09_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_09]]
; CHECK-NEXT:    [[TMP22:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[TMP22]], 1
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_09]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_09_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP15:![0-9]+]]
;
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.09
  %0 = load i64, i64* %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, i64* %a, i64 %i.09
  store i64 %add, i64* %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
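
; Both loops share the !llvm.loop metadata below, which requests a
; scalable vectorization factor of 8 (VF = vscale x 8). That is where
; the 'shl' by 3 of vscale in the minimum-iteration checks above comes
; from: 8 * vscale elements per vector iteration, or 64 * vscale bytes
; (the 'shl' by 6 in the runtime alias checks) for these 8-byte element
; types.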

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}