; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; This is the C++ loop being vectorized in this file with
; experimental.vector.reverse:
;   #pragma clang loop vectorize_width(8, scalable)
;   for (int i = N-1; i >= 0; --i)
;     a[i] = b[i] + 1.0;

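; For reference, a self-contained C++ sketch of the two functions tested
; below. The signatures are inferred from the IR (i64 rendered as int64_t);
; this is an assumption about the original source, not taken from it:
;
;   #include <cstdint>
;
;   void vector_reverse_f64(int64_t N, double *a, double *b) {
;     #pragma clang loop vectorize_width(8, scalable)
;     for (int64_t i = N - 1; i >= 0; --i)
;       a[i] = b[i] + 1.0;
;   }
;
;   void vector_reverse_i64(int64_t N, int64_t *a, int64_t *b) {
;     #pragma clang loop vectorize_width(8, scalable)
;     for (int64_t i = N - 1; i >= 0; --i)
;       a[i] = b[i] + 1;
;   }
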
; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s

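; The pipeline above forces scalable vectorization; -dce and -instcombine are
; presumably there to clean up the vectorizer's output so the autogenerated
; CHECK lines stay compact.
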
define void @vector_reverse_f64(i64 %N, double* %a, double* %b) #0 {
; CHECK-LABEL: @vector_reverse_f64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP7:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP7]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr double, double* [[A:%.*]], i64 [[N]]
; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr double, double* [[B:%.*]], i64 [[N]]
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt double* [[SCEVGEP4]], [[A]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt double* [[SCEVGEP]], [[B]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast double* [[TMP10]] to <vscale x 8 x double>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, <vscale x 8 x double>* [[TMP11]], align 8, !alias.scope !0
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i32 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds double, double* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast double* [[TMP17]] to <vscale x 8 x double>*
; CHECK-NEXT:    store <vscale x 8 x double> [[TMP13]], <vscale x 8 x double>* [[TMP18]], align 8, !alias.scope !3, !noalias !0
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08_IN:%.*]] = phi i64 [ [[I_08:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_08]] = add nsw i64 [[I_08_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[B]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP22:%.*]] = load double, double* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP22]], 1.000000e+00
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store double [[ADD]], double* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_08_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]]
;
entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.08.in = phi i64 [ %i.08, %for.body ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, double* %b, i64 %i.08
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx1 = getelementptr inbounds double, double* %a, i64 %i.08
  store double %add, double* %arrayidx1, align 8
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
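; The same loop as above, but with i64 elements (a[i] = b[i] + 1), checking
; that the reversed accesses are also vectorized for integer element types.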
define void @vector_reverse_i64(i64 %N, i64* %a, i64* %b) #0 {
; CHECK-LABEL: @vector_reverse_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i64, i64* [[A:%.*]], i64 [[N]]
; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[N]]
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt i64* [[SCEVGEP4]], [[A]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt i64* [[SCEVGEP]], [[B]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[INDEX]], -1
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[TMP4]], [[N]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG:%.*]] = mul i32 [[TMP7]], -8
; CHECK-NEXT:    [[TMP8:%.*]] = or i32 [[DOTNEG]], 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i64* [[TMP10]] to <vscale x 8 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, <vscale x 8 x i64>* [[TMP11]], align 8, !alias.scope !9
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[DOTNEG7:%.*]] = mul i32 [[TMP14]], -8
; CHECK-NEXT:    [[TMP15:%.*]] = or i32 [[DOTNEG7]], 1
; CHECK-NEXT:    [[TMP16:%.*]] = sext i32 [[TMP15]] to i64
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i64, i64* [[TMP12]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i64* [[TMP17]] to <vscale x 8 x i64>*
; CHECK-NEXT:    store <vscale x 8 x i64> [[TMP13]], <vscale x 8 x i64>* [[TMP18]], align 8, !alias.scope !12, !noalias !9
; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP20:%.*]] = shl i64 [[TMP19]], 3
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP20]]
; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_MOD_VF]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ], [ [[N]], [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[I_09_IN:%.*]] = phi i64 [ [[I_09:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[I_09]] = add nsw i64 [[I_09_IN]], -1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_09]]
; CHECK-NEXT:    [[TMP22:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[TMP22]], 1
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_09]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[ARRAYIDX2]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[I_09_IN]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]], !llvm.loop [[LOOP15:![0-9]+]]
;
entry:
  %cmp8 = icmp sgt i64 %N, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.09.in = phi i64 [ %i.09, %for.body ], [ %N, %entry ]
  %i.09 = add nsw i64 %i.09.in, -1
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.09
  %0 = load i64, i64* %arrayidx, align 8
  %add = add i64 %0, 1
  %arrayidx2 = getelementptr inbounds i64, i64* %a, i64 %i.09
  store i64 %add, i64* %arrayidx2, align 8
  %cmp = icmp sgt i64 %i.09.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

attributes #0 = { "target-cpu"="generic" "target-features"="+neon,+sve" }
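; The !llvm.loop metadata below encodes the pragma from the comment at the top
; of the file: vectorize_width(8, scalable) maps to llvm.loop.vectorize.width
; = 8 together with llvm.loop.vectorize.scalable.enable = true, and the
; pragma's presence implies llvm.loop.vectorize.enable = true.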
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 8}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}