; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -scalable-vectorization=on -force-target-instruction-cost=1 -force-target-supports-scalable-vectors -dce -instcombine < %s -S | FileCheck %s
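; The RUN line forces scalable-vector support and a flat instruction cost of 1
; so the test does not depend on a real target; -dce and -instcombine merely
; clean up the vectorizer's output before FileCheck matches it.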

; Test that we can vectorize an add of the induction variable:
;   for (long long i = 0; i < n; i++) {
;     a[i] = b[i] + i;
;   }
; with an unroll factor (interleave count) of 2.
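; With VF = (vscale x 2) and IC = 2, each vector iteration handles
; 4 * vscale elements. Roughly (a sketch of the vector loop, not matched
; literally below):
;   for (long long i = 0; i < n_vec; i += 4*vscale) {
;     a[i .. i+2*vscale)          = b[i ..]          + {i, i+1, ...};
;     a[i+2*vscale .. i+4*vscale) = b[i+2*vscale ..] + {i+2*vscale, ...};
;   }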

define void @add_ind64_unrolled(i64* noalias nocapture %a, i64* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add_ind64_unrolled(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP5]], 1
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP8]], align 8
; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP10:%.*]] = shl i32 [[TMP9]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[TMP7]], i64 [[TMP11]]
; CHECK-NEXT:    [[TMP13:%.*]] = bitcast i64* [[TMP12]] to <vscale x 2 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP13]], align 8
; CHECK-NEXT:    [[TMP14:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT:    [[TMP15:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP17:%.*]] = bitcast i64* [[TMP16]] to <vscale x 2 x i64>*
; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64>* [[TMP17]], align 8
; CHECK-NEXT:    [[TMP18:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 1
; CHECK-NEXT:    [[TMP20:%.*]] = sext i32 [[TMP19]] to i64
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i64, i64* [[TMP16]], i64 [[TMP20]]
; CHECK-NEXT:    [[TMP22:%.*]] = bitcast i64* [[TMP21]] to <vscale x 2 x i64>*
; CHECK-NEXT:    store <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64>* [[TMP22]], align 8
; CHECK-NEXT:    [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP24:%.*]] = shl i64 [[TMP23]], 2
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP24]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[STEP_ADD]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP26:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP26]], [[I_08]]
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.08
  %0 = load i64, i64* %arrayidx, align 8
  %add = add nsw i64 %0, %i.08
  %arrayidx1 = getelementptr inbounds i64, i64* %a, i64 %i.08
  store i64 %add, i64* %arrayidx1, align 8
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit:                                 ; preds = %for.body
  ret void
}


; Same as above, except we test with a vectorization factor of (vscale x 1).
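; The (vscale x 1) factor is requested through loop metadata !9 at the bottom
; of the file, which pairs llvm.loop.vectorize.width = 1 (!10) with
; llvm.loop.vectorize.scalable.enable = true (!3).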

define void @add_ind64_unrolled_nxv1i64(i64* noalias nocapture %a, i64* noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add_ind64_unrolled_nxv1i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 1
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP5]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[DOTSPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 1 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[STEP_ADD:%.*]] = add <vscale x 1 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i64* [[TMP6]] to <vscale x 1 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, <vscale x 1 x i64>* [[TMP7]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i64, i64* [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i64* [[TMP10]] to <vscale x 1 x i64>*
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 1 x i64>, <vscale x 1 x i64>* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP12:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT:    [[TMP13:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP15:%.*]] = bitcast i64* [[TMP14]] to <vscale x 1 x i64>*
; CHECK-NEXT:    store <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64>* [[TMP15]], align 8
; CHECK-NEXT:    [[TMP16:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i64, i64* [[TMP14]], i64 [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = bitcast i64* [[TMP18]] to <vscale x 1 x i64>*
; CHECK-NEXT:    store <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64>* [[TMP19]], align 8
; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP21:%.*]] = shl i64 [[TMP20]], 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 1 x i64> [[STEP_ADD]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[B]], i64 [[I_08]]
; CHECK-NEXT:    [[TMP23:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP23]], [[I_08]]
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store i64 [[ADD]], i64* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, i64* %b, i64 %i.08
  %0 = load i64, i64* %arrayidx, align 8
  %add = add nsw i64 %0, %i.08
  %arrayidx1 = getelementptr inbounds i64, i64* %a, i64 %i.08
  store i64 %add, i64* %arrayidx1, align 8
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !9

exit:                                 ; preds = %for.body
  ret void
}


; Test that we can vectorize a separate induction variable (not used for the branch)
;   int r = 0;
;   for (long long i = 0; i < n; i++) {
;     a[i] = r;
;     r += 2;
;   }
; with an unroll factor (interleave count) of 1.
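; Since r is simply 2*i, the expectation (verified below) is that the
; vectorizer materializes it as a stepped vector rather than a scalar cycle:
;   start = stepvector << 1        ; <0, 2, 4, ...>
;   step  = splat(8 * vscale)      ; i.e. 2 * VF for VF = (vscale x 4)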


define void @add_unique_ind32(i32* noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @add_unique_ind32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[CAST_CRD:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-NEXT:    [[IND_END:%.*]] = shl i32 [[CAST_CRD]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
; CHECK-NEXT:    [[TMP5:%.*]] = shl <vscale x 4 x i32> [[TMP4]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP7:%.*]] = shl i32 [[TMP6]], 3
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store <vscale x 4 x i32> [[VEC_IND]], <vscale x 4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP11:%.*]] = shl i64 [[TMP10]], 2
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store i32 [[R_07]], i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[R_07]], 2
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %i.08
  store i32 %r.07, i32* %arrayidx, align 4
  %add = add nuw nsw i32 %r.07, 2
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !6

exit:                                 ; preds = %for.body
  ret void
}


; Test that we can vectorize a separate FP induction variable (not used for the branch)
;   float r = 0;
;   for (long long i = 0; i < n; i++) {
;     a[i] = r;
;     r += 2;
;   }
; with an unroll factor (interleave count) of 1.
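; The FP case follows the same shape; note that the scalar loop's resume
; value for r is recomputed from the vector trip count,
; IND_END = sitofp(n.vec) * 2.0 + 0.0, rather than extracted from the last
; vector iteration (see the checks below).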

define void @add_unique_indf32(float* noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @add_unique_indf32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT:    [[CAST_CRD:%.*]] = sitofp i64 [[N_VEC]] to float
; CHECK-NEXT:    [[TMP4:%.*]] = fmul float [[CAST_CRD]], 2.000000e+00
; CHECK-NEXT:    [[IND_END:%.*]] = fadd float [[TMP4]], 0.000000e+00
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
; CHECK-NEXT:    [[TMP6:%.*]] = uitofp <vscale x 4 x i32> [[TMP5]] to <vscale x 4 x float>
; CHECK-NEXT:    [[TMP7:%.*]] = fmul <vscale x 4 x float> [[TMP6]], shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 2.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT:    [[INDUCTION:%.*]] = fadd <vscale x 4 x float> [[TMP7]], zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[TMP9:%.*]] = shl i32 [[TMP8]], 2
; CHECK-NEXT:    [[TMP10:%.*]] = uitofp i32 [[TMP9]] to float
; CHECK-NEXT:    [[TMP11:%.*]] = fmul float [[TMP10]], 2.000000e+00
; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[TMP11]], i64 0
; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP13:%.*]] = bitcast float* [[TMP12]] to <vscale x 4 x float>*
; CHECK-NEXT:    store <vscale x 4 x float> [[VEC_IND]], <vscale x 4 x float>* [[TMP13]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP15:%.*]] = shl i64 [[TMP14]], 2
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
; CHECK-NEXT:    [[VEC_IND_NEXT]] = fadd <vscale x 4 x float> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi float [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[I_08]]
; CHECK-NEXT:    store float [[R_07]], float* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ADD]] = fadd float [[R_07]], 2.000000e+00
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %i.08
  store float %r.07, float* %arrayidx, align 4
  %add = fadd float %r.07, 2.000000e+00
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !6

exit:                                 ; preds = %for.body
  ret void
}

!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 2}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 2}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}
!6 = distinct !{!6, !1, !7, !3, !8, !5}
!7 = !{!"llvm.loop.vectorize.width", i32 4}
!8 = !{!"llvm.loop.interleave.count", i32 1}
!9 = distinct !{!9, !1, !10, !3, !4, !5}
!10 = !{!"llvm.loop.vectorize.width", i32 1}
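; Summary of the shapes forced by the metadata above:
;   !0 -> VF = (vscale x 2), IC = 2   (@add_ind64_unrolled)
;   !9 -> VF = (vscale x 1), IC = 2   (@add_ind64_unrolled_nxv1i64)
;   !6 -> VF = (vscale x 4), IC = 1   (@add_unique_ind32, @add_unique_indf32)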