; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK

; Exercise tail folding on RISCV w/scalable vectors.

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"

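; a[i] += v, exercise basic masked vector add with tail folding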
define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i64 -1025, [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[V:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 [[TMP5]], i64 1024)
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64.p0(ptr [[TMP7]], i32 8, <vscale x 1 x i1> [[ACTIVE_LANE_MASK]], <vscale x 1 x i64> poison)
; CHECK-NEXT:    [[TMP8:%.*]] = add <vscale x 1 x i64> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 1 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT:    [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT:    store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  %elem = load i64, ptr %arrayidx
  %add = add i64 %elem, %v
  store i64 %add, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

; a[b[i]] = v, exercise scatter support
define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT:    [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[AIDX]]
; CHECK-NEXT:    store i64 [[V:%.*]], ptr [[AADDR]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %baddr = getelementptr inbounds i64, ptr %b, i64 %iv
  %aidx = load i64, ptr %baddr
  %aaddr = getelementptr inbounds i64, ptr %a, i64 %aidx
  store i64 %v, ptr %aaddr
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

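; sum += a[b[i]], exercise gather support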
define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_load(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT:    [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[AIDX]]
; CHECK-NEXT:    [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ]
; CHECK-NEXT:    ret i64 [[SUM_NEXT_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum = phi i64 [ 0, %entry ], [ %sum.next, %for.body ]
  %baddr = getelementptr inbounds i64, ptr %b, i64 %iv
  %aidx = load i64, ptr %baddr
  %aaddr = getelementptr inbounds i64, ptr %a, i64 %aidx
  %elem = load i64, ptr %aaddr
  %iv.next = add nuw nsw i64 %iv, 1
  %sum.next = add i64 %sum, %elem
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret i64 %sum.next
}

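; a[i] = v, exercise splatting a loop invariant scalar value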
define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @splat_int(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i64 -1025, [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = sub i64 [[TMP3]], 1
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP2]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[V:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 [[TMP5]], i64 1024)
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i32 0
; CHECK-NEXT:    call void @llvm.masked.store.nxv1i64.p0(<vscale x 1 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], i32 8, <vscale x 1 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT:    store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

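; *b = v and a[i] = v, exercise a store to a loop invariant (uniform) address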
define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @uniform_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    store i64 [[V:%.*]], ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  store i64 %v, ptr %b, align 8
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret void
}

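; a[i] = *b, exercise a load from a loop invariant (uniform) address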
define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) {
; CHECK-LABEL: @uniform_load(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[V:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    [[V_LCSSA:%.*]] = phi i64 [ [[V]], [[FOR_BODY]] ]
; CHECK-NEXT:    ret i64 [[V_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %v = load i64, ptr %b, align 8
  %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %v, ptr %arrayidx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1024
  br i1 %exitcond.not, label %for.end, label %for.body

for.end:
  ret i64 %v
}