; RUN: opt %s -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"

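; Both accesses below step by 4 bytes per iteration and access 4 bytes, so the
; runtime check collapses to a single pointer-difference check: the vector loop
; is only entered if the pointers are at least VF * 4 = 16 bytes apart. A rough
; C equivalent of the loop, for illustration only:
;   for (long i = 0; i < n; i++)
;     b[i] = a[i] * 3;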
define void @same_step_and_size(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @same_step_and_size(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  %l = load i32, ptr %gep.a
  %mul = mul nsw i32 %l, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

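; Neither store below dominates the other, but both stride by 4 bytes and store
; 4 bytes, so a single difference check still suffices. A rough C equivalent,
; for illustration only:
;   for (long i = 0; i < n; i++)
;     if (i != x) a[i] = 0;
;     else        b[i] = 10;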
define void @same_step_and_size_no_dominance_between_accesses(ptr %a, ptr %b, i64 %n, i64 %x) {
; CHECK-LABEL: @same_step_and_size_no_dominance_between_accesses(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B2:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT:    [[A1:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 [[A1]], [[B2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %cmp = icmp ne i64 %iv, %x
  br i1 %cmp, label %then, label %else

then:
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
  store i32 0, ptr %gep.a
  br label %loop.latch

else:
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 10, ptr %gep.b
  br label %loop.latch

loop.latch:
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

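; The access steps differ (2 bytes for the i16 load vs. 4 bytes for the i32
; store), so no difference check is possible and full no-overlap checks against
; the SCEV-expanded upper bounds are emitted instead. A rough C equivalent, for
; illustration only, with a of type 'short *' and b of type 'int *':
;   for (long i = 0; i < n; i++)
;     b[i] = a[i] * 3;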
define void @different_steps_and_different_access_sizes(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @different_steps_and_different_access_sizes(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[N_SHL_2:%.*]] = shl i64 %n, 2
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr %b, i64 [[N_SHL_2]]
; CHECK-NEXT:    [[N_SHL_1:%.*]] = shl i64 %n, 1
; CHECK-NEXT:    [[SCEVGEP4:%.*]] = getelementptr i8, ptr %a, i64 [[N_SHL_1]]
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr %b, [[SCEVGEP4]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr %a, [[SCEVGEP]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds i16, ptr %a, i64 %iv
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

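; The steps match (4 bytes per iteration on both sides) even though the access
; sizes differ, so a difference check is still used; the '+ 2' accounts for the
; load starting 2 bytes into each [2 x i16] element. A rough C equivalent, for
; illustration only, with a of type 'short (*)[2]' and b of type 'int *':
;   for (long i = 0; i < n; i++)
;     b[i] = a[i][1] * 3;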
define void @steps_match_but_different_access_sizes_1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw i64 [[A2]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[B1]], [[TMP0]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %l = load i16, ptr %gep.a
  %l.ext = sext i16 %l to i32
  %mul = mul nsw i32 %l.ext, 3
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  store i32 %mul, ptr %gep.b
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped.
define void @steps_match_but_different_access_sizes_2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B2:%.*]] = ptrtoint ptr [[B:%.*]] to i64
; CHECK-NEXT:    [[A1:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw i64 [[A1]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[B2]]
; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT:    br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
  %l = load i32, ptr %gep.b
  %mul = mul nsw i32 %l, 3
  %gep.a = getelementptr inbounds [2 x i16], ptr %a, i64 %iv, i64 1
  %trunc = trunc i32 %mul to i16
  store i16 %trunc, ptr %gep.a
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, %n
  br i1 %exitcond, label %exit, label %loop

exit:
  ret void
}

; Full no-overlap checks are required instead of difference checks, as
; one of the add-recs used is invariant in the inner loop.
; Test case for PR57315.
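; A rough C equivalent of the nested loop below, for illustration only; note
; that the store address a[i] does not depend on the inner induction variable:
;   for (long i = 0; i < n; i++)
;     for (long j = 0; j < n; j++)
;       a[i] = b[j] - 10;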
define void @nested_loop_outer_iv_addrec_invariant_in_inner1(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @nested_loop_outer_iv_addrec_invariant_in_inner1(
; CHECK:        entry:
; CHECK-NEXT:    [[N_SHL_2:%.*]] = shl i64 %n, 2
; CHECK-NEXT:    [[B_GEP_UPPER:%.*]] = getelementptr i8, ptr %b, i64 [[N_SHL_2]]
; CHECK-NEXT:    br label %outer

; CHECK:       outer.header:
; CHECK:         [[OUTER_IV_SHL_2:%.*]] = shl i64 %outer.iv, 2
; CHECK-NEXT:    [[A_GEP_UPPER:%.*]] = getelementptr i8, ptr %a, i64 [[OUTER_IV_SHL_2]]
; CHECK-NEXT:    [[OUTER_IV_4:%.*]] = add i64 [[OUTER_IV_SHL_2]], 4
; CHECK-NEXT:    [[A_GEP_UPPER_4:%.*]] = getelementptr i8, ptr %a, i64 [[OUTER_IV_4]]
; CHECK:         [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck

; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[A_GEP_UPPER]], [[B_GEP_UPPER]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr %b, [[A_GEP_UPPER_4]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  %l = load i32, ptr %gep.b, align 4
  %sub = sub i32 %l, 10
  store i32 %sub, ptr %gep.a, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}

; Same as @nested_loop_outer_iv_addrec_invariant_in_inner1 but with dependence
; sink and source swapped.
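; A rough C equivalent, for illustration only; here the inner-loop-invariant
; address a[i] is read rather than written:
;   for (long i = 0; i < n; i++)
;     for (long j = 0; j < n; j++)
;       b[j] = a[i] - 10;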
define void @nested_loop_outer_iv_addrec_invariant_in_inner2(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @nested_loop_outer_iv_addrec_invariant_in_inner2(
; CHECK:        entry:
; CHECK-NEXT:    [[N_SHL_2:%.*]] = shl i64 %n, 2
; CHECK-NEXT:    [[B_GEP_UPPER:%.*]] = getelementptr i8, ptr %b, i64 [[N_SHL_2]]
; CHECK-NEXT:    br label %outer

; CHECK:       outer.header:
; CHECK:         [[OUTER_IV_SHL_2:%.*]] = shl i64 %outer.iv, 2
; CHECK-NEXT:    [[A_GEP_UPPER:%.*]] = getelementptr i8, ptr %a, i64 [[OUTER_IV_SHL_2]]
; CHECK-NEXT:    [[OUTER_IV_4:%.*]] = add i64 [[OUTER_IV_SHL_2]], 4
; CHECK-NEXT:    [[A_GEP_UPPER_4:%.*]] = getelementptr i8, ptr %a, i64 [[OUTER_IV_4]]
; CHECK:         [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck

; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult ptr %b, [[A_GEP_UPPER_4]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[A_GEP_UPPER]], [[B_GEP_UPPER]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
entry:
  br label %outer.header

outer.header:
  %outer.iv = phi i64 [ %outer.iv.next, %outer.latch ], [ 0, %entry ]
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %outer.iv
  br label %inner.body

inner.body:
  %inner.iv = phi i64 [ 0, %outer.header ], [ %inner.iv.next, %inner.body ]
  %l = load i32, ptr %gep.a, align 4
  %sub = sub i32 %l, 10
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %inner.iv
  store i32 %sub, ptr %gep.b, align 4
  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
  %inner.cond = icmp eq i64 %inner.iv.next, %n
  br i1 %inner.cond, label %outer.latch, label %inner.body

outer.latch:
  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
  %outer.cond = icmp eq i64 %outer.iv.next, %n
  br i1 %outer.cond, label %exit, label %outer.header

exit:
  ret void
}