; RUN: opt < %s -licm -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s

; The first licm pass is run to hoist/sink invariant stores if possible. Today
; LICM does not hoist/sink invariant stores. Even if that changes, we should
; still vectorize these loops in case licm is not run.

; The second licm pass, run after vectorization, hoists/sinks loop-invariant
; instructions.

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; All tests check that it is legal to vectorize the stores to an invariant
; address.

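; A rough C equivalent of the loop below, for readability (sketch only;
; variable names are illustrative, not taken from the source):
;   for (i = 0; i < n; i++) {
;     sum += b[i];
;     *a = (int) n; // invariant value stored to invariant address
;   }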
; CHECK-LABEL: inv_val_store_to_inv_address_with_reduction(
; memory check is found.conflict = b[max(n-1,1)] > a && (i8* a)+1 > (i8* b)
; CHECK: vector.memcheck:
; CHECK:    found.conflict

; CHECK-LABEL: vector.body:
; CHECK:         %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD:%[a-zA-Z0-9.]+]], %vector.body ]
; CHECK:         %wide.load = load <4 x i32>
; CHECK:         [[ADD]] = add <4 x i32> %vec.phi, %wide.load
; CHECK-NEXT:    store i32 %ntrunc, i32* %a
; CHECK-NEXT:    %index.next = add i64 %index, 4
; CHECK-NEXT:    icmp eq i64 %index.next, %n.vec
; CHECK-NEXT:    br i1

; CHECK-LABEL: middle.block:
; CHECK:         %rdx.shuf = shufflevector <4 x i32>
define i32 @inv_val_store_to_inv_address_with_reduction(i32* %a, i64 %n, i32* %b) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp0 = phi i32 [ %tmp3, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %tmp3 = add i32 %tmp0, %tmp2
  store i32 %ntrunc, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  %tmp4 = phi i32 [ %tmp3, %for.body ]
  ret i32 %tmp4
}

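; A rough C equivalent (sketch only; names are illustrative):
;   for (i = 0; i < n; i++) {
;     *a = (int) n;   // invariant value, invariant address
;     b[i] = (int) n; // invariant value, variant address
;   }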
; CHECK-LABEL: inv_val_store_to_inv_address(
; CHECK-LABEL: vector.body:
; CHECK:         store i32 %ntrunc, i32* %a
; CHECK:         store <4 x i32>
; CHECK-NEXT:    %index.next = add i64 %index, 4
; CHECK-NEXT:    icmp eq i64 %index.next, %n.vec
; CHECK-NEXT:    br i1
define void @inv_val_store_to_inv_address(i32* %a, i64 %n, i32* %b) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  store i32 %ntrunc, i32* %a
  store i32 %ntrunc, i32* %tmp1
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}


; Both of the tests below are handled as predicated stores.

; Conditional store
; if (b[i] == k) a = ntrunc
; TODO: We could improve the code gen for the first test: a single scalar store
; would suffice when vector.or.reduce(vector_cmp(b[i] == k)) is 1.

; CHECK-LABEL: inv_val_store_to_inv_address_conditional(
; CHECK-LABEL: vector.body:
; CHECK:           %wide.load = load <4 x i32>, <4 x i32>*
; CHECK:           [[CMP:%[a-zA-Z0-9.]+]] = icmp eq <4 x i32> %wide.load, %{{.*}}
; CHECK:           store <4 x i32>
; CHECK-NEXT:      [[EE:%[a-zA-Z0-9.]+]] = extractelement <4 x i1> [[CMP]], i32 0
; CHECK-NEXT:      br i1 [[EE]], label %pred.store.if, label %pred.store.continue

; CHECK-LABEL: pred.store.if:
; CHECK-NEXT:      store i32 %ntrunc, i32* %a
; CHECK-NEXT:      br label %pred.store.continue

; CHECK-LABEL: pred.store.continue:
; CHECK-NEXT:      [[EE1:%[a-zA-Z0-9.]+]] = extractelement <4 x i1> [[CMP]], i32 1
define void @inv_val_store_to_inv_address_conditional(i32* %a, i64 %n, i32* %b, i32 %k) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %cmp = icmp eq i32 %tmp2, %k
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %latch

cond_store:
  store i32 %ntrunc, i32* %a
  br label %latch

latch:
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}

; if (b[i] == k)
;    a = ntrunc
; else a = k;
; TODO: We could vectorize this once we support multiple uniform stores to the
; same address.
; CHECK-LABEL: inv_val_store_to_inv_address_conditional_diff_values(
; CHECK-NOT:           load <4 x i32>
define void @inv_val_store_to_inv_address_conditional_diff_values(i32* %a, i64 %n, i32* %b, i32 %k) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %cmp = icmp eq i32 %tmp2, %k
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %cond_store_k

cond_store:
  store i32 %ntrunc, i32* %a
  br label %latch

cond_store_k:
  store i32 %k, i32* %a
  br label %latch

latch:
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}

; Instcombine'd version of the test above. The value being stored is no longer
; loop invariant, so the vectorizer emits a scalar store of the value extracted
; from the last element of the vector.
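; A rough C equivalent (sketch only; names are illustrative):
;   for (i = 0; i < n; i++) {
;     bool c = (b[i] == k);
;     b[i] = (int) n;
;     *a = c ? (int) n : k;
;   }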
; CHECK-LABEL: inv_val_store_to_inv_address_conditional_diff_values_ic
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT:    [[SMAX:%.*]] = select i1 [[TMP0]], i64 [[N]], i64 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[A4:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT:    [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT:    [[SMAX2:%.*]] = select i1 [[TMP1]], i64 [[N]], i64 1
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[SMAX2]]
; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A4]], i64 1
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt i8* [[UGLYGEP]], [[B1]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775804
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <4 x i32> undef, i32 [[K:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT6:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT5]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i32> undef, i32 [[NTRUNC]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT7]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT6]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[BROADCAST_SPLAT8]], <4 x i32>* [[TMP5]], align 4
; CHECK-NEXT:    [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i32> [[BROADCAST_SPLAT8]], <4 x i32> [[BROADCAST_SPLAT6]]
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3
; CHECK-NEXT:    store i32 [[TMP6]], i32* [[A]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP2]], [[K]]
; CHECK-NEXT:    store i32 [[NTRUNC]], i32* [[TMP1]], align 4
; CHECK-NEXT:    br i1 [[CMP]], label [[COND_STORE:%.*]], label [[COND_STORE_K:%.*]]
; CHECK:       cond_store:
; CHECK-NEXT:    br label [[LATCH]]
; CHECK:       cond_store_k:
; CHECK-NEXT:    br label [[LATCH]]
; CHECK:       latch:
; CHECK-NEXT:    [[STOREVAL:%.*]] = phi i32 [ [[NTRUNC]], [[COND_STORE]] ], [ [[K]], [[COND_STORE_K]] ]
; CHECK-NEXT:    store i32 [[STOREVAL]], i32* [[A]], align 4
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
define void @inv_val_store_to_inv_address_conditional_diff_values_ic(i32* %a, i64 %n, i32* %b, i32 %k) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %cmp = icmp eq i32 %tmp2, %k
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %cond_store_k

cond_store:
  br label %latch

cond_store_k:
  br label %latch

latch:
  %storeval = phi i32 [ %ntrunc, %cond_store ], [ %k, %cond_store_k ]
  store i32 %storeval, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}

; Invariant value stored to an invariant address, predicated on an invariant
; condition. This is not treated as a predicated store, since the block the
; store belongs to is the latch block (which does not need to be predicated).
; Variant/invariant values are being stored to an invariant address. The test
; checks that the last element of the phi is extracted and stored as a scalar
; to the uniform address within the loop.
; Since the condition and the phi are loop invariant, they are LICM'ed before
; vectorization.
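; A rough C equivalent (sketch only; names are illustrative):
;   int v = ((int) n == k) ? (int) n : k; // loop invariant
;   for (i = 0; i < n; i++) {
;     b[i] = (int) n;
;     *a = v;
;   }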
; CHECK-LABEL: inv_val_store_to_inv_address_conditional_inv
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT:    [[A4:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT:    [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[NTRUNC]], [[K:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label %[[COND_STORE_LICM:.*]], label %[[COND_STORE_K_LICM:.*]]
; CHECK:       [[COND_STORE_LICM]]:
; CHECK-NEXT:    br label %[[LATCH_LICM:.*]]
; CHECK:       [[COND_STORE_K_LICM]]:
; CHECK-NEXT:    br label %[[LATCH_LICM]]
; CHECK:       [[LATCH_LICM]]:
; CHECK-NEXT:    [[STOREVAL:%.*]] = phi i32 [ [[NTRUNC]], %[[COND_STORE_LICM]] ], [ [[K]], %[[COND_STORE_K_LICM]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT:    [[SMAX:%.*]] = select i1 [[TMP0]], i64 [[N]], i64 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT:    [[SMAX2:%.*]] = select i1 [[TMP1]], i64 [[N]], i64 1
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[SMAX2]]
; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A4]], i64 1
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt i8* [[UGLYGEP]], [[B1]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775804
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <4 x i32> undef, i32 [[NTRUNC]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT6:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT5]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[BROADCAST_SPLAT6]], <4 x i32>* [[TMP7]], align 4
; CHECK-NEXT:    store i32 [[STOREVAL]], i32* [[A]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT:    store i32 [[NTRUNC]], i32* [[TMP1]], align 4
; CHECK-NEXT:    br i1 [[CMP]], label [[COND_STORE:%.*]], label [[COND_STORE_K:%.*]]
; CHECK:       cond_store:
; CHECK-NEXT:    br label [[LATCH]]
; CHECK:       cond_store_k:
; CHECK-NEXT:    br label [[LATCH]]
; CHECK:       latch:
; CHECK-NEXT:    store i32 [[STOREVAL]], i32* [[A]], align 4
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
define void @inv_val_store_to_inv_address_conditional_inv(i32* %a, i64 %n, i32* %b, i32 %k) {
entry:
  %ntrunc = trunc i64 %n to i32
  %cmp = icmp eq i32 %ntrunc, %k
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %cond_store_k

cond_store:
  br label %latch

cond_store_k:
  br label %latch

latch:
  %storeval = phi i32 [ %ntrunc, %cond_store ], [ %k, %cond_store_k ]
  store i32 %storeval, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}

; Variant value stored to a uniform address. The test checks that code gen
; extracts the last element from the variant vector and stores it as a scalar
; to the uniform address.
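; A rough C equivalent (sketch only; names are illustrative):
;   for (i = 0; i < n; i++) {
;     *a = b[i]; // variant value, invariant address
;     sum += b[i];
;   }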
; CHECK-LABEL: variant_val_store_to_inv_address
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt i64 [[N:%.*]], 1
; CHECK-NEXT:    [[SMAX:%.*]] = select i1 [[TMP0]], i64 [[N]], i64 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[B2:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A1]], i64 1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT:    [[SMAX3:%.*]] = select i1 [[TMP1]], i64 [[N]], i64 1
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[SMAX3]]
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ugt i8* [[UGLYGEP]], [[B2]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775804
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    store i32 [[TMP4]], i32* [[A]], align 4
; CHECK-NEXT:    [[TMP5]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[DOTLCSSA]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[DOTLCSSA]], [[RDX_SHUF]]
; CHECK-NEXT:    [[RDX_SHUF5:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[BIN_RDX6:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF5]]
; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x i32> [[BIN_RDX6]], i32 0
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
; CHECK-NEXT:    store i32 [[TMP2]], i32* [[A]], align 4
; CHECK-NEXT:    [[TMP3]] = add i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT:    [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    [[TMP3_LCSSA:%.*]] = phi i32 [ [[TMP3]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_END]]
define i32 @variant_val_store_to_inv_address(i32* %a, i64 %n, i32* %b, i32 %k) {
entry:
  %ntrunc = trunc i64 %n to i32
  %cmp = icmp eq i32 %ntrunc, %k
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp0 = phi i32 [ %tmp3, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  store i32 %tmp2, i32* %a
  %tmp3 = add i32 %tmp0, %tmp2
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  %rdx.lcssa = phi i32 [ %tmp3, %for.body ]
  ret i32 %rdx.lcssa
}

; Multiple variant stores to the same uniform address
; We do not vectorize such loops currently.
;  for(; i < itr; i++) {
;    for(; j < itr; j++) {
;      var1[i] = var2[j] + var1[i];
;      var1[i]++;
;    }
;  }

; CHECK-LABEL: multiple_uniform_stores
; CHECK-NOT:     <4 x i32>
define i32 @multiple_uniform_stores(i32* nocapture %var1, i32* nocapture readonly %var2, i32 %itr) #0 {
entry:
  %cmp20 = icmp eq i32 %itr, 0
  br i1 %cmp20, label %for.end10, label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %entry, %for.inc8
  %indvars.iv23 = phi i64 [ %indvars.iv.next24, %for.inc8 ], [ 0, %entry ]
  %j.022 = phi i32 [ %j.1.lcssa, %for.inc8 ], [ 0, %entry ]
  %cmp218 = icmp ult i32 %j.022, %itr
  br i1 %cmp218, label %for.body3.lr.ph, label %for.inc8

for.body3.lr.ph:                                  ; preds = %for.cond1.preheader
  %arrayidx5 = getelementptr inbounds i32, i32* %var1, i64 %indvars.iv23
  %0 = zext i32 %j.022 to i64
  br label %for.body3

for.body3:                                        ; preds = %for.body3, %for.body3.lr.ph
  %indvars.iv = phi i64 [ %0, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
  %arrayidx = getelementptr inbounds i32, i32* %var2, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %2 = load i32, i32* %arrayidx5, align 4
  %add = add nsw i32 %2, %1
  store i32 %add, i32* %arrayidx5, align 4
  %3 = load i32, i32* %arrayidx5, align 4
  %4 = add nsw i32 %3, 1
  store i32 %4, i32* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %itr
  br i1 %exitcond, label %for.inc8, label %for.body3

for.inc8:                                         ; preds = %for.body3, %for.cond1.preheader
  %j.1.lcssa = phi i32 [ %j.022, %for.cond1.preheader ], [ %itr, %for.body3 ]
  %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
  %lftr.wideiv25 = trunc i64 %indvars.iv.next24 to i32
  %exitcond26 = icmp eq i32 %lftr.wideiv25, %itr
  br i1 %exitcond26, label %for.end10, label %for.cond1.preheader

for.end10:                                        ; preds = %for.inc8, %entry
  ret i32 undef
}

; The second uniform store to the same address is conditional;
; we do not vectorize this.
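; A rough C equivalent (sketch only; mirrors the loop nest above):
;  for(; i < itr; i++) {
;    for(; j < itr; j++) {
;      var1[i] = var2[j] + var1[i];
;      if (var1[i] > 42)
;        var1[i]++;
;    }
;  }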
; CHECK-LABEL: multiple_uniform_stores_conditional
; CHECK-NOT:    <4 x i32>
define i32 @multiple_uniform_stores_conditional(i32* nocapture %var1, i32* nocapture readonly %var2, i32 %itr) #0 {
entry:
  %cmp20 = icmp eq i32 %itr, 0
  br i1 %cmp20, label %for.end10, label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %entry, %for.inc8
  %indvars.iv23 = phi i64 [ %indvars.iv.next24, %for.inc8 ], [ 0, %entry ]
  %j.022 = phi i32 [ %j.1.lcssa, %for.inc8 ], [ 0, %entry ]
  %cmp218 = icmp ult i32 %j.022, %itr
  br i1 %cmp218, label %for.body3.lr.ph, label %for.inc8

for.body3.lr.ph:                                  ; preds = %for.cond1.preheader
  %arrayidx5 = getelementptr inbounds i32, i32* %var1, i64 %indvars.iv23
  %0 = zext i32 %j.022 to i64
  br label %for.body3

for.body3:                                        ; preds = %latch, %for.body3.lr.ph
  %indvars.iv = phi i64 [ %0, %for.body3.lr.ph ], [ %indvars.iv.next, %latch ]
  %arrayidx = getelementptr inbounds i32, i32* %var2, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %2 = load i32, i32* %arrayidx5, align 4
  %add = add nsw i32 %2, %1
  store i32 %add, i32* %arrayidx5, align 4
  %3 = load i32, i32* %arrayidx5, align 4
  %4 = add nsw i32 %3, 1
  %5 = icmp ugt i32 %3, 42
  br i1 %5, label %cond_store, label %latch

cond_store:
  store i32 %4, i32* %arrayidx5, align 4
  br label %latch

latch:
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %itr
  br i1 %exitcond, label %for.inc8, label %for.body3

for.inc8:                                         ; preds = %latch, %for.cond1.preheader
  %j.1.lcssa = phi i32 [ %j.022, %for.cond1.preheader ], [ %itr, %latch ]
  %indvars.iv.next24 = add nuw nsw i64 %indvars.iv23, 1
  %lftr.wideiv25 = trunc i64 %indvars.iv.next24 to i32
  %exitcond26 = icmp eq i32 %lftr.wideiv25, %itr
  br i1 %exitcond26, label %for.end10, label %for.cond1.preheader

for.end10:                                        ; preds = %for.inc8, %entry
  ret i32 undef
}

; Cannot vectorize a loop with an unsafe dependency between a uniform load
; (%tmp10) and a store (%tmp12) to the same address.
; PR39653
; Note: %tmp10 could be replaced by phi(%arg4, %tmp12), a potentially
; vectorizable first-order recurrence.
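; A rough C equivalent (sketch only; 's' models the %tmp alloca and 'x' the
; %tmp9 recurrence):
;   int s = arg4, x = arg1;
;   for (i = 0; i != arg2; i++) {
;     int r = (x * s) % 65536;
;     arg3[arg5 + arg + i] = (short)(r + x);
;     arg3[arg + i] = (short)((r + 2 * x) & 255);
;     s = r; // stored back to the same address the next iteration loads
;     x++;
;   }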
define void @unsafe_dep_uniform_load_store(i32 %arg, i32 %arg1, i64 %arg2, i16* %arg3, i32 %arg4, i64 %arg5) {
; CHECK-LABEL: unsafe_dep_uniform_load_store
; CHECK-NOT: <4 x i32>
bb:
  %tmp = alloca i32
  store i32 %arg4, i32* %tmp
  %tmp6 = getelementptr inbounds i16, i16* %arg3, i64 %arg5
  br label %bb7

bb7:
  %tmp8 = phi i64 [ 0, %bb ], [ %tmp24, %bb7 ]
  %tmp9 = phi i32 [ %arg1, %bb ], [ %tmp23, %bb7 ]
  %tmp10 = load i32, i32* %tmp
  %tmp11 = mul nsw i32 %tmp9, %tmp10
  %tmp12 = srem i32 %tmp11, 65536
  %tmp13 = add nsw i32 %tmp12, %tmp9
  %tmp14 = trunc i32 %tmp13 to i16
  %tmp15 = trunc i64 %tmp8 to i32
  %tmp16 = add i32 %arg, %tmp15
  %tmp17 = zext i32 %tmp16 to i64
  %tmp18 = getelementptr inbounds i16, i16* %tmp6, i64 %tmp17
  store i16 %tmp14, i16* %tmp18, align 2
  %tmp19 = add i32 %tmp13, %tmp9
  %tmp20 = trunc i32 %tmp19 to i16
  %tmp21 = and i16 %tmp20, 255
  %tmp22 = getelementptr inbounds i16, i16* %arg3, i64 %tmp17
  store i16 %tmp21, i16* %tmp22, align 2
  %tmp23 = add nsw i32 %tmp9, 1
  %tmp24 = add nuw nsw i64 %tmp8, 1
  %tmp25 = icmp eq i64 %tmp24, %arg2
  store i32 %tmp12, i32* %tmp
  br i1 %tmp25, label %bb26, label %bb7

bb26:
  ret void
}