; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s


@p = external local_unnamed_addr global [257 x i32], align 16
@q = external local_unnamed_addr global [257 x i32], align 16

; Test case for PR43398.

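; The loop below carries a first-order recurrence: every iteration reuses the
; value loaded from @p in the previous iteration. A rough C equivalent of the
; tested pattern (an illustrative sketch only, not taken from the PR):
;
;   extern int p[], q[];
;   void can_sink_after_store(int x) {
;     for (long i = 1; i < 2000; i++)
;       q[i] = (p[i - 1] + x) + p[i]; // p[i - 1] is the recurrence value
;   }
;
; Vectorizing this requires sinking %add.1, the user of the recurrence, below
; %pre.next, the instruction that produces the next recurrence value.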
define void @can_sink_after_store(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @can_sink_after_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; We can sink potentially trapping instructions, as this will only delay the trap
; and not introduce traps on additional paths.
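; Schematically (an illustrative C sketch, not part of the original test):
;
;   extern int p[], q[];
;   void sink_sdiv(int x) {
;     for (long i = 1; i < 2000; i++)
;       q[i] = (p[i - 1] / x) + p[i]; // the sdiv is sunk below the load of p[i]
;   }
;
; Sinking the sdiv only moves a possible trap (e.g. division by zero) to a
; later point in the same iteration; it never executes the division on a path
; that would not have reached it anyway.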
define void @sink_sdiv(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @sink_sdiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[DIV_1:%.*]] = sdiv i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[DIV_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %div.1 = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %div.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Sink users of %pre.phi recursively.
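; Here %add.1 uses the recurrence and is in turn used by %add.2 and %add.3, so
; the whole chain has to be sunk below %pre.next. A rough C sketch of the loop
; (illustrative only):
;
;   extern int p[], q[];
;   void can_sink_with_additional_user(int x) {
;     for (long i = 1; i < 2000; i++) {
;       int a = p[i - 1] + x;    // uses the recurrence
;       int b = a + x;           // additional user of a
;       q[i] = b + (a + p[i]);
;     }
;   }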
define void @can_sink_with_additional_user(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @can_sink_with_additional_user(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 [[ADD_2]], [[ADD_3]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_4]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %add.2 = add i32 %add.1, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.3 = add i32 %add.1, %pre.next
  %add.4 = add i32 %add.2, %add.3
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.4, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; FIXME: We can sink a store if we can guarantee that it does not alias any
;        loads/stores in between.
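; In the loop below, the unconditional store to %ptr sits between %add.1, the
; user of the recurrence, and the load that defines %pre.next. Since %ptr may
; alias @p, sinking %add.1 together with the store below the load is not known
; to be safe, so the loop is not vectorized. A rough C sketch (illustrative
; only):
;
;   extern int p[], q[];
;   void cannot_sink_store(int x, int *ptr) {
;     for (long i = 1; i < 2000; i++) {
;       int a = p[i - 1] + x;
;       *ptr = a;                // may alias p[i]; blocks sinking a below the load
;       q[i] = a + p[i];
;     }
;   }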
define void @cannot_sink_store(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[ADD_1]], i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  store i32 %add.1, i32* %ptr
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Some kinds of reductions are not detected by IVDescriptors. If we have a
; cycle, we cannot sink it.
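; In the loop below, %pre.phi and %d form a cycle (%pre.phi -> %d -> %pre.phi),
; i.e. roughly d = d / x on every iteration. A C sketch (illustrative only):
;
;   extern int p[], q[];
;   void cannot_sink_reduction(int x) {
;     int d = p[1];
;     for (long i = 1; i < 2000; i++) {
;       d = d / x;               // cycle: d feeds its own next value
;       q[i] = x + p[i];
;     }
;   }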
define void @cannot_sink_reduction(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_reduction(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[D:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[D]] = sdiv i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT:%.*]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[X]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %d = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %x, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Sink %tmp38 after %tmp60 to enable vectorization of the loop.
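; Both fmul operands are first-order recurrences (%tmp37 and %tmp27), so
; %tmp38 has to be sunk below both %tmp49 and %tmp60, the loads that produce
; the next values of the two recurrences. Schematically (illustrative sketch
; only; the test loads from undef pointers, src1/src2 are placeholders):
;
;   float a = ..., b = ...;
;   for (long i = 0; i < n; i++) {
;     float m = a * b;           // %tmp38: uses both recurrences
;     b = *src1;                 // %tmp49 feeds %tmp27
;     a = *src2;                 // %tmp60 feeds %tmp37
;   }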
define void @instruction_with_2_FOR_operands() {
; CHECK-LABEL: @instruction_with_2_FOR_operands(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 undef, i64 0)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float undef>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float undef>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* undef, align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP2]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR1]], <4 x float> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* undef, align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP4]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT3]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR]], <4 x float> [[BROADCAST_SPLAT3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT4:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI5:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[BB74:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT6:%.*]] = phi float [ undef, [[BB:%.*]] ], [ [[VECTOR_RECUR_EXTRACT4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi float [ undef, [[BB]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    br label [[BB13:%.*]]
; CHECK:       bb13:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi float [ [[TMP60:%.*]], [[BB13]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[SCALAR_RECUR7:%.*]] = phi float [ [[TMP49:%.*]], [[BB13]] ], [ [[SCALAR_RECUR_INIT6]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB13]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP38:%.*]] = fmul fast float [[SCALAR_RECUR]], [[SCALAR_RECUR7]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP49]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP60]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp slt i64 [[INDVARS_IV]], undef
; CHECK-NEXT:    br i1 [[TMP12]], label [[BB13]], label [[BB74]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       bb74:
; CHECK-NEXT:    ret void
;


bb:
  br label %bb13

bb13:                                             ; preds = %bb13, %bb
  %tmp37 = phi float [ %tmp60, %bb13 ], [ undef, %bb ]
  %tmp27 = phi float [ %tmp49, %bb13 ], [ undef, %bb ]
  %indvars.iv = phi i64 [ %indvars.iv.next, %bb13 ], [ 0, %bb ]
  %tmp38 = fmul fast float %tmp37, %tmp27
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %tmp49 = load float, float* undef, align 4
  %tmp60 = load float, float* undef, align 4
  %tmp12 = icmp slt i64 %indvars.iv, undef
  br i1 %tmp12, label %bb13, label %bb74

bb74:                                             ; preds = %bb13
  ret void
}

define void @instruction_with_2_FOR_operands_and_multiple_other_uses(float* noalias %dst.1, float* noalias %dst.2, float* noalias %dst.3, float* noalias %for.ptr.1, float* noalias %for.ptr.2) {
; CHECK-LABEL: @instruction_with_2_FOR_operands_and_multiple_other_uses(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR_1:%.*]] = phi float [ 0.000000e+00, [[BB:%.*]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_2:%.*]] = phi float [ 0.000000e+00, [[BB]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[BB]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_1_USE_1:%.*]] = fmul fast float [[FOR_1]], 2.000000e+00
; CHECK-NEXT:    [[USED_BY_BOTH:%.*]] = fmul fast float [[FOR_1]], [[FOR_2]]
; CHECK-NEXT:    [[FOR_2_NEXT]] = load float, float* [[FOR_PTR_2:%.*]], align 4
; CHECK-NEXT:    [[FOR_1_USE_3:%.*]] = fadd fast float [[FOR_1]], 1.000000e+00
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[FOR_1_NEXT]] = load float, float* [[FOR_PTR_1:%.*]], align 4
; CHECK-NEXT:    [[GEP_DST_1:%.*]] = getelementptr inbounds float, float* [[DST_1:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[USED_BY_BOTH]], float* [[GEP_DST_1]], align 4
; CHECK-NEXT:    [[GEP_DST_2:%.*]] = getelementptr inbounds float, float* [[DST_2:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_1]], float* [[GEP_DST_2]], align 4
; CHECK-NEXT:    [[GEP_DST_3:%.*]] = getelementptr inbounds float, float* [[DST_3:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_3]], float* [[GEP_DST_3]], align 4
; CHECK-NEXT:    [[EC:%.*]] = icmp slt i64 [[IV]], 1000
; CHECK-NEXT:    br i1 [[EC]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
bb:
  br label %loop

loop:
  %for.1 = phi float [ 0.0, %bb ], [ %for.1.next, %loop]
  %for.2 = phi float [ 0.0, %bb ], [ %for.2.next, %loop]
  %iv = phi i64 [ 0, %bb ], [ %iv.next, %loop ]
  %for.1.use.1  = fmul fast float %for.1, 2.0
  %used.by.both = fmul fast float %for.1, %for.2
  %for.2.next = load float, float* %for.ptr.2, align 4
  %for.1.use.3 = fadd fast float %for.1, 1.0
  %iv.next = add nuw nsw i64 %iv, 1
  %for.1.next = load float, float* %for.ptr.1, align 4
  %gep.dst.1 = getelementptr inbounds float, float* %dst.1, i64 %iv
  store float %used.by.both, float* %gep.dst.1
  %gep.dst.2 = getelementptr inbounds float, float* %dst.2, i64 %iv
  store float %for.1.use.1, float* %gep.dst.2
  %gep.dst.3 = getelementptr inbounds float, float* %dst.3, i64 %iv
  store float %for.1.use.3, float* %gep.dst.3
  %ec = icmp slt i64 %iv, 1000
  br i1 %ec, label %loop, label %exit

exit:
  ret void
}

; The (first) reason `%first_time.1` cannot be sunk is that it appears outside
; the header and is not dominated by Previous. The fact that it feeds Previous
; is a second reason it cannot be sunk.
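; Sketch of the cycle (illustrative C only): %for.next, the value feeding the
; recurrence, is a select in the latch, and %first_time.1 is a phi in the
; latch that itself reads %for, so %first_time.1 both precedes and feeds
; Previous.
;
;   int rec = 0;
;   for (long i = 1; i < 1000; i++) {
;     int t = (i < 500) ? 20 : rec; // %first_time.1: phi outside the header
;     rec = (i < 800) ? 30 : t;     // %for.next (Previous) uses t
;     ptr[i] = rec;
;   }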
define void @cannot_sink_phi(i32* %ptr) {
; CHECK-LABEL: @cannot_sink_phi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       loop.header:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT:    [[FOR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[FOR_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT:    [[C_1:%.*]] = icmp ult i64 [[IV]], 500
; CHECK-NEXT:    br i1 [[C_1]], label [[IF_TRUEBB:%.*]], label [[IF_FALSEBB:%.*]]
; CHECK:       if.truebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       if.falsebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       loop.latch:
; CHECK-NEXT:    [[FIRST_TIME_1:%.*]] = phi i32 [ 20, [[IF_TRUEBB]] ], [ [[FOR]], [[IF_FALSEBB]] ]
; CHECK-NEXT:    [[C_2:%.*]] = icmp ult i64 [[IV]], 800
; CHECK-NEXT:    [[FOR_NEXT]] = select i1 [[C_2]], i32 30, i32 [[FIRST_TIME_1]]
; CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i32 [[FOR_NEXT]], i32* [[PTR_IDX]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP_HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop.latch ]
  %for = phi i32 [ 0, %entry ], [ %for.next, %loop.latch ]
  %c.1 = icmp ult i64 %iv, 500
  br i1 %c.1, label %if.truebb, label %if.falsebb

if.truebb:
  br label %loop.latch

if.falsebb:
  br label %loop.latch

loop.latch:
  %first_time.1 = phi i32 [ 20, %if.truebb ], [ %for, %if.falsebb ]
  %c.2 = icmp ult i64 %iv, 800
  %for.next = select i1 %c.2, i32 30, i32 %first_time.1
  %ptr.idx = getelementptr i32, i32* %ptr, i64 %iv
  store i32 %for.next, i32* %ptr.idx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop.header

exit:
  ret void
}

; A recurrence in a multiple exit loop.
define i16 @multiple_exit(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_BODY]] ], [ [[SCALAR_RECUR]], [[FOR_COND]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  ret i16 %rec
}


; A multiple exit case where one of the exiting edges involves a value
; from the recurrence and one does not.
define i16 @multiple_exit2(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_COND]] ], [ 10, [[FOR_BODY]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  %rec.lcssa = phi i16 [ %rec, %for.cond ], [ 10, %for.body ]
  ret i16 %rec.lcssa
}

; A test where the instructions to sink may not be visited in dominance order.
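; The users of the recurrence (%for.trunc, %cmp, %select) appear before
; %for.next and all have to be sunk below it, so the sinking worklist may
; reach a user before the instruction it depends on. A rough C sketch of the
; loop (illustrative only):
;
;   void sink_dominance(int *ptr, unsigned N) {
;     unsigned long rec = 0;
;     unsigned i = 0;
;     do {
;       int t = (int)rec;
;       int s = t < 213 ? t : 22;
;       rec = (unsigned)ptr[i];  // defines the next recurrence value
;       ptr[i] = s;
;       i++;
;     } while (i < N);
;   }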
define void @sink_dominance(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP15:%.*]] = select <4 x i1> [[TMP14]], <4 x i32> [[TMP13]], <4 x i32> <i32 22, i32 22, i32 22, i32 22>
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* [[TMP16]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 22
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 22

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}

; Similar to @sink_dominance, but with 2 separate chains that merge at %select
; with a different number of instructions in between.
define void @sink_dominance_2(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = add <4 x i32> [[TMP13]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP15:%.*]] = mul <4 x i32> [[TMP14]], <i32 99, i32 99, i32 99, i32 99>
; CHECK-NEXT:    [[TMP16:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP17:%.*]] = select <4 x i1> [[TMP16]], <4 x i32> [[TMP13]], <4 x i32> [[TMP15]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP17]], <4 x i32>* [[TMP18]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[STEP:%.*]] = add i32 [[FOR_TRUNC]], 2
; CHECK-NEXT:    [[STEP_2:%.*]] = mul i32 [[STEP]], 99
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 [[STEP_2]]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %step = add i32 %for.trunc, 2
  %step.2 = mul i32 %step, 99

  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 %step.2

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}

define void @cannot_sink_load_past_store(i32* %ptr, i32 %N) {
; CHECK-LABEL: @cannot_sink_load_past_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP_FOR:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i64 [[FOR]]
; CHECK-NEXT:    [[LV_FOR:%.*]] = load i32, i32* [[GEP_FOR]], align 4
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[FOR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[LV_FOR]], [[FOR_TRUNC]]
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[LV_FOR]], i32 22
; CHECK-NEXT:    [[GEP_IV:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    store i32 0, i32* [[GEP_IV]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[IV]] to i64
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N:%.*]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %gep.for = getelementptr inbounds i32, i32* %ptr, i64 %for
  %lv.for = load i32, i32* %gep.for, align 4
  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %lv.for, %for.trunc
  %select = select i1 %cmp, i32 %lv.for, i32 22

  %gep.iv = getelementptr inbounds i32, i32* %ptr, i32 %iv
  store i32 0, i32* %gep.iv
  %iv.next = add i32 %iv, 1
  %for.next = zext i32 %iv to i64

  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}

define void @test_for_sink_instruction_after_same_incoming_1(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_1
; CHECK-NOT: vector.body:
;
entry:
  br label %loop

loop:
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}


define void @test_for_sink_instruction_after_same_incoming_2(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_2
; CHECK-NOT: vector.body:
entry:
  br label %loop

loop:
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
