1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
3
4
5@p = external local_unnamed_addr global [257 x i32], align 16
6@q = external local_unnamed_addr global [257 x i32], align 16
7
8; Test case for PR43398.
9
define void @can_sink_after_store(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @can_sink_after_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  ; Pre-load p[1] so the loop can start with the "previous iteration" value.
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

; First-order recurrence: %pre.phi carries the p[iv] value loaded on the
; previous iteration. Its user %add.1 appears before the current load
; %pre.next and the store to q[iv]; the CHECK lines above show the
; vectorizer vectorizes this loop with the add sunk after the wide load.
for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
91
92; We can sink potential trapping instructions, as this will only delay the trap
93; and not introduce traps on additional paths.
define void @sink_sdiv(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @sink_sdiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[DIV_1:%.*]] = sdiv i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[DIV_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

; Same shape as @can_sink_after_store, but the recurrence user is an sdiv,
; which can trap (e.g. division by zero). Sinking it past the load only
; delays a potential trap without introducing traps on new paths, so the
; loop is still vectorized (see the CHECK lines above).
for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %div.1 = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %div.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
175
176; Sink users of %pre.phi recursively.
define void @can_sink_with_additional_user(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @can_sink_with_additional_user(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 [[ADD_2]], [[ADD_3]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_4]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

; %add.1 (the direct recurrence user) has two users of its own, %add.2 and
; %add.3, and %add.2/%add.3 feed %add.4. The whole chain of transitive
; users before the load must be sunk recursively; the CHECK lines above
; show all four adds placed after the wide load in the vector body.
for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %add.2 = add i32 %add.1, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.3 = add i32 %add.1, %pre.next
  %add.4 = add i32 %add.2, %add.3
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.4, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
266
267; FIXME: We can sink a store, if we can guarantee that it does not alias any
268;        loads/stores in between.
define void @cannot_sink_store(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[ADD_1]], i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

; The recurrence user %add.1 is stored to %ptr before the load %pre.next.
; %ptr carries no noalias information, so sinking %add.1 (and the store)
; past the load is not known to be safe; the CHECK lines above contain no
; vector.body — the loop stays scalar.
for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  store i32 %add.1, i32* %ptr
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
321
322; Some kinds of reductions are not detected by IVDescriptors. If we have a
323; cycle, we cannot sink it.
define void @cannot_sink_reduction(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_reduction(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[D:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[D]] = sdiv i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT:%.*]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[X]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

; %d feeds back into %pre.phi, forming a cycle (%pre.phi -> %d -> %pre.phi)
; that is not a recognized reduction; the CHECK lines above contain no
; vector.body — the loop stays scalar.
for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %d = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %x, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
375
376; TODO: We should be able to sink %tmp38 after %tmp60.
define void @instruction_with_2_FOR_operands() {
; CHECK-LABEL: @instruction_with_2_FOR_operands(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br label [[BB13:%.*]]
; CHECK:       bb13:
; CHECK-NEXT:    [[TMP37:%.*]] = phi float [ [[TMP60:%.*]], [[BB13]] ], [ undef, [[BB:%.*]] ]
; CHECK-NEXT:    [[TMP27:%.*]] = phi float [ [[TMP49:%.*]], [[BB13]] ], [ undef, [[BB]] ]
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB13]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    [[TMP38:%.*]] = fmul fast float [[TMP37]], [[TMP27]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP49]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP60]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp slt i64 [[INDVARS_IV]], undef
; CHECK-NEXT:    br i1 [[TMP12]], label [[BB13]], label [[BB74:%.*]]
; CHECK:       bb74:
; CHECK-NEXT:    ret void
;


bb:
  br label %bb13

; Both operands of %tmp38 are first-order recurrence phis (%tmp37 fed by
; %tmp60, %tmp27 fed by %tmp49). The CHECK lines above show the loop is
; left scalar; sinking %tmp38 after %tmp60 would be required to vectorize.
bb13:                                             ; preds = %bb13, %bb
  %tmp37 = phi float [ %tmp60, %bb13 ], [ undef, %bb ]
  %tmp27 = phi float [ %tmp49, %bb13 ], [ undef, %bb ]
  %indvars.iv = phi i64 [ %indvars.iv.next, %bb13 ], [ 0, %bb ]
  %tmp38 = fmul fast float %tmp37, %tmp27
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %tmp49 = load float, float* undef, align 4
  %tmp60 = load float, float* undef, align 4
  %tmp12 = icmp slt i64 %indvars.iv, undef
  br i1 %tmp12, label %bb13, label %bb74

bb74:                                             ; preds = %bb13
  ret void
}
413
define void @instruction_with_2_FOR_operands_and_multiple_other_uses(float* noalias %dst.1, float* noalias %dst.2, float* noalias %dst.3, float* noalias %for.ptr.1, float* noalias %for.ptr.2) {
; CHECK-LABEL: @instruction_with_2_FOR_operands_and_multiple_other_uses(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR_1:%.*]] = phi float [ 0.000000e+00, [[BB:%.*]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_2:%.*]] = phi float [ 0.000000e+00, [[BB]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[BB]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_1_USE_1:%.*]] = fmul fast float [[FOR_1]], 2.000000e+00
; CHECK-NEXT:    [[USED_BY_BOTH:%.*]] = fmul fast float [[FOR_1]], [[FOR_2]]
; CHECK-NEXT:    [[FOR_2_NEXT]] = load float, float* [[FOR_PTR_2:%.*]], align 4
; CHECK-NEXT:    [[FOR_1_USE_3:%.*]] = fadd fast float [[FOR_1]], 1.000000e+00
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[FOR_1_NEXT]] = load float, float* [[FOR_PTR_1:%.*]], align 4
; CHECK-NEXT:    [[GEP_DST_1:%.*]] = getelementptr inbounds float, float* [[DST_1:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[USED_BY_BOTH]], float* [[GEP_DST_1]], align 4
; CHECK-NEXT:    [[GEP_DST_2:%.*]] = getelementptr inbounds float, float* [[DST_2:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_1]], float* [[GEP_DST_2]], align 4
; CHECK-NEXT:    [[GEP_DST_3:%.*]] = getelementptr inbounds float, float* [[DST_3:%.*]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_3]], float* [[GEP_DST_3]], align 4
; CHECK-NEXT:    [[EC:%.*]] = icmp slt i64 [[IV]], 1000
; CHECK-NEXT:    br i1 [[EC]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
bb:
  br label %loop

; Two first-order recurrences: %for.1 (fed by %for.1.next) and %for.2
; (fed by %for.2.next). %used.by.both reads both recurrences, and %for.1
; additionally has the independent users %for.1.use.1 and %for.1.use.3,
; each stored to a distinct noalias destination. The CHECK lines above
; show the loop is left scalar.
loop:
  %for.1 = phi float [ 0.0, %bb ], [ %for.1.next, %loop]
  %for.2 = phi float [ 0.0, %bb ], [ %for.2.next, %loop]
  %iv = phi i64 [ 0, %bb ], [ %iv.next, %loop ]
  %for.1.use.1  = fmul fast float %for.1, 2.0
  %used.by.both = fmul fast float %for.1, %for.2
  %for.2.next = load float, float* %for.ptr.2, align 4
  %for.1.use.3 = fadd fast float %for.1, 1.0
  %iv.next = add nuw nsw i64 %iv, 1
  %for.1.next = load float, float* %for.ptr.1, align 4
  %gep.dst.1 = getelementptr inbounds float, float* %dst.1, i64 %iv
  store float %used.by.both, float* %gep.dst.1
  %gep.dst.2 = getelementptr inbounds float, float* %dst.2, i64 %iv
  store float %for.1.use.1, float* %gep.dst.2
  %gep.dst.3 = getelementptr inbounds float, float* %dst.3, i64 %iv
  store float %for.1.use.3, float* %gep.dst.3
  %ec = icmp slt i64 %iv, 1000
  br i1 %ec, label %loop, label %exit

exit:
  ret void
}
464
465; The (first) reason `%first_time.1` cannot be sunk is because it appears outside
466; the header and is not dominated by Previous. The fact that it feeds Previous
467; is a second sinking-preventing reason.
define void @cannot_sink_phi(i32* %ptr) {
; CHECK-LABEL: @cannot_sink_phi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       loop.header:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT:    [[FOR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[FOR_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT:    [[C_1:%.*]] = icmp ult i64 [[IV]], 500
; CHECK-NEXT:    br i1 [[C_1]], label [[IF_TRUEBB:%.*]], label [[IF_FALSEBB:%.*]]
; CHECK:       if.truebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       if.falsebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       loop.latch:
; CHECK-NEXT:    [[FIRST_TIME_1:%.*]] = phi i32 [ 20, [[IF_TRUEBB]] ], [ [[FOR]], [[IF_FALSEBB]] ]
; CHECK-NEXT:    [[C_2:%.*]] = icmp ult i64 [[IV]], 800
; CHECK-NEXT:    [[FOR_NEXT]] = select i1 [[C_2]], i32 30, i32 [[FIRST_TIME_1]]
; CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i32 [[FOR_NEXT]], i32* [[PTR_IDX]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP_HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.header

; %for is a first-order recurrence fed by %for.next in the latch.
loop.header:
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop.latch ]
  %for = phi i32 [ 0, %entry ], [ %for.next, %loop.latch ]
  %c.1 = icmp ult i64 %iv, 500
  br i1 %c.1, label %if.truebb, label %if.falsebb

if.truebb:
  br label %loop.latch

if.falsebb:
  br label %loop.latch

; %first_time.1 is a phi outside the header that both uses the recurrence
; (%for) and feeds its next value (%for.next) — it cannot be sunk, so the
; CHECK lines above show no vector.body.
loop.latch:
  %first_time.1 = phi i32 [ 20, %if.truebb ], [ %for, %if.falsebb ]
  %c.2 = icmp ult i64 %iv, 800
  %for.next = select i1 %c.2, i32 30, i32 %first_time.1
  %ptr.idx = getelementptr i32, i32* %ptr, i64 %iv
  store i32 %for.next, i32* %ptr.idx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop.header

exit:
  ret void
}
521
522; A recurrence in a multiple exit loop.
define i16 @multiple_exit(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_BODY]] ], [ [[SCALAR_RECUR]], [[FOR_COND]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
entry:
  br label %for.cond

; Recurrence %rec carries the i16 loaded from p[i] on the previous
; iteration. The loop has two exits: from for.cond when i >= n, and from
; for.body when i >= 2096; %rec is returned on both paths. The CHECK
; lines above show the loop is vectorized with a scalar-loop tail.
for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  ret i16 %rec
}
600
601
602; A multiple exit case where one of the exiting edges involves a value
603; from the recurrence and one does not.
define i16 @multiple_exit2(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_COND]] ], [ 10, [[FOR_BODY]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
; %rec is a first-order recurrence (the value loaded on the previous
; iteration). The loop has two exiting blocks: for.cond exits carrying the
; recurrence value %rec, while for.body exits with the constant 10, so only
; the for.cond exit edge uses the recurrence on the way out.
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  ; Recurrence phi: 0 on first entry, previous iteration's load afterwards.
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  ; Produces next iteration's %rec; the vectorized form is WIDE_LOAD above.
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  ; Store of the *previous* iteration's value to the current address.
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  ; Merge of the two exits: recurrence value from for.cond, 10 from for.body.
  %rec.lcssa = phi i16 [ %rec, %for.cond ], [ 10, %for.body ]
  ret i16 %rec.lcssa
}
682
683; A test where the instructions to sink may not be visited in dominance order.
define void @sink_dominance(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP15:%.*]] = select <4 x i1> [[TMP14]], <4 x i32> [[TMP13]], <4 x i32> <i32 22, i32 22, i32 22, i32 22>
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* [[TMP16]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 22
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; %for is a first-order recurrence. Its users (%for.trunc, %cmp, %select)
; appear *before* the definition of %for.next, so the vectorizer must sink
; that chain below the load producing the next recurrence value; the CHECK
; lines above show the sunk, vectorized chain following WIDE_LOAD.
entry:
  br label %loop

loop:
  ; Recurrence phi: previous iteration's zero-extended load.
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  ; Chain of recurrence users that must be sunk past %for.next's definition.
  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 22

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  ; Defines the recurrence's next value, after all of its users above.
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
765
766; Similar to @sink_dominance, but with 2 separate chains that merge at %select
767; with a different number of instructions in between.
define void @sink_dominance_2(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = add <4 x i32> [[TMP13]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP15:%.*]] = mul <4 x i32> [[TMP14]], <i32 99, i32 99, i32 99, i32 99>
; CHECK-NEXT:    [[TMP16:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP17:%.*]] = select <4 x i1> [[TMP16]], <4 x i32> [[TMP13]], <4 x i32> [[TMP15]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP17]], <4 x i32>* [[TMP18]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[STEP:%.*]] = add i32 [[FOR_TRUNC]], 2
; CHECK-NEXT:    [[STEP_2:%.*]] = mul i32 [[STEP]], 99
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 [[STEP_2]]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Like @sink_dominance, but the recurrence %for feeds two separate user
; chains of different lengths (%for.trunc -> %step -> %step.2, and
; %for.trunc -> %cmp) that merge at %select; all of them must be sunk
; below %for.next's definition.
entry:
  br label %loop

loop:
  ; Recurrence phi: previous iteration's zero-extended load.
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  ; First chain of recurrence users (two instructions deep).
  %for.trunc = trunc i64 %for to i32
  %step = add i32 %for.trunc, 2
  %step.2 = mul i32 %step, 99

  ; Second, shorter chain; both chains merge at %select.
  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 %step.2

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  ; Defines the recurrence's next value, after all of its users above.
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
856
define void @cannot_sink_load_past_store(i32* %ptr, i32 %N) {
; CHECK-LABEL: @cannot_sink_load_past_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP_FOR:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i64 [[FOR]]
; CHECK-NEXT:    [[LV_FOR:%.*]] = load i32, i32* [[GEP_FOR]], align 4
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[FOR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[LV_FOR]], [[FOR_TRUNC]]
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[LV_FOR]], i32 22
; CHECK-NEXT:    [[GEP_IV:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    store i32 0, i32* [[GEP_IV]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[IV]] to i64
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N:%.*]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; The recurrence users here include a load (%lv.for) addressed via the
; recurrence value itself, and it sits above a store to %ptr. Sinking that
; load past the store would be needed to place the users after %for.next;
; the CHECK lines above expect the loop to remain scalar (no vector.body).
entry:
  br label %loop

loop:
  ; Recurrence phi: previous iteration's zero-extended induction value.
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  ; Load through a pointer computed from the recurrence value.
  %gep.for = getelementptr inbounds i32, i32* %ptr, i64 %for
  %lv.for = load i32, i32* %gep.for, align 4
  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %lv.for, %for.trunc
  %select = select i1 %cmp, i32 %lv.for, i32 22

  ; Store into the same underlying object (%ptr), below the load above.
  %gep.iv = getelementptr inbounds i32, i32* %ptr, i32 %iv
  store i32 0, i32* %gep.iv
  %iv.next = add i32 %iv, 1
  %for.next = zext i32 %iv to i64

  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
902
define void @test_for_sink_instruction_after_same_incoming_1(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_1
; CHECK-NOT: vector.body:
;
; Two first-order recurrence phis (%for.1, %for.2) share the same incoming
; value %for.1.next from the loop backedge. Their users %add.1 and %add.2
; occur before %for.1.next is defined. CHECK-NOT above documents that the
; loop is expected NOT to be vectorized in this configuration.
entry:
  br label %loop

loop:
  ; Both recurrences take %for.1.next on the backedge; only the start
  ; values (10.0 vs 20.0) differ.
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  ; Users of both recurrences, above the definition of %for.1.next.
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
926
927
define void @test_for_sink_instruction_after_same_incoming_2(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_2
; CHECK-NOT: vector.body:
;
; Same as @test_for_sink_instruction_after_same_incoming_1, but with the
; order of the two recurrence phis swapped (%for.2 declared first), to
; check the result does not depend on phi visitation order. Vectorization
; is still expected not to happen (CHECK-NOT above).
entry:
  br label %loop

loop:
  ; Both recurrences take %for.1.next on the backedge; phi order is the
  ; only difference from the _1 variant.
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  ; Users of both recurrences, above the definition of %for.1.next.
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
950