; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s


@p = external local_unnamed_addr global [257 x i32], align 16
@q = external local_unnamed_addr global [257 x i32], align 16

; Test case for PR43398.
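; A rough reading of the loop below (our paraphrase, not taken from the PR):
; %pre.phi is a first-order recurrence fed by the load %pre.next, so each
; element of @q is computed from the @p value loaded in the previous iteration
; plus %x, plus the current @p value. The relevant scalar instructions, copied
; from the loop body:
;
;   %add.1 = add i32 %pre.phi, %x                ; uses last iteration's load
;   %pre.next = load i32, i32* %idx.1, align 4   ; feeds the recurrence phi
;   %add.2 = add i32 %add.1, %pre.next
;
; To vectorize the recurrence, %add.1 has to be sunk below %pre.next so that
; both of its transitive inputs are available in the same vector iteration.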

define void @can_sink_after_store(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @can_sink_after_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; We can sink potentially trapping instructions, as this will only delay the trap
; and not introduce traps on additional paths.
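; The potentially trapping instruction here is the sdiv using the recurrence
; phi (instructions copied from the loop body; the commentary is ours):
;
;   %div.1 = sdiv i32 %pre.phi, %x
;   %pre.next = load i32, i32* %idx.1, align 4
;   %add.2 = add i32 %div.1, %pre.next
;
; Sinking %div.1 below %pre.next keeps it on exactly the iterations that
; already executed it, so a division trap is at most delayed, never added.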
define void @sink_sdiv(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @sink_sdiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[DIV_1:%.*]] = sdiv i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[DIV_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %div.1 = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %div.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Sink users of %pre.phi recursively.
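; %add.1 is the direct user of %pre.phi, but %add.2 also has to move because it
; uses %add.1 and is likewise defined above the load feeding the recurrence
; (instructions copied from the loop body; the commentary is ours):
;
;   %add.1 = add i32 %pre.phi, %x       ; direct user of the recurrence phi
;   %add.2 = add i32 %add.1, %x         ; transitive user, also defined too early
;   %pre.next = load i32, i32* %idx.1, align 4
;   %add.3 = add i32 %add.1, %pre.next
;   %add.4 = add i32 %add.2, %add.3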
define void @can_sink_with_additional_user(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @can_sink_with_additional_user(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 [[ADD_2]], [[ADD_3]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_4]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %add.2 = add i32 %add.1, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.3 = add i32 %add.1, %pre.next
  %add.4 = add i32 %add.2, %add.3
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.4, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; FIXME: We can sink a store if we can guarantee that it does not alias any
;        loads/stores in between.
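; What blocks sinking today (our reading of the IR below): %add.1 uses the
; recurrence phi and is stored to %ptr before the load that feeds the
; recurrence, so sinking %add.1 would also require sinking the store past that
; load, which is only safe if %ptr and @p are known not to alias:
;
;   %add.1 = add i32 %pre.phi, %x
;   store i32 %add.1, i32* %ptr                  ; would have to move below ...
;   %pre.next = load i32, i32* %idx.1, align 4   ; ... this load from @p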
define void @cannot_sink_store(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[ADD_1]], i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  store i32 %add.1, i32* %ptr
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Some kinds of reductions are not detected by IVDescriptors. If the recurrence
; forms such a cycle, we cannot sink it.
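; In the loop below, %d is both the only user of the recurrence phi and the
; value that feeds it back, i.e. the phi and %d form a cycle (instructions
; copied from the loop body; the commentary is ours):
;
;   %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]
;   %d = sdiv i32 %pre.phi, %x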
define void @cannot_sink_reduction(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_reduction(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[D:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[D]] = sdiv i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT:%.*]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[X]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %d = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %x, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; TODO: We should be able to sink %tmp38 after %tmp60.
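; %tmp38 uses two first-order recurrence phis whose feeding values are defined
; after it (instructions copied from the loop body; the commentary is ours):
;
;   %tmp38 = fmul fast float %tmp37, %tmp27      ; uses both recurrence phis
;   %tmp49 = load float, float* undef, align 4   ; feeds %tmp27
;   %tmp60 = load float, float* undef, align 4   ; feeds %tmp37
;
; Sinking %tmp38 after %tmp60, the later of the two loads, would make both of
; its operands available.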
define void @instruction_with_2_FOR_operands() {
; CHECK-LABEL: @instruction_with_2_FOR_operands(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br label [[BB13:%.*]]
; CHECK:       bb13:
; CHECK-NEXT:    [[TMP37:%.*]] = phi float [ [[TMP60:%.*]], [[BB13]] ], [ undef, [[BB:%.*]] ]
; CHECK-NEXT:    [[TMP27:%.*]] = phi float [ [[TMP49:%.*]], [[BB13]] ], [ undef, [[BB]] ]
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB13]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    [[TMP38:%.*]] = fmul fast float [[TMP37]], [[TMP27]]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP49]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP60]] = load float, float* undef, align 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp slt i64 [[INDVARS_IV]], undef
; CHECK-NEXT:    br i1 [[TMP12]], label [[BB13]], label [[BB74:%.*]]
; CHECK:       bb74:
; CHECK-NEXT:    ret void
;


bb:
  br label %bb13

bb13:                                             ; preds = %bb13, %bb
  %tmp37 = phi float [ %tmp60, %bb13 ], [ undef, %bb ]
  %tmp27 = phi float [ %tmp49, %bb13 ], [ undef, %bb ]
  %indvars.iv = phi i64 [ %indvars.iv.next, %bb13 ], [ 0, %bb ]
  %tmp38 = fmul fast float %tmp37, %tmp27
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %tmp49 = load float, float* undef, align 4
  %tmp60 = load float, float* undef, align 4
  %tmp12 = icmp slt i64 %indvars.iv, undef
  br i1 %tmp12, label %bb13, label %bb74

bb74:                                             ; preds = %bb13
  ret void
}

; The (first) reason `%first_time.1` cannot be sunk is that it appears outside
; the header and is not dominated by Previous. The fact that it feeds Previous
; is a second reason that prevents sinking.
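; The problematic instructions, copied from the loop below (the commentary is
; ours):
;
;   loop.latch:
;     %first_time.1 = phi i32 [ 20, %if.truebb ], [ %for, %if.falsebb ]
;     %for.next = select i1 %c.2, i32 30, i32 %first_time.1
;
; %first_time.1 is a phi in the latch rather than an instruction dominated by
; %for.next (Previous), and it also feeds %for.next.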
define void @cannot_sink_phi(i32* %ptr) {
; CHECK-LABEL: @cannot_sink_phi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       loop.header:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT:    [[FOR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[FOR_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT:    [[C_1:%.*]] = icmp ult i64 [[IV]], 500
; CHECK-NEXT:    br i1 [[C_1]], label [[IF_TRUEBB:%.*]], label [[IF_FALSEBB:%.*]]
; CHECK:       if.truebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       if.falsebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       loop.latch:
; CHECK-NEXT:    [[FIRST_TIME_1:%.*]] = phi i32 [ 20, [[IF_TRUEBB]] ], [ [[FOR]], [[IF_FALSEBB]] ]
; CHECK-NEXT:    [[C_2:%.*]] = icmp ult i64 [[IV]], 800
; CHECK-NEXT:    [[FOR_NEXT]] = select i1 [[C_2]], i32 30, i32 [[FIRST_TIME_1]]
; CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i32 [[FOR_NEXT]], i32* [[PTR_IDX]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP_HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop.latch ]
  %for = phi i32 [ 0, %entry ], [ %for.next, %loop.latch ]
  %c.1 = icmp ult i64 %iv, 500
  br i1 %c.1, label %if.truebb, label %if.falsebb

if.truebb:
  br label %loop.latch

if.falsebb:
  br label %loop.latch

loop.latch:
  %first_time.1 = phi i32 [ 20, %if.truebb ], [ %for, %if.falsebb ]
  %c.2 = icmp ult i64 %iv, 800
  %for.next = select i1 %c.2, i32 30, i32 %first_time.1
  %ptr.idx = getelementptr i32, i32* %ptr, i64 %iv
  store i32 %for.next, i32* %ptr.idx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop.header

exit:
  ret void
}

; A recurrence in a multiple exit loop.
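; The two exiting branches, copied from the loop below (the commentary is ours):
;
;   br i1 %cmp, label %for.body, label %if.end    ; exit from the header
;   br i1 %cmp2, label %for.cond, label %if.end   ; exit from the latch
;
; %rec, the recurrence phi, is the value returned from %if.end, so both exits
; observe the previous iteration's load.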
define i16 @multiple_exit(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[INDEX]], 2
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[INDEX]], 3
; CHECK-NEXT:    [[TMP7:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i16, i16* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP10]], align 2
; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP11]], <4 x i16>* [[TMP12]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_BODY]] ], [ [[SCALAR_RECUR]], [[FOR_COND]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  ret i16 %rec
}


; A multiple exit case where one of the exiting edges involves a value
; from the recurrence and one does not.
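; The merge in the exit block, copied from below (the commentary is ours): the
; edge from %for.cond carries the recurrence value, while the edge from
; %for.body carries a constant:
;
;   %rec.lcssa = phi i16 [ %rec, %for.cond ], [ 10, %for.body ]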
define i16 @multiple_exit2(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = add i32 [[INDEX]], 2
; CHECK-NEXT:    [[TMP6:%.*]] = add i32 [[INDEX]], 3
; CHECK-NEXT:    [[TMP7:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i16, i16* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP10]], align 2
; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP11]], <4 x i16>* [[TMP12]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_COND]] ], [ 10, [[FOR_BODY]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  %rec.lcssa = phi i16 [ %rec, %for.cond ], [ 10, %for.body ]
  ret i16 %rec.lcssa
}

; A test where the instructions to sink may not be visited in dominance order.
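; The users of the recurrence that need sinking, copied from the loop below
; (the commentary is ours); %select depends on %cmp, which depends on
; %for.trunc, and all three are defined above %for.next, the value feeding the
; recurrence:
;
;   %for.trunc = trunc i64 %for to i32
;   %cmp = icmp slt i32 %for.trunc, 213
;   %select = select i1 %cmp, i32 %for.trunc, i32 22
;   ...
;   %for.next = zext i32 %lv to i64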
define void @sink_dominance(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i32 [[TMP2]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = select i1 false, i1 [[TMP3]], i1 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 false, [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[TMP12]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP12]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP14:%.*]] = trunc <4 x i64> [[TMP13]] to <4 x i32>
; CHECK-NEXT:    [[TMP15:%.*]] = icmp slt <4 x i32> [[TMP14]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP16:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP14]], <4 x i32> <i32 22, i32 22, i32 22, i32 22>
; CHECK-NEXT:    [[TMP17:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP16]], <4 x i32>* [[TMP17]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP12]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP12]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 22
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 22

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}

; Similar to @sink_dominance, but with 2 separate chains that merge at %select
; with a different number of instructions in between.
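; The two chains feeding %select, copied from the loop below (the commentary is
; ours); one chain has two instructions, the other a single compare:
;
;   %step = add i32 %for.trunc, 2            ; chain 1
;   %step.2 = mul i32 %step, 99              ; chain 1
;   %cmp = icmp slt i32 %for.trunc, 213      ; chain 2
;   %select = select i1 %cmp, i32 %for.trunc, i32 %step.2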
define void @sink_dominance_2(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i32 [[TMP2]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = select i1 false, i1 [[TMP3]], i1 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 false, [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[TMP12]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP12]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP14:%.*]] = trunc <4 x i64> [[TMP13]] to <4 x i32>
; CHECK-NEXT:    [[TMP15:%.*]] = add <4 x i32> [[TMP14]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP16:%.*]] = mul <4 x i32> [[TMP15]], <i32 99, i32 99, i32 99, i32 99>
; CHECK-NEXT:    [[TMP17:%.*]] = icmp slt <4 x i32> [[TMP14]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP18:%.*]] = select <4 x i1> [[TMP17]], <4 x i32> [[TMP14]], <4 x i32> [[TMP16]]
; CHECK-NEXT:    [[TMP19:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP18]], <4 x i32>* [[TMP19]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP12]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP12]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[STEP:%.*]] = add i32 [[FOR_TRUNC]], 2
; CHECK-NEXT:    [[STEP_2:%.*]] = mul i32 [[STEP]], 99
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 [[STEP_2]]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %step = add i32 %for.trunc, 2
  %step.2 = mul i32 %step, 99

  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 %step.2

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}

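; The load %lv.for is addressed via the recurrence %for, so sinking it below
; %for.next would move it past the store to %ptr through %gep.iv, which may
; alias the load; hence the recurrence is not vectorized (our reading of the
; IR below, which has no descriptive comment of its own).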
define void @cannot_sink_load_past_store(i32* %ptr, i32 %N) {
; CHECK-LABEL: @cannot_sink_load_past_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP_FOR:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i64 [[FOR]]
; CHECK-NEXT:    [[LV_FOR:%.*]] = load i32, i32* [[GEP_FOR]], align 4
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[FOR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[LV_FOR]], [[FOR_TRUNC]]
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[LV_FOR]], i32 22
; CHECK-NEXT:    [[GEP_IV:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    store i32 0, i32* [[GEP_IV]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[IV]] to i64
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N:%.*]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %gep.for = getelementptr inbounds i32, i32* %ptr, i64 %for
  %lv.for = load i32, i32* %gep.for, align 4
  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %lv.for, %for.trunc
  %select = select i1 %cmp, i32 %lv.for, i32 22

  %gep.iv = getelementptr inbounds i32, i32* %ptr, i32 %iv
  store i32 0, i32* %gep.iv
  %iv.next = add i32 %iv, 1
  %for.next = zext i32 %iv to i64

  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}