1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
3
4
5@p = external local_unnamed_addr global [257 x i32], align 16
6@q = external local_unnamed_addr global [257 x i32], align 16
7
8; Test case for PR43398.
9
define void @can_sink_after_store(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @can_sink_after_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

; %pre.phi is a first-order recurrence: in iteration N it holds the value
; loaded by %pre.next in iteration N-1. Its user %add.1 appears *before*
; %pre.next, so vectorizing requires sinking %add.1 past the load. The
; CHECK lines above verify the loop does get vectorized (VF=4).
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ] ; first-order recurrence
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x                                 ; user located before %pre.next; must be sunk
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
91
92; We can sink potential trapping instructions, as this will only delay the trap
93; and not introduce traps on additional paths.
define void @sink_sdiv(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: @sink_sdiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[DIV_1:%.*]] = sdiv i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[DIV_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;

; Same recurrence shape as @can_sink_after_store, but the instruction that
; must be sunk is an sdiv, which can trap (division by zero / INT_MIN by -1).
; Sinking it within the same iteration only delays a trap that would happen
; anyway; it does not introduce a trap on a new path, so sinking is legal
; and the loop is still vectorized.
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ] ; first-order recurrence
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %div.1 = sdiv i32 %pre.phi, %x                                ; potentially trapping user to sink
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %div.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
175
176; Sink users of %pre.phi recursively.
define void @can_sink_with_additional_user(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @can_sink_with_additional_user(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> poison, i32 [[DOTPRE]], i32 3
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[TMP5]], [[BROADCAST_SPLAT]]
; CHECK-NEXT:    [[TMP7:%.*]] = add <4 x i32> [[TMP5]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP8:%.*]] = add <4 x i32> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[TMP0]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1996
; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1999, 1996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1997, [[MIDDLE_BLOCK]] ], [ 1, [[PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SCALAR_RECUR]], [[X]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[X]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_3:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[ADD_4:%.*]] = add i32 [[ADD_2]], [[ADD_3]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_4]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT]], label [[FOR]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



; Here the recurrence user %add.1 has a further user %add.2 that also sits
; above the load %pre.next. Sinking must be applied recursively to the whole
; chain (%add.1 and %add.2) for vectorization to succeed, which the CHECK
; lines above confirm.
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ] ; first-order recurrence
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x                                 ; direct user of the recurrence
  %add.2 = add i32 %add.1, %x                                   ; transitive user; sunk recursively
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.3 = add i32 %add.1, %pre.next
  %add.4 = add i32 %add.2, %add.3
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.4, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
266
267; FIXME: We can sink a store, if we can guarantee that it does not alias any
268;        loads/stores in between.
define void @cannot_sink_store(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[PRE_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[ADD_1]], i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[ADD_1]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



; The recurrence user %add.1 feeds a store through %ptr, which may alias the
; loads/stores between it and %pre.next, so the store cannot currently be
; sunk. The CHECK lines above show only the original scalar loop, i.e. the
; loop is NOT vectorized.
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ] ; first-order recurrence
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  store i32 %add.1, i32* %ptr                                   ; may alias @p/@q; blocks sinking
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
321
322; Some kinds of reductions are not detected by IVDescriptors. If we have a
323; cycle, we cannot sink it.
define void @cannot_sink_reduction(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: @cannot_sink_reduction(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[PREHEADER:%.*]]
; CHECK:       preheader:
; CHECK-NEXT:    [[IDX_PHI_TRANS:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
; CHECK-NEXT:    [[DOTPRE:%.*]] = load i32, i32* [[IDX_PHI_TRANS]], align 4
; CHECK-NEXT:    br label [[FOR:%.*]]
; CHECK:       for:
; CHECK-NEXT:    [[PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], [[PREHEADER]] ], [ [[D:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[PREHEADER]] ], [ [[IV_NEXT:%.*]], [[FOR]] ]
; CHECK-NEXT:    [[D]] = sdiv i32 [[PRE_PHI]], [[X:%.*]]
; CHECK-NEXT:    [[IDX_1:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 [[IV]]
; CHECK-NEXT:    [[PRE_NEXT:%.*]] = load i32, i32* [[IDX_1]], align 4
; CHECK-NEXT:    [[ADD_2:%.*]] = add i32 [[X]], [[PRE_NEXT]]
; CHECK-NEXT:    [[IDX_2:%.*]] = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 [[IV]]
; CHECK-NEXT:    store i32 [[ADD_2]], i32* [[IDX_2]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 2000
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[EXIT:%.*]], label [[FOR]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;



; NOTE(review): a stale "; CHECK-NET: ret void" line used to follow here —
; "CHECK-NET" is a typo for "CHECK-NEXT" and was never checked by FileCheck;
; it was removed because, as an active directive after the final "ret void"
; match above, it would fail. The autogenerated block above already checks
; the full output.
;
; %d = sdiv feeds straight back into %pre.phi, forming a cycle that is
; neither a recognized reduction nor a sinkable first-order recurrence, so
; the loop stays scalar (the CHECK lines show only the original loop).
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]        ; cycle: phi <- sdiv <- phi
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %d = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %x, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
375
; Sink %tmp38 after %tmp60; this enables the loop to be vectorized.
define void @instruction_with_2_FOR_operands(float* noalias %A, float* noalias %B, float* noalias %C) {
; CHECK-LABEL: @instruction_with_2_FOR_operands(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 1.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, float* [[C:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[A:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP2]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR1]], <4 x float> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* [[B:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP4]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT3]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR]], <4 x float> [[BROADCAST_SPLAT3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast float* [[TMP7]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP6]], <4 x float>* [[TMP8]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1001, 1000
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT4:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI5:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[BB74:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT6:%.*]] = phi float [ 1.000000e+00, [[BB:%.*]] ], [ [[VECTOR_RECUR_EXTRACT4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi float [ 0.000000e+00, [[BB]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    br label [[BB13:%.*]]
; CHECK:       bb13:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi float [ [[TMP60:%.*]], [[BB13]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[SCALAR_RECUR7:%.*]] = phi float [ [[TMP49:%.*]], [[BB13]] ], [ [[SCALAR_RECUR_INIT6]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[BB13]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP38:%.*]] = fmul fast float [[SCALAR_RECUR]], [[SCALAR_RECUR7]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, float* [[C]], i64 [[IV]]
; CHECK-NEXT:    [[TMP49]] = load float, float* [[A]], align 4
; CHECK-NEXT:    [[TMP60]] = load float, float* [[B]], align 4
; CHECK-NEXT:    store float [[TMP38]], float* [[GEP]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = icmp slt i64 [[IV]], 1000
; CHECK-NEXT:    br i1 [[TMP12]], label [[BB13]], label [[BB74]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       bb74:
; CHECK-NEXT:    ret void
;
; %tmp38 uses BOTH first-order recurrences (%tmp37 and %tmp27) and appears
; before the loads that define their next values, so the vectorizer must
; sink %tmp38 below %tmp60 to vectorize the loop.
bb:
  br label %bb13

bb13:                                             ; preds = %bb13, %bb
  %tmp37 = phi float [ %tmp60, %bb13 ], [ 0.0, %bb ]  ; first-order recurrence (prev %tmp60)
  %tmp27 = phi float [ %tmp49, %bb13 ], [ 1.0, %bb ]  ; first-order recurrence (prev %tmp49)
  %iv = phi i64 [ %iv.next, %bb13 ], [ 0, %bb ]
  %tmp38 = fmul fast float %tmp37, %tmp27             ; user of both recurrences; sunk after %tmp60
  %iv.next = add nuw nsw i64 %iv, 1
  %gep = getelementptr inbounds float, float* %C, i64 %iv
  %tmp49 = load float, float* %A, align 4
  %tmp60 = load float, float* %B, align 4
  store float %tmp38, float* %gep
  %tmp12 = icmp slt i64 %iv, 1000
  br i1 %tmp12, label %bb13, label %bb74

bb74:                                             ; preds = %bb13
  ret void
}
450
define void @instruction_with_2_FOR_operands_and_multiple_other_uses(float* noalias %dst.1, float* noalias %dst.2, float* noalias %dst.3, float* noalias %for.ptr.1, float* noalias %for.ptr.2) {
; CHECK-LABEL: @instruction_with_2_FOR_operands_and_multiple_other_uses(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[FOR_PTR_2:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR1]], <4 x float> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[FOR_PTR_1:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT3]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR]], <4 x float> [[BROADCAST_SPLAT3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <4 x float> [[TMP4]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <4 x float> [[TMP4]], [[TMP2]]
; CHECK-NEXT:    [[TMP7:%.*]] = fadd fast <4 x float> [[TMP4]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, float* [[DST_1:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds float, float* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast float* [[TMP9]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP6]], <4 x float>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds float, float* [[DST_2:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, float* [[TMP11]], i32 0
; CHECK-NEXT:    [[TMP13:%.*]] = bitcast float* [[TMP12]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP13]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds float, float* [[DST_3:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, float* [[TMP14]], i32 0
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast float* [[TMP15]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP7]], <4 x float>* [[TMP16]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1001, 1000
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT4:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI5:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT6:%.*]] = phi float [ 0.000000e+00, [[BB:%.*]] ], [ [[VECTOR_RECUR_EXTRACT4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi float [ 0.000000e+00, [[BB]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi float [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[SCALAR_RECUR7:%.*]] = phi float [ [[SCALAR_RECUR_INIT6]], [[SCALAR_PH]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_1_USE_1:%.*]] = fmul fast float [[SCALAR_RECUR]], 2.000000e+00
; CHECK-NEXT:    [[USED_BY_BOTH:%.*]] = fmul fast float [[SCALAR_RECUR]], [[SCALAR_RECUR7]]
; CHECK-NEXT:    [[FOR_2_NEXT]] = load float, float* [[FOR_PTR_2]], align 4
; CHECK-NEXT:    [[FOR_1_USE_3:%.*]] = fadd fast float [[SCALAR_RECUR]], 1.000000e+00
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[FOR_1_NEXT]] = load float, float* [[FOR_PTR_1]], align 4
; CHECK-NEXT:    [[GEP_DST_1:%.*]] = getelementptr inbounds float, float* [[DST_1]], i64 [[IV]]
; CHECK-NEXT:    store float [[USED_BY_BOTH]], float* [[GEP_DST_1]], align 4
; CHECK-NEXT:    [[GEP_DST_2:%.*]] = getelementptr inbounds float, float* [[DST_2]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_1]], float* [[GEP_DST_2]], align 4
; CHECK-NEXT:    [[GEP_DST_3:%.*]] = getelementptr inbounds float, float* [[DST_3]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_3]], float* [[GEP_DST_3]], align 4
; CHECK-NEXT:    [[EC:%.*]] = icmp slt i64 [[IV]], 1000
; CHECK-NEXT:    br i1 [[EC]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; %used.by.both reads both recurrences %for.1 and %for.2, while %for.1 also
; has independent uses (%for.1.use.1, %for.1.use.3) before and after the
; defining loads. The CHECK lines verify the loop is still vectorized with
; the uses placed correctly relative to both recurrences.
bb:
  br label %loop

loop:
  %for.1 = phi float [ 0.0, %bb ], [ %for.1.next, %loop]  ; first-order recurrence 1
  %for.2 = phi float [ 0.0, %bb ], [ %for.2.next, %loop]  ; first-order recurrence 2
  %iv = phi i64 [ 0, %bb ], [ %iv.next, %loop ]
  %for.1.use.1  = fmul fast float %for.1, 2.0
  %used.by.both = fmul fast float %for.1, %for.2           ; uses both recurrences
  %for.2.next = load float, float* %for.ptr.2, align 4
  %for.1.use.3 = fadd fast float %for.1, 1.0
  %iv.next = add nuw nsw i64 %iv, 1
  %for.1.next = load float, float* %for.ptr.1, align 4
  %gep.dst.1 = getelementptr inbounds float, float* %dst.1, i64 %iv
  store float %used.by.both, float* %gep.dst.1
  %gep.dst.2 = getelementptr inbounds float, float* %dst.2, i64 %iv
  store float %for.1.use.1, float* %gep.dst.2
  %gep.dst.3 = getelementptr inbounds float, float* %dst.3, i64 %iv
  store float %for.1.use.3, float* %gep.dst.3
  %ec = icmp slt i64 %iv, 1000
  br i1 %ec, label %loop, label %exit

exit:
  ret void
}
546
547; Variation of @instruction_with_2_FOR_operands_and_multiple_other_uses, with
548; multiple instructions in a chain from for.1 to %used.by.both.
define void @instruction_with_2_FOR_operands_and_multiple_other_uses_chain(float* noalias %dst.1, float* noalias %dst.2, float* noalias %dst.3, float* noalias %for.ptr.1, float* noalias %for.ptr.2) {
; CHECK-LABEL: @instruction_with_2_FOR_operands_and_multiple_other_uses_chain(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x float> [ <float poison, float poison, float poison, float 0.000000e+00>, [[VECTOR_PH]] ], [ [[BROADCAST_SPLAT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[FOR_PTR_2:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR1]], <4 x float> [[BROADCAST_SPLAT]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[FOR_PTR_1:%.*]], align 4
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP3]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT3]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x float> [[VECTOR_RECUR]], <4 x float> [[BROADCAST_SPLAT3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <4 x float> [[TMP4]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
; CHECK-NEXT:    [[TMP7:%.*]] = fmul fast <4 x float> [[TMP6]], [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = fadd fast <4 x float> [[TMP4]], <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds float, float* [[DST_1:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds float, float* [[TMP9]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast float* [[TMP10]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP7]], <4 x float>* [[TMP11]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, float* [[DST_2:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds float, float* [[TMP12]], i32 0
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast float* [[TMP13]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP5]], <4 x float>* [[TMP14]], align 4
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, float* [[DST_3:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds float, float* [[TMP15]], i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = bitcast float* [[TMP16]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP8]], <4 x float>* [[TMP17]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1001, 1000
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT3]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT4:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI5:%.*]] = extractelement <4 x float> [[BROADCAST_SPLAT]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT6:%.*]] = phi float [ 0.000000e+00, [[BB:%.*]] ], [ [[VECTOR_RECUR_EXTRACT4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi float [ 0.000000e+00, [[BB]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[BB]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi float [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[SCALAR_RECUR7:%.*]] = phi float [ [[SCALAR_RECUR_INIT6]], [[SCALAR_PH]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_1_USE_1:%.*]] = fmul fast float [[SCALAR_RECUR]], 2.000000e+00
; CHECK-NEXT:    [[FOR_1_USE_C:%.*]] = fmul fast float [[FOR_1_USE_1]], 2.000000e+00
; CHECK-NEXT:    [[USED_BY_BOTH:%.*]] = fmul fast float [[FOR_1_USE_C]], [[SCALAR_RECUR7]]
; CHECK-NEXT:    [[FOR_2_NEXT]] = load float, float* [[FOR_PTR_2]], align 4
; CHECK-NEXT:    [[FOR_1_USE_3:%.*]] = fadd fast float [[SCALAR_RECUR]], 1.000000e+00
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[FOR_1_NEXT]] = load float, float* [[FOR_PTR_1]], align 4
; CHECK-NEXT:    [[GEP_DST_1:%.*]] = getelementptr inbounds float, float* [[DST_1]], i64 [[IV]]
; CHECK-NEXT:    store float [[USED_BY_BOTH]], float* [[GEP_DST_1]], align 4
; CHECK-NEXT:    [[GEP_DST_2:%.*]] = getelementptr inbounds float, float* [[DST_2]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_1]], float* [[GEP_DST_2]], align 4
; CHECK-NEXT:    [[GEP_DST_3:%.*]] = getelementptr inbounds float, float* [[DST_3]], i64 [[IV]]
; CHECK-NEXT:    store float [[FOR_1_USE_3]], float* [[GEP_DST_3]], align 4
; CHECK-NEXT:    [[EC:%.*]] = icmp slt i64 [[IV]], 1000
; CHECK-NEXT:    br i1 [[EC]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Scalar input IR below. %for.1 and %for.2 are two first-order recurrences.
; %used.by.both is reached from %for.1 through the chain
; %for.1.use.1 -> %for.1.use.c and also uses %for.2, so the vectorizer must
; handle sinking a multi-instruction chain (see the file comment preceding
; this function). The CHECK lines above show the loop does get vectorized.
bb:
  br label %loop

loop:
  %for.1 = phi float [ 0.0, %bb ], [ %for.1.next, %loop]
  %for.2 = phi float [ 0.0, %bb ], [ %for.2.next, %loop]
  %iv = phi i64 [ 0, %bb ], [ %iv.next, %loop ]
  %for.1.use.1  = fmul fast float %for.1, 2.0
  %for.1.use.c  = fmul fast float %for.1.use.1, 2.0
  %used.by.both = fmul fast float %for.1.use.c, %for.2
  %for.2.next = load float, float* %for.ptr.2, align 4
  %for.1.use.3 = fadd fast float %for.1, 1.0
  %iv.next = add nuw nsw i64 %iv, 1
  %for.1.next = load float, float* %for.ptr.1, align 4
  %gep.dst.1 = getelementptr inbounds float, float* %dst.1, i64 %iv
  store float %used.by.both, float* %gep.dst.1
  %gep.dst.2 = getelementptr inbounds float, float* %dst.2, i64 %iv
  store float %for.1.use.1, float* %gep.dst.2
  %gep.dst.3 = getelementptr inbounds float, float* %dst.3, i64 %iv
  store float %for.1.use.3, float* %gep.dst.3
  %ec = icmp slt i64 %iv, 1000
  br i1 %ec, label %loop, label %exit

exit:
  ret void
}
647
648; The (first) reason `%first_time.1` cannot be sunk is because it appears outside
649; the header and is not dominated by Previous. The fact that it feeds Previous
650; is a second sinking-preventing reason.
define void @cannot_sink_phi(i32* %ptr) {
; CHECK-LABEL: @cannot_sink_phi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       loop.header:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT:    [[FOR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[FOR_NEXT:%.*]], [[LOOP_LATCH]] ]
; CHECK-NEXT:    [[C_1:%.*]] = icmp ult i64 [[IV]], 500
; CHECK-NEXT:    br i1 [[C_1]], label [[IF_TRUEBB:%.*]], label [[IF_FALSEBB:%.*]]
; CHECK:       if.truebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       if.falsebb:
; CHECK-NEXT:    br label [[LOOP_LATCH]]
; CHECK:       loop.latch:
; CHECK-NEXT:    [[FIRST_TIME_1:%.*]] = phi i32 [ 20, [[IF_TRUEBB]] ], [ [[FOR]], [[IF_FALSEBB]] ]
; CHECK-NEXT:    [[C_2:%.*]] = icmp ult i64 [[IV]], 800
; CHECK-NEXT:    [[FOR_NEXT]] = select i1 [[C_2]], i32 30, i32 [[FIRST_TIME_1]]
; CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr i32, i32* [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    store i32 [[FOR_NEXT]], i32* [[PTR_IDX]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[LOOP_HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Scalar input IR below. %first_time.1 is a phi in %loop.latch that both
; uses %for and feeds %for.next; per the file comment above this function it
; cannot be sunk. Note the CHECK lines contain no vector.body: the loop is
; expected to remain scalar.
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop.latch ]
  %for = phi i32 [ 0, %entry ], [ %for.next, %loop.latch ]
  %c.1 = icmp ult i64 %iv, 500
  br i1 %c.1, label %if.truebb, label %if.falsebb

if.truebb:
  br label %loop.latch

if.falsebb:
  br label %loop.latch

loop.latch:
  %first_time.1 = phi i32 [ 20, %if.truebb ], [ %for, %if.falsebb ]
  %c.2 = icmp ult i64 %iv, 800
  %for.next = select i1 %c.2, i32 30, i32 %first_time.1
  %ptr.idx = getelementptr i32, i32* %ptr, i64 %iv
  store i32 %for.next, i32* %ptr.idx
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop.header

exit:
  ret void
}
704
705; A recurrence in a multiple exit loop.
define i16 @multiple_exit(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_BODY]] ], [ [[SCALAR_RECUR]], [[FOR_COND]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
; Scalar input IR below. %rec is a first-order recurrence (loaded in the
; header, previous value stored in %for.body). The loop has two exiting
; blocks (%for.cond and %for.body), both branching to %if.end, and the
; recurrence value is used after the loop (ret i16 %rec).
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  ret i16 %rec
}
781
782
783; A multiple exit case where one of the exiting edges involves a value
784; from the recurrence and one does not.
define i16 @multiple_exit2(i16* %p, i32 %n) {
; CHECK-LABEL: @multiple_exit2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 0)
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SMAX]], i32 2096)
; CHECK-NEXT:    [[TMP0:%.*]] = add nuw nsw i32 [[UMIN]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 4, i32 [[N_MOD_VF]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ <i16 poison, i16 poison, i16 poison, i16 0>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    br label [[FOR_COND:%.*]]
; CHECK:       for.cond:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IPROM:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds i16, i16* [[P]], i64 [[IPROM]]
; CHECK-NEXT:    [[REC_NEXT]] = load i16, i16* [[B]], align 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[IF_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    store i16 [[SCALAR_RECUR]], i16* [[B]], align 4
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I]], 1
; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[I]], 2096
; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_COND]], label [[IF_END]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       if.end:
; CHECK-NEXT:    [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[FOR_COND]] ], [ 10, [[FOR_BODY]] ]
; CHECK-NEXT:    ret i16 [[REC_LCSSA]]
;
; Scalar input IR below. Same shape as @multiple_exit, but the exit value
; %rec.lcssa takes the recurrence value %rec only on the edge from
; %for.cond; the edge from %for.body yields the constant 10 (see the file
; comment preceding this function).
entry:
  br label %for.cond

for.cond:
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %rec = phi i16 [0, %entry], [ %rec.next, %for.body ]
  %iprom = sext i32 %i to i64
  %b = getelementptr inbounds i16, i16* %p, i64 %iprom
  %rec.next = load i16, i16* %b
  %cmp = icmp slt i32 %i, %n
  br i1 %cmp, label %for.body, label %if.end

for.body:
  store i16 %rec , i16* %b, align 4
  %inc = add nsw i32 %i, 1
  %cmp2 = icmp slt i32 %i, 2096
  br i1 %cmp2, label %for.cond, label %if.end

if.end:
  %rec.lcssa = phi i16 [ %rec, %for.cond ], [ 10, %for.body ]
  ret i16 %rec.lcssa
}
861
862; A test where the instructions to sink may not be visited in dominance order.
define void @sink_dominance(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP15:%.*]] = select <4 x i1> [[TMP14]], <4 x i32> [[TMP13]], <4 x i32> <i32 22, i32 22, i32 22, i32 22>
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* [[TMP16]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 22
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Scalar input IR below. %for.trunc, %cmp and %select all use the
; recurrence %for and appear before its update %for.next, so they must be
; sunk past it; per the file comment above this function they may not be
; visited in dominance order. The CHECK lines confirm vectorization still
; succeeds.
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 22

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
944
945; Similar to @sink_dominance, but with 2 separate chains that merge at %select
946; with a different number of instructions in between.
define void @sink_dominance_2(i32* %ptr, i32 %N) {
; CHECK-LABEL: @sink_dominance_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[UMAX1:%.*]] = call i32 @llvm.umax.i32(i32 [[N:%.*]], i32 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[UMAX1]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK:       vector.scevcheck:
; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[UMAX]], -1
; CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i32 [[TMP0]], 0
; CHECK-NEXT:    br i1 [[TMP4]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[UMAX1]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[UMAX1]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP7:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i32 [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP10]], align 4
; CHECK-NEXT:    [[TMP11]] = zext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP11]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i64> [[TMP12]] to <4 x i32>
; CHECK-NEXT:    [[TMP14:%.*]] = add <4 x i32> [[TMP13]], <i32 2, i32 2, i32 2, i32 2>
; CHECK-NEXT:    [[TMP15:%.*]] = mul <4 x i32> [[TMP14]], <i32 99, i32 99, i32 99, i32 99>
; CHECK-NEXT:    [[TMP16:%.*]] = icmp slt <4 x i32> [[TMP13]], <i32 213, i32 213, i32 213, i32 213>
; CHECK-NEXT:    [[TMP17:%.*]] = select <4 x i1> [[TMP16]], <4 x i32> [[TMP13]], <4 x i32> [[TMP15]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP17]], <4 x i32>* [[TMP18]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[UMAX1]], [[N_VEC]]
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP11]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP11]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[SCALAR_RECUR]] to i32
; CHECK-NEXT:    [[STEP:%.*]] = add i32 [[FOR_TRUNC]], 2
; CHECK-NEXT:    [[STEP_2:%.*]] = mul i32 [[STEP]], 99
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[FOR_TRUNC]], 213
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[FOR_TRUNC]], i32 [[STEP_2]]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[GEP]], align 4
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[LV]] to i64
; CHECK-NEXT:    store i32 [[SELECT]], i32* [[GEP]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Scalar input IR below. Two chains of different lengths both start at the
; recurrence %for and merge at %select: %for.trunc -> %step -> %step.2 and
; %for.trunc -> %cmp (see the file comment preceding this function). All of
; them must be sunk past %for.next; the CHECK lines confirm vectorization
; succeeds.
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %for.trunc = trunc i64 %for to i32
  %step = add i32 %for.trunc, 2
  %step.2 = mul i32 %step, 99

  %cmp = icmp slt i32 %for.trunc, 213
  %select = select i1 %cmp, i32 %for.trunc, i32 %step.2

  %gep = getelementptr inbounds i32, i32* %ptr, i32 %iv
  %lv = load i32, i32* %gep, align 4
  %for.next = zext i32 %lv to i64
  store i32 %select, i32* %gep

  %iv.next = add i32 %iv, 1
  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
1035
define void @cannot_sink_load_past_store(i32* %ptr, i32 %N) {
; CHECK-LABEL: @cannot_sink_load_past_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[FOR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP_FOR:%.*]] = getelementptr inbounds i32, i32* [[PTR:%.*]], i64 [[FOR]]
; CHECK-NEXT:    [[LV_FOR:%.*]] = load i32, i32* [[GEP_FOR]], align 4
; CHECK-NEXT:    [[FOR_TRUNC:%.*]] = trunc i64 [[FOR]] to i32
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[LV_FOR]], [[FOR_TRUNC]]
; CHECK-NEXT:    [[SELECT:%.*]] = select i1 [[CMP]], i32 [[LV_FOR]], i32 22
; CHECK-NEXT:    [[GEP_IV:%.*]] = getelementptr inbounds i32, i32* [[PTR]], i32 [[IV]]
; CHECK-NEXT:    store i32 0, i32* [[GEP_IV]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT:    [[FOR_NEXT]] = zext i32 [[IV]] to i64
; CHECK-NEXT:    [[CMP73:%.*]] = icmp ugt i32 [[N:%.*]], [[IV_NEXT]]
; CHECK-NEXT:    br i1 [[CMP73]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
; Scalar input IR below. The recurrence users (%gep.for, %lv.for, ...)
; appear before %for.next; sinking them past it would move the load %lv.for
; past the store to %gep.iv into the same base pointer %ptr — as the
; function name states, that is not allowed. Note the CHECK lines contain
; no vector.body: the loop is expected to remain scalar.
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %for.next, %loop ]
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]

  %gep.for = getelementptr inbounds i32, i32* %ptr, i64 %for
  %lv.for = load i32, i32* %gep.for, align 4
  %for.trunc = trunc i64 %for to i32
  %cmp = icmp slt i32 %lv.for, %for.trunc
  %select = select i1 %cmp, i32 %lv.for, i32 22

  %gep.iv = getelementptr inbounds i32, i32* %ptr, i32 %iv
  store i32 0, i32* %gep.iv
  %iv.next = add i32 %iv, 1
  %for.next = zext i32 %iv to i64

  %cmp73 = icmp ugt i32 %N, %iv.next
  br i1 %cmp73, label %loop, label %exit

exit:
  ret void
}
1081
; Two first-order recurrences (%for.1 = 10.0 seed, %for.2 = 20.0 seed) that
; share the SAME incoming value %for.1.next from the loop. Their users
; (%add.1, %add.2) appear before the load that defines %for.1.next, so the
; vectorizer must sink them after it. The autogenerated CHECK lines show
; this succeeds: a vector.body with two <4 x double> recurrence phis (both
; fed by the same [[WIDE_LOAD]]) and the corresponding splice shuffles.
define void @test_for_sink_instruction_after_same_incoming_1(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x double> [ <double poison, double poison, double poison, double 1.000000e+01>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x double> [ <double poison, double poison, double poison, double 2.000000e+01>, [[VECTOR_PH]] ], [ [[WIDE_LOAD]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds double, double* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[TMP2]] to <4 x double>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x double>, <4 x double>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR1]], <4 x double> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP6:%.*]] = fadd <4 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>, [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = fadd <4 x double> [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[TMP2]] to <4 x double>*
; CHECK-NEXT:    store <4 x double> [[TMP7]], <4 x double>* [[TMP8]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 999, 996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT2:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI3:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT4:%.*]] = phi double [ 2.000000e+01, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi double [ 1.000000e+01, [[ENTRY]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 997, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[SCALAR_RECUR5:%.*]] = phi double [ [[SCALAR_RECUR_INIT4]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = fadd double 1.000000e+01, [[SCALAR_RECUR5]]
; CHECK-NEXT:    [[ADD_2:%.*]] = fadd double [[ADD_1]], [[SCALAR_RECUR]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[GEP_PTR:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 [[IV]]
; CHECK-NEXT:    [[FOR_1_NEXT]] = load double, double* [[GEP_PTR]], align 8
; CHECK-NEXT:    store double [[ADD_2]], double* [[GEP_PTR]], align 8
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  ; Both recurrence phis take %for.1.next as their loop incoming value;
  ; only the start values (10.0 vs 20.0) differ.
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  ; Users of the recurrences precede the defining load and must be sunk
  ; after %for.1.next for vectorization to be legal.
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
1153
1154
; Same scenario as @test_for_sink_instruction_after_same_incoming_1, but
; with the two recurrence phis declared in the opposite order (%for.2
; first). Both still share the incoming value %for.1.next, and the CHECK
; lines show vectorization succeeds regardless of phi order, with the
; recurrence start values (2.0e+01 / 1.0e+01) swapped accordingly.
define void @test_for_sink_instruction_after_same_incoming_2(double* %ptr) {
; CHECK-LABEL: @test_for_sink_instruction_after_same_incoming_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR:%.*]] = phi <4 x double> [ <double poison, double poison, double poison, double 2.000000e+01>, [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VECTOR_RECUR1:%.*]] = phi <4 x double> [ <double poison, double poison, double poison, double 1.000000e+01>, [[VECTOR_PH]] ], [ [[WIDE_LOAD]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds double, double* [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[TMP2]] to <4 x double>*
; CHECK-NEXT:    [[WIDE_LOAD]] = load <4 x double>, <4 x double>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR1]], <4 x double> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[WIDE_LOAD]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT:    [[TMP6:%.*]] = fadd <4 x double> <double 1.000000e+01, double 1.000000e+01, double 1.000000e+01, double 1.000000e+01>, [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = fadd <4 x double> [[TMP6]], [[TMP4]]
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast double* [[TMP2]] to <4 x double>*
; CHECK-NEXT:    store <4 x double> [[TMP7]], <4 x double>* [[TMP8]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996
; CHECK-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 999, 996
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT2:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 3
; CHECK-NEXT:    [[VECTOR_RECUR_EXTRACT_FOR_PHI3:%.*]] = extractelement <4 x double> [[WIDE_LOAD]], i32 2
; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[SCALAR_RECUR_INIT4:%.*]] = phi double [ 1.000000e+01, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[SCALAR_RECUR_INIT:%.*]] = phi double [ 2.000000e+01, [[ENTRY]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 997, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[SCALAR_RECUR5:%.*]] = phi double [ [[SCALAR_RECUR_INIT4]], [[SCALAR_PH]] ], [ [[FOR_1_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[ADD_1:%.*]] = fadd double 1.000000e+01, [[SCALAR_RECUR]]
; CHECK-NEXT:    [[ADD_2:%.*]] = fadd double [[ADD_1]], [[SCALAR_RECUR5]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[GEP_PTR:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 [[IV]]
; CHECK-NEXT:    [[FOR_1_NEXT]] = load double, double* [[GEP_PTR]], align 8
; CHECK-NEXT:    store double [[ADD_2]], double* [[GEP_PTR]], align 8
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  ; Phi order swapped relative to the _1 variant; both still share the
  ; loop incoming value %for.1.next.
  %for.2 = phi double [ 20.0, %entry ], [ %for.1.next, %loop ]
  %for.1 = phi double [ 10.0, %entry ], [ %for.1.next, %loop ]
  %iv = phi i64 [ 1, %entry ], [ %iv.next, %loop ]
  ; Recurrence users precede the defining load; they get sunk after it.
  %add.1 = fadd double 10.0, %for.2
  %add.2 = fadd double %add.1, %for.1
  %iv.next = add nuw nsw i64 %iv, 1
  %gep.ptr = getelementptr inbounds double, double* %ptr, i64 %iv
  %for.1.next  = load double, double* %gep.ptr, align 8
  store double %add.2, double* %gep.ptr
  %exitcond.not = icmp eq i64 %iv.next, 1000
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
1226