; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that A[2 * index] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression, we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.
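;
; As a concrete illustration of the wrap being ruled out: with a 32-bit
; index, after 2^31 iterations index = 2^31 and the i32 product
; 2 * index wraps to 0 (2 * 2^31 mod 2^32 == 0), so A[2 * index] would
; alias A[0] again. The runtime check below rejects trip counts for
; which {0,+,2} could wrap in i32.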

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;    i64 {0,+,2}<%for.body>
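;
; Roughly, the predicate is materialized as the overflow checks at the
; top of the versioned function: @llvm.umul.with.overflow.i32 computes
; 2 * (trunc(%N - 1)), and the icmp against 4294967295 makes sure that
; %N - 1 itself fits in 32 bits; if either part fails, control branches
; to the unoptimized copy of the loop.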

define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 false, [[TMP8]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP10:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP11:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP10]]
; LV-NEXT:    [[TMP13:%.*]] = icmp ugt i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP14:%.*]] = icmp ult i8* [[TMP11]], [[A5]]
; LV-NEXT:    [[TMP15:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP16:%.*]] = or i1 [[TMP9]], [[TMP15]]
; LV-NEXT:    br i1 [[TMP16]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression, we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.
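;
; As a concrete illustration (hypothetical values): for n = 3, index
; starts at 3 and the i32 offsets visited are 6, 4, 2, so nothing wraps.
; If the decreasing sequence ever dropped below 0 in i32, it would wrap
; around to a huge unsigned value, which is exactly what the NUSW
; predicate rules out.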

; The expression for %mul_ext as analyzed by SCEV is
;     (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;     i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
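;
; In the emitted check this start value is visible as
; [[TMP1]] = shl i32 [[TRUNCN]], 1, i.e. 2 * (trunc i64 %N to i32),
; and it serves as the base against which the add/sub overflow probes
; are compared.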

define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp ult i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP5]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; LV-NEXT:    [[TMP11:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT:    [[TMP12:%.*]] = zext i31 [[TMP11]] to i64
; LV-NEXT:    [[TMP13:%.*]] = shl nuw nsw i64 [[TMP12]], 1
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP13]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP14:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP16:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP14]]
; LV-NEXT:    [[TMP17:%.*]] = icmp ugt i8* [[TMP16]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ult i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign-extend 2 * index
; instead of zero-extending it.

; The expression for %mul_ext as analyzed by SCEV is
;     (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;     i64 {0,+,2}<%for.body>
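;
; This is the signed analogue of the wrap in @f1: with a 32-bit index,
; once index reaches 2^30 the i32 product 2 * index is 2^31, which is
; INT32_MIN, so sext would turn it into a large negative i64 offset.
; The NSSW predicate guarantees this never happens; note the signed
; comparisons (icmp sgt / icmp slt) in the check below.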

define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 false, [[TMP8]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP10:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP11:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP10]]
; LV-NEXT:    [[TMP13:%.*]] = icmp ugt i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP14:%.*]] = icmp ult i8* [[TMP11]], [[A5]]
; LV-NEXT:    [[TMP15:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP16:%.*]] = or i1 [[TMP9]], [[TMP15]]
; LV-NEXT:    br i1 [[TMP16]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP5]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; LV-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP11]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP12:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP12]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ugt i8* [[TMP14]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ult i8* [[TMP13]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP10]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but the GEP for
; pointer %A is inbounds. The index %mul doesn't carry the nsw flag, so
; the SCEV expression for %mul can wrap and we need a SCEV predicate to
; continue the analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.
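;
; Note that the inbounds GEP takes the i32 index %mul directly, so the
; index is sign-extended to the pointer width; if %mul wrapped, the
; access would jump to a very different address. The versioned loop is
; therefore only entered when the no-wrap checks pass.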

define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP5]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; LV-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP11]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP12:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP12]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ugt i8* [[TMP14]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ult i8* [[TMP13]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP10]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}