; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -basic-aa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s
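; Note: -enable-mem-access-versioning=0 disables symbolic-stride versioning in
; LoopAccessAnalysis, so any runtime check in the output below should come from
; the SCEV predicates rather than from memory-access versioning.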

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; PredicatedScalarEvolution decides it needs to insert a bounds check that is
; not based on a memory access.

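; A rough C-level equivalent of the loop in @f (an illustrative sketch with
; invented variable names, not reconstructed source):
;
;   uint64_t i = 0;
;   uint32_t j = 0;
;   do {
;     uint64_t idx = (uint64_t)(j * 2u);  // 32-bit index, zero-extended
;     int32_t t = a[idx] * b[idx];
;     a[i + 1] = t;
;     c[idx] = d[idx] * e[idx];
;     i++; j++;
;   } while (i != N);
;
; Because the index is a 32-bit induction variable zero-extended to 64 bits,
; SCEV appears to prove the %a recurrence only under predicates; hence
; for.body.lver.check compares N-1 against 4294967295 and checks 8 * (N-1) for
; pointer overflow instead of emitting memory checks.
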
define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
; CHECK-LABEL: @f(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A5:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK:       for.body.lver.check:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
; CHECK-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK:       for.body.ph.lver.orig:
; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK:       for.body.lver.orig:
; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK:       for.body.ph.ldist1:
; CHECK-NEXT:    br label [[FOR_BODY_LDIST1:%.*]]
; CHECK:       for.body.ldist1:
; CHECK-NEXT:    [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT:    [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !0
; CHECK-NEXT:    [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT:    [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT:    [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT:    [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
; CHECK-NEXT:    store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !3
; CHECK-NEXT:    [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK:       for.body.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT3:%.*]], label [[FOR_BODY]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END:%.*]]
; CHECK:       for.end.loopexit3:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare void @use64(i64)
@global_a = common local_unnamed_addr global [8192 x i32] zeroinitializer, align 16

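; Same loop as in @f, but the checked pointer is a constant offset (42 i32
; elements) into @global_a; the bound check in for.body.lver.check is built on
; the constant getelementptr expression rather than on an SSA pointer value.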
define void @f_with_offset(i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
; CHECK-LABEL: @f_with_offset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_BASE:%.*]] = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
; CHECK-NEXT:    [[A_INTPTR:%.*]] = ptrtoint i32* [[A_BASE]] to i64
; CHECK-NEXT:    call void @use64(i64 [[A_INTPTR]])
; CHECK-NEXT:    [[A:%.*]] = getelementptr i32, i32* [[A_BASE]], i32 42
; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK:       for.body.lver.check:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*), i64 [[MUL_RESULT3]]
; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], bitcast (i32* getelementptr inbounds ([8192 x i32], [8192 x i32]* @global_a, i64 0, i64 42) to i8*)
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; CHECK-NEXT:    [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
; CHECK-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK:       for.body.ph.lver.orig:
; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK:       for.body.lver.orig:
; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK:       for.body.ph.ldist1:
; CHECK-NEXT:    br label [[FOR_BODY_LDIST1:%.*]]
; CHECK:       for.body.ldist1:
; CHECK-NEXT:    [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT:    [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4, !alias.scope !5
; CHECK-NEXT:    [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT:    [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT:    [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT:    [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
; CHECK-NEXT:    store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4, !alias.scope !8
; CHECK-NEXT:    [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK:       for.body.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END:%.*]]
; CHECK:       for.end.loopexit2:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %a_base = getelementptr [8192 x i32], [8192 x i32]* @global_a, i32 0, i32 0
  %a_intptr = ptrtoint i32* %a_base to i64
  call void @use64(i64 %a_intptr)
  %a = getelementptr i32, i32* %a_base, i32 42
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We can't add a control dependency when there is a convergent call in the loop body.
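; The CHECK lines below therefore expect the loop to be left untouched: there is
; no for.body.lver.check block and no *.lver.orig/*.ldist1 clones, i.e. the loop
; is neither versioned nor distributed.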
define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) #1 {
; CHECK-LABEL: @f_with_convergent(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
; CHECK-NEXT:    [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
; CHECK-NEXT:    store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]])
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %convergentD = call i32 @llvm.convergent(i32 %loadD)
  %mulC = mul i32 %convergentD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare i32 @llvm.convergent(i32) #0

attributes #0 = { nounwind readnone convergent }
attributes #1 = { nounwind convergent }