; RUN: opt -passes='print-access-info' -disable-output  < %s 2>&1 | FileCheck %s

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"

; Following cases are no dependence.

; void nodep_Read_Write(int *A) {
;   int *B = A + 1;
;   for (unsigned i = 0; i < 1024; i+=3)
;     B[i] = A[i] + 1;
; }

; CHECK: function 'nodep_Read_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:     Run-time memory checks:

define void @nodep_Read_Write(i32* nocapture %A) {
entry:
  %add.ptr = getelementptr inbounds i32, i32* %A, i64 1
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %0, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
  store i32 %add, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; int nodep_Write_Read(int *A) {
;   int sum = 0;
;   for (unsigned i = 0; i < 1024; i+=4) {
;     A[i] = i;
;     sum += A[i+3];
;   }
;
;   return sum;
; }

; CHECK: function 'nodep_Write_Read':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:     Run-time memory checks:

define i32 @nodep_Write_Read(i32* nocapture %A) {
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32 %add3

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %1 = or i64 %indvars.iv, 3
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %1
  %2 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %2, %sum.013
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; void nodep_Write_Write(int *A) {
;   for (unsigned i = 0; i < 1024; i+=2) {
;     A[i] = i;
;     A[i+1] = i+1;
;   }
; }

; CHECK: function 'nodep_Write_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:     Run-time memory checks:

define void @nodep_Write_Write(i32* nocapture %A) {
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %1 = or i64 %indvars.iv, 1
  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %1
  %2 = trunc i64 %1 to i32
  store i32 %2, i32* %arrayidx3, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; Following cases are unsafe dependences and are not vectorizable.

; void unsafe_Read_Write(int *A) {
;   for (unsigned i = 0; i < 1024; i+=3)
;     A[i+3] = A[i] + 1;
; }

; CHECK: function 'unsafe_Read_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:      Backward:
; CHECK-NEXT:           %0 = load i32, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %add, i32* %arrayidx3, align 4

define void @unsafe_Read_Write(i32* nocapture %A) {
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %i.010 = phi i32 [ 0, %entry ], [ %add1, %for.body ]
  %idxprom = zext i32 %i.010 to i64
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %0, 1
  %add1 = add i32 %i.010, 3
  %idxprom2 = zext i32 %add1 to i64
  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %idxprom2
  store i32 %add, i32* %arrayidx3, align 4
  %cmp = icmp ult i32 %add1, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; int unsafe_Write_Read(int *A) {
;   int sum = 0;
;   for (unsigned i = 0; i < 1024; i+=4) {
;     A[i] = i;
;     sum += A[i+4];
;   }
;
;   return sum;
; }

; CHECK: function 'unsafe_Write_Read':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:      Backward:
; CHECK-NEXT:           store i32 %0, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           %1 = load i32, i32* %arrayidx2, align 4

define i32 @unsafe_Write_Read(i32* nocapture %A) {
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32 %add3

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %sum.013 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
  %1 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %1, %sum.013
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; void unsafe_Write_Write(int *A) {
;   for (unsigned i = 0; i < 1024; i+=2) {
;     A[i] = i;
;     A[i+2] = i+1;
;   }
; }

; CHECK: function 'unsafe_Write_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:      Backward:
; CHECK-NEXT:           store i32 %0, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %2, i32* %arrayidx3, align 4

define void @unsafe_Write_Write(i32* nocapture %A) {
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %1 = or i64 %indvars.iv, 1
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
  %2 = trunc i64 %1 to i32
  store i32 %2, i32* %arrayidx3, align 4
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; Following cases check that strided accesses can be vectorized.

; void vectorizable_Read_Write(int *A) {
;   int *B = A + 4;
;   for (unsigned i = 0; i < 1024; i+=2)
;     B[i] = A[i] + 1;
; }

; CHECK: function 'vectorizable_Read_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       BackwardVectorizable:
; CHECK-NEXT:           %0 = load i32, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %add, i32* %arrayidx2, align 4

define void @vectorizable_Read_Write(i32* nocapture %A) {
entry:
  %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %0, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
  store i32 %add, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; int vectorizable_Write_Read(int *A) {
;   int *B = A + 4;
;   int sum = 0;
;   for (unsigned i = 0; i < 1024; i+=2) {
;     A[i] = i;
;     sum += B[i];
;   }
;
;   return sum;
; }

; CHECK: function 'vectorizable_Write_Read':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       BackwardVectorizable:
; CHECK-NEXT:           store i32 %0, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           %1 = load i32, i32* %arrayidx2, align 4

define i32 @vectorizable_Write_Read(i32* nocapture %A) {
entry:
  %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32 %add

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %sum.013
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; void vectorizable_Write_Write(int *A) {
;   int *B = A + 4;
;   for (unsigned i = 0; i < 1024; i+=2) {
;     A[i] = i;
;     B[i] = i+1;
;   }
; }

; CHECK: function 'vectorizable_Write_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       BackwardVectorizable:
; CHECK-NEXT:           store i32 %0, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %2, i32* %arrayidx2, align 4

define void @vectorizable_Write_Write(i32* nocapture %A) {
entry:
  %add.ptr = getelementptr inbounds i32, i32* %A, i64 4
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %0 = trunc i64 %indvars.iv to i32
  store i32 %0, i32* %arrayidx, align 4
  %1 = or i64 %indvars.iv, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %add.ptr, i64 %indvars.iv
  %2 = trunc i64 %1 to i32
  store i32 %2, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; void vectorizable_unscaled_Read_Write(int *A) {
;   int *B = (int *)((char *)A + 14);
;   for (unsigned i = 0; i < 1024; i+=2)
;     B[i] = A[i] + 1;
; }

; FIXME: This case looks like previous case @vectorizable_Read_Write. It should
; be vectorizable.

; CHECK: function 'vectorizable_unscaled_Read_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence that prevents store-to-load forwarding.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       BackwardVectorizableButPreventsForwarding:
; CHECK-NEXT:           %2 = load i32, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %add, i32* %arrayidx2, align 4

define void @vectorizable_unscaled_Read_Write(i32* nocapture %A) {
entry:
  %0 = bitcast i32* %A to i8*
  %add.ptr = getelementptr inbounds i8, i8* %0, i64 14
  %1 = bitcast i8* %add.ptr to i32*
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %2 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %2, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
  store i32 %add, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; int vectorizable_unscaled_Write_Read(int *A) {
;   int *B = (int *)((char *)A + 17);
;   int sum = 0;
;   for (unsigned i = 0; i < 1024; i+=2) {
;     A[i] = i;
;     sum += B[i];
;   }
;
;   return sum;
; }

; CHECK: function 'vectorizable_unscaled_Write_Read':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Memory dependences are safe
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       BackwardVectorizable:
; CHECK-NEXT:           store i32 %2, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           %3 = load i32, i32* %arrayidx2, align 4

define i32 @vectorizable_unscaled_Write_Read(i32* nocapture %A) {
entry:
  %0 = bitcast i32* %A to i8*
  %add.ptr = getelementptr inbounds i8, i8* %0, i64 17
  %1 = bitcast i8* %add.ptr to i32*
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32 %add

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %sum.013 = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %2 = trunc i64 %indvars.iv to i32
  store i32 %2, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
  %3 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %3, %sum.013
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; void unsafe_unscaled_Read_Write(int *A) {
;   int *B = (int *)((char *)A + 11);
;   for (unsigned i = 0; i < 1024; i+=2)
;     B[i] = A[i] + 1;
; }

; CHECK: function 'unsafe_unscaled_Read_Write':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       Backward:
; CHECK-NEXT:           %2 = load i32, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %add, i32* %arrayidx2, align 4

define void @unsafe_unscaled_Read_Write(i32* nocapture %A) {
entry:
  %0 = bitcast i32* %A to i8*
  %add.ptr = getelementptr inbounds i8, i8* %0, i64 11
  %1 = bitcast i8* %add.ptr to i32*
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %2 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %2, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
  store i32 %add, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; CHECK: function 'unsafe_unscaled_Read_Write2':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       Backward:
; CHECK-NEXT:           %2 = load i32, i32* %arrayidx, align 4 ->
; CHECK-NEXT:           store i32 %add, i32* %arrayidx2, align 4

; void unsafe_unscaled_Read_Write2(int *A) {
;   int *B = (int *)((char *)A + 1);
;   for (unsigned i = 0; i < 1024; i+=2)
;     B[i] = A[i] + 1;
; }

define void @unsafe_unscaled_Read_Write2(i32* nocapture %A) {
entry:
  %0 = bitcast i32* %A to i8*
  %add.ptr = getelementptr inbounds i8, i8* %0, i64 1
  %1 = bitcast i8* %add.ptr to i32*
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %2 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %2, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
  store i32 %add, i32* %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp ult i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; Following case checks that interleaved stores have dependences with another
; store and can not pass dependence check.

; void interleaved_stores(int *A) {
;   int *B = (int *) ((char *)A + 1);
;   for(int i = 0; i < 1024; i+=2) {
;     B[i]   = i;                // (1)
;     A[i+1] = i + 1;            // (2)
;     B[i+1] = i + 1;            // (3)
;   }
; }
;
; The access (2) has overlaps with (1) and (3).

; CHECK: function 'interleaved_stores':
; CHECK-NEXT:   for.body:
; CHECK-NEXT:     Report: unsafe dependent memory operations in loop
; CHECK-NEXT:     Backward loop carried data dependence.
; CHECK-NEXT:     Dependences:
; CHECK-NEXT:       Backward:
; CHECK-NEXT:           store i32 %4, i32* %arrayidx5, align 4 ->
; CHECK-NEXT:           store i32 %4, i32* %arrayidx9, align 4
; CHECK:       Backward:
; CHECK-NEXT:           store i32 %2, i32* %arrayidx2, align 4 ->
; CHECK-NEXT:           store i32 %4, i32* %arrayidx5, align 4

define void @interleaved_stores(i32* nocapture %A) {
entry:
  %0 = bitcast i32* %A to i8*
  %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
  %1 = bitcast i8* %incdec.ptr to i32*
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret void

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %2 = trunc i64 %indvars.iv to i32
  %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
  store i32 %2, i32* %arrayidx2, align 4
  %3 = or i64 %indvars.iv, 1
  %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %3
  %4 = trunc i64 %3 to i32
  store i32 %4, i32* %arrayidx5, align 4
  %arrayidx9 = getelementptr inbounds i32, i32* %1, i64 %3
  store i32 %4, i32* %arrayidx9, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %cmp = icmp slt i64 %indvars.iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}