; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: asserts
; RUN: opt < %s -S -debug -passes=loop-idiom 2>&1 | FileCheck %s
; The C code to generate this testcase:
; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, i + m * sizeof(int));
;   }
; }
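; The memset length (i + m * sizeof(int)) depends on the induction variable i,
; so it is not loop-invariant and loop-idiom cannot form a single runtime
; memset for the loop; the "memset size is not a loop-invariant, abort" debug
; output checked below covers this case.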

; Check on debug outputs...
; CHECK: loop-idiom Scanning: F[MemsetSize_LoopVariant] Countable Loop %for.body
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: memset size is not a loop-invariant, abort
; CHECK: loop-idiom Scanning: F[MemsetSize_Stride_Mismatch] Countable Loop %for.body
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: MemsetSizeSCEV: (4 * (sext i32 %m to i64))<nsw>
; CHECK-NEXT: PositiveStrideSCEV: (4 + (4 * (sext i32 %m to i64))<nsw>)<nsw>
; CHECK-NEXT: SCEV don't match, abort
; CHECK: loop-idiom Scanning: F[NonZeroAddressSpace] Countable Loop %for.cond1.preheader
; CHECK-NEXT: memset size is non-constant
; CHECK-NEXT: pointer is not in address space zero, abort
; CHECK: loop-idiom Scanning: F[NonAffinePointer] Countable Loop %for.body
; CHECK-NEXT: Pointer is not affine, abort

define void @MemsetSize_LoopVariant(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_LoopVariant(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[I_02]], [[MUL3]]
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[ADD]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
  %0 = bitcast i32* %add.ptr to i8*
  %add = add nsw i64 %i.02, %mul3
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %add, i1 false)
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}
; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
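; Here the pointer advances by (m + 1) ints per iteration while only m ints are
; set, so the memset size SCEV (4 * m) and the positive stride SCEV (4 + 4 * m)
; disagree and loop-idiom aborts with "SCEV don't match" (checked above).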
define void @MemsetSize_Stride_Mismatch(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @MemsetSize_Stride_Mismatch(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV3:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL4:%.*]] = mul i64 [[CONV3]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR:%.*]], i64 [[I_02]]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR2:%.*]] = getelementptr inbounds i32, i32* [[ADD_PTR]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR2]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL4]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv3 = sext i32 %m to i64
  %mul4 = mul i64 %conv3, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %i.02
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr2 = getelementptr inbounds i32, i32* %add.ptr, i64 %mul
  %0 = bitcast i32* %add.ptr2 to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul4, i1 false)
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}

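; A rough C equivalent of this testcase (a sketch only; the destination pointer
; lives in address space 2, which has no direct C spelling here):
; void test(int *ar, long n, long m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
; loop-idiom bails out because the pointer is not in address space zero, so the
; per-iteration memset call below is kept as-is.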
define void @NonZeroAddressSpace(i32 addrspace(2)* nocapture %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @NonZeroAddressSpace(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = shl nuw i64 [[M:%.*]], 2
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader:
; CHECK-NEXT:    [[I_017:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC5:%.*]], [[FOR_INC4:%.*]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[M]], [[I_017]]
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32 addrspace(2)* [[AR:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast i32 addrspace(2)* [[SCEVGEP]] to i8 addrspace(2)*
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_017]], [[M]]
; CHECK-NEXT:    call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 [[SCEVGEP1]], i8 0, i64 [[TMP0]], i1 false)
; CHECK-NEXT:    br label [[FOR_INC4]]
; CHECK:       for.inc4:
; CHECK-NEXT:    [[INC5]] = add nuw nsw i64 [[I_017]], 1
; CHECK-NEXT:    [[EXITCOND18_NOT:%.*]] = icmp eq i64 [[INC5]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND18_NOT]], label [[FOR_END6:%.*]], label [[FOR_COND1_PREHEADER]]
; CHECK:       for.end6:
; CHECK-NEXT:    ret void
;
entry:
  %0 = shl nuw i64 %m, 2
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc4, %entry
  %i.017 = phi i64 [ 0, %entry ], [ %inc5, %for.inc4 ]
  %1 = mul i64 %m, %i.017
  %scevgep = getelementptr i32, i32 addrspace(2)* %ar, i64 %1
  %scevgep1 = bitcast i32 addrspace(2)* %scevgep to i8 addrspace(2)*
  %mul = mul nsw i64 %i.017, %m
  call void @llvm.memset.p2i8.i64(i8 addrspace(2)* align 4 %scevgep1, i8 0, i64 %0, i1 false)
  br label %for.inc4

for.inc4:                                         ; preds = %for.cond1.preheader
  %inc5 = add nuw nsw i64 %i.017, 1
  %exitcond18.not = icmp eq i64 %inc5, %n
  br i1 %exitcond18.not, label %for.end6, label %for.cond1.preheader

for.end6:                                         ; preds = %for.inc4
  ret void
}

; void test(int *ar, int n, int m)
; {
;   long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;     ar = ar + i;
;   }
; }
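; Because ar itself is advanced by i on every iteration, the memset destination
; is no longer an affine (add-recurrence) expression of the induction variable,
; and loop-idiom aborts with "Pointer is not affine" (checked above).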
define void @NonAffinePointer(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @NonAffinePointer(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[AR_ADDR_03:%.*]] = phi i32* [ [[AR:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR4:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[I_02:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i64 [[I_02]], [[CONV1]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[MUL]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[ADD_PTR]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP0]], i8 0, i64 [[MUL3]], i1 false)
; CHECK-NEXT:    [[ADD_PTR4]] = getelementptr inbounds i32, i32* [[AR_ADDR_03]], i64 [[I_02]]
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INC]], [[CONV]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %ar.addr.03 = phi i32* [ %ar, %for.body.lr.ph ], [ %add.ptr4, %for.inc ]
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, i32* %ar.addr.03, i64 %mul
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul3, i1 false)
  %add.ptr4 = getelementptr inbounds i32, i32* %ar.addr.03, i64 %i.02
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nuw nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
declare void @llvm.memset.p2i8.i64(i8 addrspace(2)* nocapture writeonly, i8, i64, i1 immarg)