1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s
3
4; PR28474
5
6;void foo();
7;
8;int test1(unsigned int *p) {
9;  int sum = 0;
10;  #pragma nounroll
11;  for (int y = 0; y < 2; y++) {
12;    // Inner loop gets unrolled
13;    for (int x = 0; x < 8; x++) {
14;      sum += p[x] * 42;
15;    }
16;    // Dummy call to keep outer loop alive
17;    foo();
18;  }
19;  return sum;
20;}
21
; Scalar reduction over p[0..7]: each element is multiplied by the constant 42
; and folded into %sum through a serial chain of adds.  The autogenerated
; CHECK lines below expect the SLP vectorizer to collapse the unrolled body
; into one <8 x i32> load, one vector multiply by a splat of 42, and a single
; @llvm.vector.reduce.add.v8i32 call feeding the loop-carried value.
; NOTE(review): the C sketch above names this function test1 but the IR
; symbol is @test -- presumably intentional; confirm before renaming.
22define i32 @test(i32* nocapture readonly %p) {
23; CHECK-LABEL: @test(
24; CHECK-NEXT:  entry:
25; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
26; CHECK:       for.body:
27; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
28; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <8 x i32>*
29; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
30; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>
31; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
32; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[SUM]]
33; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
34; CHECK:       for.end:
35; CHECK-NEXT:    ret i32 [[OP_RDX]]
36;
37entry:
; Addresses of p[1]..p[7]; p[0] is loaded through %p itself.
38  %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
39  %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2
40  %arrayidx.3 = getelementptr inbounds i32, i32* %p, i64 3
41  %arrayidx.4 = getelementptr inbounds i32, i32* %p, i64 4
42  %arrayidx.5 = getelementptr inbounds i32, i32* %p, i64 5
43  %arrayidx.6 = getelementptr inbounds i32, i32* %p, i64 6
44  %arrayidx.7 = getelementptr inbounds i32, i32* %p, i64 7
45  br label %for.body
46
47for.body:
48  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
; Fully unrolled inner loop: load p[x], multiply by 42, accumulate serially.
; The eight consecutive loads are what make the body SLP-vectorizable.
49  %tmp = load i32, i32* %p, align 4
50  %mul = mul i32 %tmp, 42
51  %add = add i32 %mul, %sum
52  %tmp5 = load i32, i32* %arrayidx.1, align 4
53  %mul.1 = mul i32 %tmp5, 42
54  %add.1 = add i32 %mul.1, %add
55  %tmp6 = load i32, i32* %arrayidx.2, align 4
56  %mul.2 = mul i32 %tmp6, 42
57  %add.2 = add i32 %mul.2, %add.1
58  %tmp7 = load i32, i32* %arrayidx.3, align 4
59  %mul.3 = mul i32 %tmp7, 42
60  %add.3 = add i32 %mul.3, %add.2
61  %tmp8 = load i32, i32* %arrayidx.4, align 4
62  %mul.4 = mul i32 %tmp8, 42
63  %add.4 = add i32 %mul.4, %add.3
64  %tmp9 = load i32, i32* %arrayidx.5, align 4
65  %mul.5 = mul i32 %tmp9, 42
66  %add.5 = add i32 %mul.5, %add.4
67  %tmp10 = load i32, i32* %arrayidx.6, align 4
68  %mul.6 = mul i32 %tmp10, 42
69  %add.6 = add i32 %mul.6, %add.5
70  %tmp11 = load i32, i32* %arrayidx.7, align 4
71  %mul.7 = mul i32 %tmp11, 42
72  %add.7 = add i32 %mul.7, %add.6
; Constant-true exit branch: the block runs once but keeps the loop/phi shape.
73  br i1 true, label %for.end, label %for.body
74
75for.end:
76  ret i32 %add.7
77}
78
79;void foo();
80;
81;int test2(unsigned int *p, unsigned int *q) {
82;  int sum = 0;
83;  #pragma nounroll
84;  for (int y = 0; y < 2; y++) {
85;    // Inner loop gets unrolled
86;    for (int x = 0; x < 8; x++) {
87;      sum += p[x] * q[x];
88;    }
89;    // Dummy call to keep outer loop alive
90;    foo();
91;  }
92;  return sum;
93;}
94
; Same reduction as @test, but the multiplicand comes from a second array:
; sum += p[x] * q[x] with both arrays read in lockstep.  The autogenerated
; CHECK lines expect two <8 x i32> loads (one per array), one vector multiply,
; and a single @llvm.vector.reduce.add.v8i32 call.
95define i32 @test2(i32* nocapture readonly %p, i32* nocapture readonly %q) {
96; CHECK-LABEL: @test2(
97; CHECK-NEXT:  entry:
98; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
99; CHECK:       for.body:
100; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
101; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <8 x i32>*
102; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
103; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[Q:%.*]] to <8 x i32>*
104; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* [[TMP2]], align 4
105; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[TMP3]]
106; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
107; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
108; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
109; CHECK:       for.end:
110; CHECK-NEXT:    ret i32 [[OP_RDX]]
111;
112entry:
; Addresses of p[1]..p[7] and q[1]..q[7]; element 0 is loaded through the
; base pointers directly.
113  %arrayidx.p.1 = getelementptr inbounds i32, i32* %p, i64 1
114  %arrayidx.p.2 = getelementptr inbounds i32, i32* %p, i64 2
115  %arrayidx.p.3 = getelementptr inbounds i32, i32* %p, i64 3
116  %arrayidx.p.4 = getelementptr inbounds i32, i32* %p, i64 4
117  %arrayidx.p.5 = getelementptr inbounds i32, i32* %p, i64 5
118  %arrayidx.p.6 = getelementptr inbounds i32, i32* %p, i64 6
119  %arrayidx.p.7 = getelementptr inbounds i32, i32* %p, i64 7
120
121  %arrayidx.q.1 = getelementptr inbounds i32, i32* %q, i64 1
122  %arrayidx.q.2 = getelementptr inbounds i32, i32* %q, i64 2
123  %arrayidx.q.3 = getelementptr inbounds i32, i32* %q, i64 3
124  %arrayidx.q.4 = getelementptr inbounds i32, i32* %q, i64 4
125  %arrayidx.q.5 = getelementptr inbounds i32, i32* %q, i64 5
126  %arrayidx.q.6 = getelementptr inbounds i32, i32* %q, i64 6
127  %arrayidx.q.7 = getelementptr inbounds i32, i32* %q, i64 7
128  br label %for.body
129
130for.body:
131  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
; Fully unrolled inner loop: load p[x] and q[x], multiply them, accumulate
; serially into the running sum.
132  %tmpp = load i32, i32* %p, align 4
133  %tmpq = load i32, i32* %q, align 4
134  %mul = mul i32 %tmpp, %tmpq
135  %add = add i32 %mul, %sum
136  %tmp5p = load i32, i32* %arrayidx.p.1, align 4
137  %tmp5q = load i32, i32* %arrayidx.q.1, align 4
138  %mul.1 = mul i32 %tmp5p, %tmp5q
139  %add.1 = add i32 %mul.1, %add
140  %tmp6p = load i32, i32* %arrayidx.p.2, align 4
141  %tmp6q = load i32, i32* %arrayidx.q.2, align 4
142  %mul.2 = mul i32 %tmp6p, %tmp6q
143  %add.2 = add i32 %mul.2, %add.1
144  %tmp7p = load i32, i32* %arrayidx.p.3, align 4
145  %tmp7q = load i32, i32* %arrayidx.q.3, align 4
146  %mul.3 = mul i32 %tmp7p, %tmp7q
147  %add.3 = add i32 %mul.3, %add.2
148  %tmp8p = load i32, i32* %arrayidx.p.4, align 4
149  %tmp8q = load i32, i32* %arrayidx.q.4, align 4
150  %mul.4 = mul i32 %tmp8p, %tmp8q
151  %add.4 = add i32 %mul.4, %add.3
152  %tmp9p = load i32, i32* %arrayidx.p.5, align 4
153  %tmp9q = load i32, i32* %arrayidx.q.5, align 4
154  %mul.5 = mul i32 %tmp9p, %tmp9q
155  %add.5 = add i32 %mul.5, %add.4
156  %tmp10p = load i32, i32* %arrayidx.p.6, align 4
157  %tmp10q = load i32, i32* %arrayidx.q.6, align 4
158  %mul.6 = mul i32 %tmp10p, %tmp10q
159  %add.6 = add i32 %mul.6, %add.5
160  %tmp11p = load i32, i32* %arrayidx.p.7, align 4
161  %tmp11q = load i32, i32* %arrayidx.q.7, align 4
162  %mul.7 = mul i32 %tmp11p, %tmp11q
163  %add.7 = add i32 %mul.7, %add.6
; Constant-true exit branch: the block runs once but keeps the loop/phi shape.
164  br i1 true, label %for.end, label %for.body
165
166for.end:
167  ret i32 %add.7
168}
169
170;void foo();
171;
172;int test3(unsigned int *p, unsigned int *q) {
173;  int sum = 0;
174;  #pragma nounroll
175;  for (int y = 0; y < 2; y++) {
176;    // Inner loop gets unrolled
177;    for (int x = 0; x < 8; x++) {
178;      sum += p[x] * q[7-x];
179;    }
180;    // Dummy call to keep outer loop alive
181;    foo();
182;  }
183;  return sum;
184;}
185
; Variant of @test2 where q is traversed backwards: sum += p[x] * q[7-x].
; Both load groups are still consecutive in memory, so the autogenerated
; CHECK lines expect two <8 x i32> loads plus a shufflevector that reverses
; the q vector (<7,6,5,4,3,2,1,0>) before the vector multiply and the
; @llvm.vector.reduce.add.v8i32 reduction.
186define i32 @test3(i32* nocapture readonly %p, i32* nocapture readonly %q) {
187; CHECK-LABEL: @test3(
188; CHECK-NEXT:  entry:
189; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
190; CHECK:       for.body:
191; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_RDX:%.*]], [[FOR_BODY]] ]
192; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to <8 x i32>*
193; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
194; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32* [[Q:%.*]] to <8 x i32>*
195; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* [[TMP2]], align 4
196; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
197; CHECK-NEXT:    [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[SHUFFLE]]
198; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
199; CHECK-NEXT:    [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
200; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
201; CHECK:       for.end:
202; CHECK-NEXT:    ret i32 [[OP_RDX]]
203;
204entry:
; Addresses of p[1]..p[7] and q[1]..q[7]; element 0 is loaded through the
; base pointers directly.
205  %arrayidx.p.1 = getelementptr inbounds i32, i32* %p, i64 1
206  %arrayidx.p.2 = getelementptr inbounds i32, i32* %p, i64 2
207  %arrayidx.p.3 = getelementptr inbounds i32, i32* %p, i64 3
208  %arrayidx.p.4 = getelementptr inbounds i32, i32* %p, i64 4
209  %arrayidx.p.5 = getelementptr inbounds i32, i32* %p, i64 5
210  %arrayidx.p.6 = getelementptr inbounds i32, i32* %p, i64 6
211  %arrayidx.p.7 = getelementptr inbounds i32, i32* %p, i64 7
212
213  %arrayidx.q.1 = getelementptr inbounds i32, i32* %q, i64 1
214  %arrayidx.q.2 = getelementptr inbounds i32, i32* %q, i64 2
215  %arrayidx.q.3 = getelementptr inbounds i32, i32* %q, i64 3
216  %arrayidx.q.4 = getelementptr inbounds i32, i32* %q, i64 4
217  %arrayidx.q.5 = getelementptr inbounds i32, i32* %q, i64 5
218  %arrayidx.q.6 = getelementptr inbounds i32, i32* %q, i64 6
219  %arrayidx.q.7 = getelementptr inbounds i32, i32* %q, i64 7
220  br label %for.body
221
222for.body:
223  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
; Fully unrolled inner loop: p is read forward (p[0]..p[7]) while q is read
; backward (q[7]..q[0]), so each product pairs p[x] with q[7-x].
224  %tmpp = load i32, i32* %p, align 4
225  %tmpq = load i32, i32* %arrayidx.q.7, align 4
226  %mul = mul i32 %tmpp, %tmpq
227  %add = add i32 %mul, %sum
228  %tmp5p = load i32, i32* %arrayidx.p.1, align 4
229  %tmp5q = load i32, i32* %arrayidx.q.6, align 4
230  %mul.1 = mul i32 %tmp5p, %tmp5q
231  %add.1 = add i32 %mul.1, %add
232  %tmp6p = load i32, i32* %arrayidx.p.2, align 4
233  %tmp6q = load i32, i32* %arrayidx.q.5, align 4
234  %mul.2 = mul i32 %tmp6p, %tmp6q
235  %add.2 = add i32 %mul.2, %add.1
236  %tmp7p = load i32, i32* %arrayidx.p.3, align 4
237  %tmp7q = load i32, i32* %arrayidx.q.4, align 4
238  %mul.3 = mul i32 %tmp7p, %tmp7q
239  %add.3 = add i32 %mul.3, %add.2
240  %tmp8p = load i32, i32* %arrayidx.p.4, align 4
241  %tmp8q = load i32, i32* %arrayidx.q.3, align 4
242  %mul.4 = mul i32 %tmp8p, %tmp8q
243  %add.4 = add i32 %mul.4, %add.3
244  %tmp9p = load i32, i32* %arrayidx.p.5, align 4
245  %tmp9q = load i32, i32* %arrayidx.q.2, align 4
246  %mul.5 = mul i32 %tmp9p, %tmp9q
247  %add.5 = add i32 %mul.5, %add.4
248  %tmp10p = load i32, i32* %arrayidx.p.6, align 4
249  %tmp10q = load i32, i32* %arrayidx.q.1, align 4
250  %mul.6 = mul i32 %tmp10p, %tmp10q
251  %add.6 = add i32 %mul.6, %add.5
252  %tmp11p = load i32, i32* %arrayidx.p.7, align 4
253  %tmp11q = load i32, i32* %q, align 4
254  %mul.7 = mul i32 %tmp11p, %tmp11q
255  %add.7 = add i32 %mul.7, %add.6
; Constant-true exit branch: the block runs once but keeps the loop/phi shape.
256  br i1 true, label %for.end, label %for.body
257
258for.end:
259  ret i32 %add.7
260}
261