; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

@A = common global [2000 x double] zeroinitializer, align 16
@B = common global [2000 x double] zeroinitializer, align 16
@C = common global [2000 x float] zeroinitializer, align 16
@D = common global [2000 x float] zeroinitializer, align 16

; Function Attrs: nounwind ssp uwtable
define void @foo_3double(i32 %u) #0 {
; CHECK-LABEL: @foo_3double(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 3
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    [[ADD24:%.*]] = add nsw i32 [[MUL]], 2
; CHECK-NEXT:    [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64
; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[TMP6:%.*]] = load double, double* [[ARRAYIDX26]], align 8
; CHECK-NEXT:    [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM25]]
; CHECK-NEXT:    [[TMP7:%.*]] = load double, double* [[ARRAYIDX30]], align 8
; CHECK-NEXT:    [[ADD31:%.*]] = fadd double [[TMP6]], [[TMP7]]
; CHECK-NEXT:    store double [[ADD31]], double* [[ARRAYIDX26]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 3
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  %add24 = add nsw i32 %mul, 2
  %idxprom25 = sext i32 %add24 to i64
  %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
  %4 = load double, double* %arrayidx26, align 8
  %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
  %5 = load double, double* %arrayidx30, align 8
  %add31 = fadd double %4, %5
  store double %add31, double* %arrayidx26, align 8
  ret void
}
67
68; SCEV should be able to tell that accesses A[C1 + C2*i], A[C1 + C2*i], ...
69; A[C1 + C2*i] are consecutive, if C2 is a power of 2, and C2 > C1 > 0.
70; Thus, the following code should be vectorized.
71; Function Attrs: nounwind ssp uwtable
define void @foo_2double(i32 %u) #0 {
; CHECK-LABEL: @foo_2double(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 2
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}
111
112; Similar to the previous test, but with different datatype.
113; Function Attrs: nounwind ssp uwtable
define void @foo_4float(i32 %u) #0 {
; CHECK-LABEL: @foo_4float(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[U]], 4
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[ARRAYIDX4]] to <4 x float>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul nsw i32 %u, 4
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
  %0 = load float, float* %arrayidx, align 4
  %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
  %1 = load float, float* %arrayidx4, align 4
  %add5 = fadd float %0, %1
  store float %add5, float* %arrayidx, align 4
  %add11 = add nsw i32 %mul, 1
  %idxprom12 = sext i32 %add11 to i64
  %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
  %2 = load float, float* %arrayidx13, align 4
  %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
  %3 = load float, float* %arrayidx17, align 4
  %add18 = fadd float %2, %3
  store float %add18, float* %arrayidx13, align 4
  %add24 = add nsw i32 %mul, 2
  %idxprom25 = sext i32 %add24 to i64
  %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
  %4 = load float, float* %arrayidx26, align 4
  %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
  %5 = load float, float* %arrayidx30, align 4
  %add31 = fadd float %4, %5
  store float %add31, float* %arrayidx26, align 4
  %add37 = add nsw i32 %mul, 3
  %idxprom38 = sext i32 %add37 to i64
  %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
  %6 = load float, float* %arrayidx39, align 4
  %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
  %7 = load float, float* %arrayidx43, align 4
  %add44 = fadd float %6, %7
  store float %add44, float* %arrayidx39, align 4
  ret void
}
169
170; Similar to the previous tests, but now we are dealing with AddRec SCEV.
171; Function Attrs: nounwind ssp uwtable
define i32 @foo_loop(double* %A, i32 %n) #0 {
; CHECK-LABEL: @foo_loop(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], 2
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
; CHECK-NEXT:    [[INC]] = add nsw i32 [[TMP0]], 1
; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
entry:
  %A.addr = alloca double*, align 8
  %n.addr = alloca i32, align 4
  %sum = alloca double, align 8
  %i = alloca i32, align 4
  store double* %A, double** %A.addr, align 8
  store i32 %n, i32* %n.addr, align 4
  store double 0.000000e+00, double* %sum, align 8
  store i32 0, i32* %i, align 4
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
  %mul = mul nsw i32 %0, 2
  %idxprom = sext i32 %mul to i64
  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
  %2 = load double, double* %arrayidx, align 8
  %mul1 = fmul double 7.000000e+00, %2
  %add = add nsw i32 %mul, 1
  %idxprom3 = sext i32 %add to i64
  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
  %3 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double 7.000000e+00, %3
  %add6 = fadd double %mul1, %mul5
  %add7 = fadd double %1, %add6
  store double %add7, double* %sum, align 8
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %i, align 4
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi double [ %add7, %for.body ]
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
  %conv = fptosi double %.lcssa to i32
  ret i32 %conv
}
258
259; Similar to foo_2double but with a non-power-of-2 factor and potential
260; wrapping (both indices wrap or both don't in the same time)
261; Function Attrs: nounwind ssp uwtable
define void @foo_2double_non_power_of_2(i32 %u) #0 {
; CHECK-LABEL: @foo_2double_non_power_of_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD6]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul i32 %u, 6
  %add6 = add i32 %mul, 6
  %idxprom = sext i32 %add6 to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add7 = add i32 %mul, 7
  %idxprom12 = sext i32 %add7 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}
303
304; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's
305; Function Attrs: nounwind ssp uwtable
define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
; CHECK-LABEL: @foo_2double_non_power_of_2_zext(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[U_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[U:%.*]], i32* [[U_ADDR]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[U]], 6
; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[MUL]], 6
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[ADD6]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %u.addr = alloca i32, align 4
  store i32 %u, i32* %u.addr, align 4
  %mul = mul i32 %u, 6
  %add6 = add i32 %mul, 6
  %idxprom = zext i32 %add6 to i64
  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
  %1 = load double, double* %arrayidx4, align 8
  %add5 = fadd double %0, %1
  store double %add5, double* %arrayidx, align 8
  %add7 = add i32 %mul, 7
  %idxprom12 = zext i32 %add7 to i64
  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
  %2 = load double, double* %arrayidx13, align 8
  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
  %3 = load double, double* %arrayidx17, align 8
  %add18 = fadd double %2, %3
  store double %add18, double* %arrayidx13, align 8
  ret void
}
347
348; Similar to foo_2double_non_power_of_2, but now we are dealing with AddRec SCEV.
349; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and
350; potential wrapping (both indices wrap or both don't in the same time)
351; Function Attrs: nounwind ssp uwtable
define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 {
; CHECK-LABEL: @foo_loop_non_power_of_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double*, align 8
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[SUM:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store double* [[A:%.*]], double** [[A_ADDR]], align 8
; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store double 0.000000e+00, double* [[SUM]], align 8
; CHECK-NEXT:    store i32 0, i32* [[I]], align 4
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0]], 12
; CHECK-NEXT:    [[ADD_5:%.*]] = add i32 [[MUL]], 5
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[ADD_5]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
; CHECK-NEXT:    [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]]
; CHECK-NEXT:    [[ADD7]] = fadd double [[TMP1]], [[ADD6]]
; CHECK-NEXT:    store double [[ADD7]], double* [[SUM]], align 8
; CHECK-NEXT:    [[INC]] = add i32 [[TMP0]], 1
; CHECK-NEXT:    store i32 [[INC]], i32* [[I]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
entry:
  %A.addr = alloca double*, align 8
  %n.addr = alloca i32, align 4
  %sum = alloca double, align 8
  %i = alloca i32, align 4
  store double* %A, double** %A.addr, align 8
  store i32 %n, i32* %n.addr, align 4
  store double 0.000000e+00, double* %sum, align 8
  store i32 0, i32* %i, align 4
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
  %mul = mul i32 %0, 12
  %add.5 = add i32 %mul, 5
  %idxprom = sext i32 %add.5 to i64
  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
  %2 = load double, double* %arrayidx, align 8
  %mul1 = fmul double 7.000000e+00, %2
  %add.6 = add i32 %mul, 6
  %idxprom3 = sext i32 %add.6 to i64
  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
  %3 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double 7.000000e+00, %3
  %add6 = fadd double %mul1, %mul5
  %add7 = fadd double %1, %add6
  store double %add7, double* %sum, align 8
  %inc = add i32 %0, 1
  store i32 %inc, i32* %i, align 4
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  %split = phi double [ %add7, %for.body ]
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
  %conv = fptosi double %.lcssa to i32
  ret i32 %conv
}
440
441; This is generated by `clang -std=c11 -Wpedantic -Wall -O3 main.c -S -o - -emit-llvm`
442; with !{!"clang version 7.0.0 (trunk 337339) (llvm/trunk 337344)"} and stripping off
443; the !tbaa metadata nodes to fit the rest of the test file, where `cat main.c` is:
444;
445;  double bar(double *a, unsigned n) {
446;    double x = 0.0;
447;    double y = 0.0;
448;    for (unsigned i = 0; i < n; i += 2) {
449;      x += a[i];
450;      y += a[i + 1];
451;    }
452;    return x * y;
453;  }
454;
455; The resulting IR is similar to @foo_loop, but with zext's instead of sext's.
456;
457; Make sure we are able to vectorize this from now on:
458;
define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
; CHECK-LABEL: @bar(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x double> [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[TMP0]], i32 1
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret double [[MUL]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_018:%.*]] = phi i32 [ [[ADD5:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = phi <2 x double> [ [[TMP6]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ]
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_018]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8
; CHECK-NEXT:    [[TMP6]] = fadd <2 x double> [[TMP3]], [[TMP5]]
; CHECK-NEXT:    [[ADD5]] = add i32 [[I_018]], 2
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD5]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
  %cmp15 = icmp eq i32 %n, 0
  br i1 %cmp15, label %for.cond.cleanup, label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  %x.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %y.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add4, %for.body ]
  %mul = fmul double %x.0.lcssa, %y.0.lcssa
  ret double %mul

for.body:                                         ; preds = %entry, %for.body
  %i.018 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
  %y.017 = phi double [ %add4, %for.body ], [ 0.000000e+00, %entry ]
  %x.016 = phi double [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %idxprom = zext i32 %i.018 to i64
  %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %add = fadd double %x.016, %0
  %add1 = or i32 %i.018, 1
  %idxprom2 = zext i32 %add1 to i64
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 %idxprom2
  %1 = load double, double* %arrayidx3, align 8
  %add4 = fadd double %y.017, %1
  %add5 = add i32 %i.018, 2
  %cmp = icmp ult i32 %add5, %n
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

; Globals/constant expressions are not normal constants.
; They should not be treated as the usual vectorization candidates.

@g1 = external global i32, align 4
@g2 = external global i32, align 4

define void @PR33958(i32** nocapture %p) {
; CHECK-LABEL: @PR33958(
; CHECK-NEXT:    store i32* @g1, i32** [[P:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32*, i32** [[P]], i64 1
; CHECK-NEXT:    store i32* @g2, i32** [[ARRAYIDX1]], align 8
; CHECK-NEXT:    ret void
;
  store i32* @g1, i32** %p, align 8
  %arrayidx1 = getelementptr inbounds i32*, i32** %p, i64 1
  store i32* @g2, i32** %arrayidx1, align 8
  ret void
}
define void @store_constant_expression(i64* %p) {
; CHECK-LABEL: @store_constant_expression(
; CHECK-NEXT:    store i64 ptrtoint (i32* @g1 to i64), i64* [[P:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1
; CHECK-NEXT:    store i64 ptrtoint (i32* @g2 to i64), i64* [[ARRAYIDX1]], align 8
; CHECK-NEXT:    ret void
;
  store i64 ptrtoint (i32* @g1 to i64), i64* %p, align 8
  %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
  store i64 ptrtoint (i32* @g2 to i64), i64* %arrayidx1, align 8
  ret void
}
541
542attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
543
544!llvm.ident = !{!0}
545
546!0 = !{!"clang version 3.5.0 "}
547