; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mattr=sse2 -slp-vectorizer -S | FileCheck %s --check-prefix=SSE
; RUN: opt < %s -mattr=avx2 -slp-vectorizer -S | FileCheck %s --check-prefix=AVX

; With SSE, all 6 elements are vectorized as three 128-bit ops.
; With AVX, the first 4 elements are vectorized as a single 256-bit op and the
; remaining 2 elements as a 128-bit op, so no element is left scalar.
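;
; For reference, the function below is roughly the scalar equivalent of the
; following C kernel (a sketch for orientation only; the restrict/const
; qualifiers are assumptions mirroring the noalias/readonly IR attributes):
;
;   void PR28457(double *restrict q, const double *restrict p) {
;     for (int i = 0; i < 6; ++i)
;       q[i] = p[i] + 1.0;
;   }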

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define void @PR28457(double* noalias nocapture align 32 %q, double* noalias nocapture readonly align 32 %p) {
; SSE-LABEL: @PR28457(
; SSE-NEXT:    [[P0:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 0
; SSE-NEXT:    [[P2:%.*]] = getelementptr inbounds double, double* [[P]], i64 2
; SSE-NEXT:    [[P4:%.*]] = getelementptr inbounds double, double* [[P]], i64 4
; SSE-NEXT:    [[Q0:%.*]] = getelementptr inbounds double, double* [[Q:%.*]], i64 0
; SSE-NEXT:    [[Q2:%.*]] = getelementptr inbounds double, double* [[Q]], i64 2
; SSE-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, double* [[Q]], i64 4
; SSE-NEXT:    [[TMP1:%.*]] = bitcast double* [[P0]] to <2 x double>*
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
; SSE-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    [[TMP4:%.*]] = bitcast double* [[Q0]] to <2 x double>*
; SSE-NEXT:    store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 8
; SSE-NEXT:    [[TMP5:%.*]] = bitcast double* [[P2]] to <2 x double>*
; SSE-NEXT:    [[TMP6:%.*]] = load <2 x double>, <2 x double>* [[TMP5]], align 8
; SSE-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    [[TMP8:%.*]] = bitcast double* [[Q2]] to <2 x double>*
; SSE-NEXT:    store <2 x double> [[TMP7]], <2 x double>* [[TMP8]], align 8
; SSE-NEXT:    [[TMP9:%.*]] = bitcast double* [[P4]] to <2 x double>*
; SSE-NEXT:    [[TMP10:%.*]] = load <2 x double>, <2 x double>* [[TMP9]], align 8
; SSE-NEXT:    [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    [[TMP12:%.*]] = bitcast double* [[Q4]] to <2 x double>*
; SSE-NEXT:    store <2 x double> [[TMP11]], <2 x double>* [[TMP12]], align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @PR28457(
; AVX-NEXT:    [[P0:%.*]] = getelementptr inbounds double, double* [[P:%.*]], i64 0
; AVX-NEXT:    [[P4:%.*]] = getelementptr inbounds double, double* [[P]], i64 4
; AVX-NEXT:    [[Q0:%.*]] = getelementptr inbounds double, double* [[Q:%.*]], i64 0
; AVX-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, double* [[Q]], i64 4
; AVX-NEXT:    [[TMP1:%.*]] = bitcast double* [[P0]] to <4 x double>*
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, <4 x double>* [[TMP1]], align 8
; AVX-NEXT:    [[TMP3:%.*]] = fadd <4 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
; AVX-NEXT:    [[TMP4:%.*]] = bitcast double* [[Q0]] to <4 x double>*
; AVX-NEXT:    store <4 x double> [[TMP3]], <4 x double>* [[TMP4]], align 8
; AVX-NEXT:    [[TMP5:%.*]] = bitcast double* [[P4]] to <2 x double>*
; AVX-NEXT:    [[TMP6:%.*]] = load <2 x double>, <2 x double>* [[TMP5]], align 8
; AVX-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00>
; AVX-NEXT:    [[TMP8:%.*]] = bitcast double* [[Q4]] to <2 x double>*
; AVX-NEXT:    store <2 x double> [[TMP7]], <2 x double>* [[TMP8]], align 8
; AVX-NEXT:    ret void
;
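; The scalar input: six contiguous loads from %p, six fadds of 1.0, and six
; contiguous stores to %q.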
  %p0 = getelementptr inbounds double, double* %p, i64 0
  %p1 = getelementptr inbounds double, double* %p, i64 1
  %p2 = getelementptr inbounds double, double* %p, i64 2
  %p3 = getelementptr inbounds double, double* %p, i64 3
  %p4 = getelementptr inbounds double, double* %p, i64 4
  %p5 = getelementptr inbounds double, double* %p, i64 5

  %q0 = getelementptr inbounds double, double* %q, i64 0
  %q1 = getelementptr inbounds double, double* %q, i64 1
  %q2 = getelementptr inbounds double, double* %q, i64 2
  %q3 = getelementptr inbounds double, double* %q, i64 3
  %q4 = getelementptr inbounds double, double* %q, i64 4
  %q5 = getelementptr inbounds double, double* %q, i64 5

  %d0 = load double, double* %p0
  %d1 = load double, double* %p1
  %d2 = load double, double* %p2
  %d3 = load double, double* %p3
  %d4 = load double, double* %p4
  %d5 = load double, double* %p5

  %a0 = fadd double %d0, 1.0
  %a1 = fadd double %d1, 1.0
  %a2 = fadd double %d2, 1.0
  %a3 = fadd double %d3, 1.0
  %a4 = fadd double %d4, 1.0
  %a5 = fadd double %d5, 1.0

  store double %a0, double* %q0
  store double %a1, double* %q1
  store double %a2, double* %q2
  store double %a3, double* %q3
  store double %a4, double* %q4
  store double %a5, double* %q5
  ret void
}