; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s



; The scalar loads from %in and %inn are consumed out of lane order by the
; multiplies, and the products are stored to %out out of order as well.  The
; SLP vectorizer must still form single <4 x i32> loads/stores, recovering the
; jumbled orders with shufflevector masks (checked below).
define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
; CHECK-LABEL: @jumbled-load(
; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
; CHECK-NEXT:    [[TMP5:%.*]] = mul <4 x i32> [[TMP2]], [[SHUFFLE]]
; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> poison, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[SHUFFLE1]], <4 x i32>* [[TMP6]], align 4
; CHECK-NEXT:    ret i32 undef
;
  %in.addr = getelementptr inbounds i32, i32* %in, i64 0
  %load.1 = load i32, i32* %in.addr, align 4
  %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 3
  %load.2 = load i32, i32* %gep.1, align 4
  %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 1
  %load.3 = load i32, i32* %gep.2, align 4
  %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 2
  %load.4 = load i32, i32* %gep.3, align 4
  %inn.addr = getelementptr inbounds i32, i32* %inn, i64 0
  %load.5 = load i32, i32* %inn.addr, align 4
  %gep.4 = getelementptr inbounds i32, i32* %inn.addr, i64 2
  %load.6 = load i32, i32* %gep.4, align 4
  %gep.5 = getelementptr inbounds i32, i32* %inn.addr, i64 3
  %load.7 = load i32, i32* %gep.5, align 4
  %gep.6 = getelementptr inbounds i32, i32* %inn.addr, i64 1
  %load.8 = load i32, i32* %gep.6, align 4
  %mul.1 = mul i32 %load.3, %load.5
  %mul.2 = mul i32 %load.2, %load.8
  %mul.3 = mul i32 %load.4, %load.7
  %mul.4 = mul i32 %load.1, %load.6
  %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
  store i32 %mul.1, i32* %gep.7, align 4
  %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
  store i32 %mul.2, i32* %gep.8, align 4
  %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
  store i32 %mul.3, i32* %gep.9, align 4
  %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
  store i32 %mul.4, i32* %gep.10, align 4

  ret i32 undef
}


; Same jumbled-access pattern as above, but here each scalar load feeds more
; than one multiply (e.g. %load.2 is squared, %load.1 and %load.3 are reused),
; so the vectorizer must shuffle the single vector load against itself.
define i32 @jumbled-load-multiuses(i32* noalias nocapture %in, i32* noalias nocapture %out) {
; CHECK-LABEL: @jumbled-load-multiuses(
; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> <i32 1, i32 2, i32 0, i32 3>
; CHECK-NEXT:    [[TMP4:%.*]] = mul <4 x i32> [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[SHUFFLE]], <4 x i32>* [[TMP5]], align 4
; CHECK-NEXT:    ret i32 undef
;
  %in.addr = getelementptr inbounds i32, i32* %in, i64 0
  %load.1 = load i32, i32* %in.addr, align 4
  %gep.1 = getelementptr inbounds i32, i32* %in.addr, i64 3
  %load.2 = load i32, i32* %gep.1, align 4
  %gep.2 = getelementptr inbounds i32, i32* %in.addr, i64 1
  %load.3 = load i32, i32* %gep.2, align 4
  %gep.3 = getelementptr inbounds i32, i32* %in.addr, i64 2
  %load.4 = load i32, i32* %gep.3, align 4
  %mul.1 = mul i32 %load.3, %load.4
  %mul.2 = mul i32 %load.2, %load.2
  %mul.3 = mul i32 %load.4, %load.1
  %mul.4 = mul i32 %load.1, %load.3
  %gep.7 = getelementptr inbounds i32, i32* %out, i64 0
  store i32 %mul.1, i32* %gep.7, align 4
  %gep.8 = getelementptr inbounds i32, i32* %out, i64 1
  store i32 %mul.2, i32* %gep.8, align 4
  %gep.9 = getelementptr inbounds i32, i32* %out, i64 2
  store i32 %mul.3, i32* %gep.9, align 4
  %gep.10 = getelementptr inbounds i32, i32* %out, i64 3
  store i32 %mul.4, i32* %gep.10, align 4

  ret i32 undef
}