; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic < %s | FileCheck %s
; RUN: opt -S -slp-vectorizer -mtriple=aarch64-apple-ios -mcpu=cyclone < %s | FileCheck %s
; Currently disabled for a few subtargets (e.g. Kryo):
; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=kryo < %s | FileCheck --check-prefix=NO_SLP %s
; RUN: opt -S -slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic -slp-min-reg-size=128 < %s | FileCheck --check-prefix=NO_SLP %s

define void @f(float* %r, float* %w) {
; CHECK-LABEL: @f(
; CHECK-NEXT:    [[R0:%.*]] = getelementptr inbounds float, float* [[R:%.*]], i64 0
; CHECK-NEXT:    [[W0:%.*]] = getelementptr inbounds float, float* [[W:%.*]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[R0]] to <2 x float>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, <2 x float>* [[TMP1]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = fadd <2 x float> [[TMP2]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float* [[W0]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP3]], <2 x float>* [[TMP4]], align 4
; CHECK-NEXT:    ret void
;
; NO_SLP-LABEL: @f(
; NO_SLP-NEXT:    [[R0:%.*]] = getelementptr inbounds float, float* [[R:%.*]], i64 0
; NO_SLP-NEXT:    [[R1:%.*]] = getelementptr inbounds float, float* [[R]], i64 1
; NO_SLP-NEXT:    [[F0:%.*]] = load float, float* [[R0]], align 4
; NO_SLP-NEXT:    [[F1:%.*]] = load float, float* [[R1]], align 4
; NO_SLP-NEXT:    [[ADD0:%.*]] = fadd float [[F0]], [[F0]]
; NO_SLP-NEXT:    [[ADD1:%.*]] = fadd float [[F1]], [[F1]]
; NO_SLP-NEXT:    [[W0:%.*]] = getelementptr inbounds float, float* [[W:%.*]], i64 0
; NO_SLP-NEXT:    [[W1:%.*]] = getelementptr inbounds float, float* [[W]], i64 1
; NO_SLP-NEXT:    store float [[ADD0]], float* [[W0]], align 4
; NO_SLP-NEXT:    store float [[ADD1]], float* [[W1]], align 4
; NO_SLP-NEXT:    ret void
;
  %r0 = getelementptr inbounds float, float* %r, i64 0
  %r1 = getelementptr inbounds float, float* %r, i64 1
  %f0 = load float, float* %r0
  %f1 = load float, float* %r1
  %add0 = fadd float %f0, %f0
  %add1 = fadd float %f1, %f1
  %w0 = getelementptr inbounds float, float* %w, i64 0
  %w1 = getelementptr inbounds float, float* %w, i64 1
  store float %add0, float* %w0
  store float %add1, float* %w1
  ret void
}
44