; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
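; If the IR below changes, the assertions can be refreshed by rerunning the
; update script named above from an LLVM checkout, e.g. (the path to this
; test is left as a placeholder):
;   llvm/utils/update_llc_test_checks.py <path/to/this/test.ll>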

; UNPREDICATED
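;
; These tests exercise type legalization of unpredicated scalable-vector
; loads: a type that underfills a register is promoted into a wider container
; (nxv4i16 is loaded with ld1h into .s lanes), while a type spanning several
; registers is split into one ld1 per part, addressed with
; [x0, #n, mul vl] offsets.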

define <vscale x 4 x i16> @load_promote_4i16(<vscale x 4 x i16>* %a) {
; CHECK-LABEL: load_promote_4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
  ret <vscale x 4 x i16> %load
}

define <vscale x 16 x i16> @load_split_16i16(<vscale x 16 x i16>* %a) {
; CHECK-LABEL: load_split_16i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
  ret <vscale x 16 x i16> %load
}

define <vscale x 24 x i16> @load_split_24i16(<vscale x 24 x i16>* %a) {
; CHECK-LABEL: load_split_24i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 24 x i16>, <vscale x 24 x i16>* %a
  ret <vscale x 24 x i16> %load
}

define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
; CHECK-LABEL: load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
  ret <vscale x 32 x i16> %load
}

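; An 8-way split: the nxv16i64 return value occupies all eight SVE vector
; return registers (z0-z7), so eight consecutive ld1d loads are expected.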
define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
; CHECK-LABEL: load_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
  ret <vscale x 16 x i64> %load
}

; MASKED
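;
; For masked loads the predicate operand must be legalized together with the
; data: when the data type is split, the wide predicate is unpacked into
; per-part predicates with punpklo/punpkhi before feeding each ld1.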
70
71define <vscale x 2 x i32> @masked_load_promote_2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %pg) {
72; CHECK-LABEL: masked_load_promote_2i32:
73; CHECK:       // %bb.0:
74; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
75; CHECK-NEXT:    ret
76  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
77  ret <vscale x 2 x i32> %load
78}
79
80define <vscale x 32 x i8> @masked_load_split_32i8(<vscale x 32 x i8> *%a, <vscale x 32 x i1> %pg) {
81; CHECK-LABEL: masked_load_split_32i8:
82; CHECK:       // %bb.0:
83; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
84; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, #1, mul vl]
85; CHECK-NEXT:    ret
86  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
87  ret <vscale x 32 x i8> %load
88}
89
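; A 4-way split of data and predicate. The nxv32i1 mask already arrives split
; across two predicate registers (p0 and p1), so a single punpklo/punpkhi pair
; per register yields the four nxv8i1 parts.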
define <vscale x 32 x i16> @masked_load_split_32i16(<vscale x 32 x i16> *%a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1h { z0.h }, p2/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p1/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
  ret <vscale x 32 x i16> %load
}

define <vscale x 8 x i32> @masked_load_split_8i32(<vscale x 8 x i32> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
  ret <vscale x 8 x i32> %load
}

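; Here the nxv8i1 mask fits in one predicate register, so producing the four
; nxv2i1 parts takes two levels of punpklo/punpkhi.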
define <vscale x 8 x i64> @masked_load_split_8i64(<vscale x 8 x i64> *%a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x0]
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1d { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p2/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)

declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)

declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)

declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)

declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)