; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

;
; PR6455 'Clear Upper Bits' Patterns
;
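; Clear the upper 32 bits of each i64 element by truncating each lane to i32
; and zero-extending it back; this folds to a single AND with a constant mask.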
define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64a:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper2xi64a:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <2 x i64> %0, i32 0
  %x1 = extractelement <2 x i64> %0, i32 1
  %trunc0 = trunc i64 %x0 to i32
  %trunc1 = trunc i64 %x1 to i32
  %ext0 = zext i32 %trunc0 to i64
  %ext1 = zext i32 %trunc1 to i64
  %v0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
  %v1 = insertelement <2 x i64> %v0,   i64 %ext1, i32 1
  ret <2 x i64> %v1
}

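; Clear the upper 16 bits of each i32 element via the same per-lane trunc/zext
; pattern; again a single vector AND (AVX2 broadcasts the splat mask).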
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32a:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper4xi32a:
; AVX1:       # BB#0:
; AVX1-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper4xi32a:
; AVX2:       # BB#0:
; AVX2-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %x0 = extractelement <4 x i32> %0, i32 0
  %x1 = extractelement <4 x i32> %0, i32 1
  %x2 = extractelement <4 x i32> %0, i32 2
  %x3 = extractelement <4 x i32> %0, i32 3
  %trunc0 = trunc i32 %x0 to i16
  %trunc1 = trunc i32 %x1 to i16
  %trunc2 = trunc i32 %x2 to i16
  %trunc3 = trunc i32 %x3 to i16
  %ext0 = zext i16 %trunc0 to i32
  %ext1 = zext i16 %trunc1 to i32
  %ext2 = zext i16 %trunc2 to i32
  %ext3 = zext i16 %trunc3 to i32
  %v0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %v1 = insertelement <4 x i32> %v0,   i32 %ext1, i32 1
  %v2 = insertelement <4 x i32> %v1,   i32 %ext2, i32 2
  %v3 = insertelement <4 x i32> %v2,   i32 %ext3, i32 3
  ret <4 x i32> %v3
}

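; Clear the upper 8 bits of each i16 element. The mask is applied with a final
; pand/vpand, but note that the current codegen first round-trips every lane
; through scalar registers rather than folding everything into the final AND.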
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
; SSE:       # BB#0:
; SSE-NEXT:    pextrw $1, %xmm0, %eax
; SSE-NEXT:    pextrw $2, %xmm0, %r9d
; SSE-NEXT:    pextrw $3, %xmm0, %edx
; SSE-NEXT:    pextrw $4, %xmm0, %r8d
; SSE-NEXT:    pextrw $5, %xmm0, %edi
; SSE-NEXT:    pextrw $6, %xmm0, %esi
; SSE-NEXT:    pextrw $7, %xmm0, %ecx
; SSE-NEXT:    movd %ecx, %xmm1
; SSE-NEXT:    movd %edx, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    movd %eax, %xmm3
; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE-NEXT:    movd %esi, %xmm1
; SSE-NEXT:    movd %r9d, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT:    movd %r8d, %xmm1
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16a:
; AVX:       # BB#0:
; AVX-NEXT:    vpextrw $1, %xmm0, %eax
; AVX-NEXT:    vpextrw $2, %xmm0, %ecx
; AVX-NEXT:    vpextrw $3, %xmm0, %edx
; AVX-NEXT:    vpextrw $4, %xmm0, %esi
; AVX-NEXT:    vpextrw $5, %xmm0, %edi
; AVX-NEXT:    vpextrw $6, %xmm0, %r8d
; AVX-NEXT:    vpextrw $7, %xmm0, %r9d
; AVX-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $2, %ecx, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $3, %edx, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $4, %esi, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $6, %r8d, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $7, %r9d, %xmm0, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <8 x i16> %0, i32 0
  %x1 = extractelement <8 x i16> %0, i32 1
  %x2 = extractelement <8 x i16> %0, i32 2
  %x3 = extractelement <8 x i16> %0, i32 3
  %x4 = extractelement <8 x i16> %0, i32 4
  %x5 = extractelement <8 x i16> %0, i32 5
  %x6 = extractelement <8 x i16> %0, i32 6
  %x7 = extractelement <8 x i16> %0, i32 7
  %trunc0 = trunc i16 %x0 to i8
  %trunc1 = trunc i16 %x1 to i8
  %trunc2 = trunc i16 %x2 to i8
  %trunc3 = trunc i16 %x3 to i8
  %trunc4 = trunc i16 %x4 to i8
  %trunc5 = trunc i16 %x5 to i8
  %trunc6 = trunc i16 %x6 to i8
  %trunc7 = trunc i16 %x7 to i8
  %ext0 = zext i8 %trunc0 to i16
  %ext1 = zext i8 %trunc1 to i16
  %ext2 = zext i8 %trunc2 to i16
  %ext3 = zext i8 %trunc3 to i16
  %ext4 = zext i8 %trunc4 to i16
  %ext5 = zext i8 %trunc5 to i16
  %ext6 = zext i8 %trunc6 to i16
  %ext7 = zext i8 %trunc7 to i16
  %v0 = insertelement <8 x i16> undef, i16 %ext0, i32 0
  %v1 = insertelement <8 x i16> %v0,   i16 %ext1, i32 1
  %v2 = insertelement <8 x i16> %v1,   i16 %ext2, i32 2
  %v3 = insertelement <8 x i16> %v2,   i16 %ext3, i32 3
  %v4 = insertelement <8 x i16> %v3,   i16 %ext4, i32 4
  %v5 = insertelement <8 x i16> %v4,   i16 %ext5, i32 5
  %v6 = insertelement <8 x i16> %v5,   i16 %ext6, i32 6
  %v7 = insertelement <8 x i16> %v6,   i16 %ext7, i32 7
  ret <8 x i16> %v7
}

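; Clear the upper 4 bits of each i8 element; as above, the current codegen
; rebuilds the vector lane by lane before applying the final byte mask.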
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8a:
; SSE:       # BB#0:
; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %esi, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE-NEXT:    movd %ecx, %xmm2
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE-NEXT:    movd %edx, %xmm0
; SSE-NEXT:    movd %esi, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE-NEXT:    movd %edx, %xmm3
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSE-NEXT:    movd %r9d, %xmm0
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %r8d, %xmm0
; SSE-NEXT:    movd %ecx, %xmm2
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8a:
; AVX:       # BB#0:
; AVX-NEXT:    vpextrb $0, %xmm0, %eax
; AVX-NEXT:    vmovd %eax, %xmm1
; AVX-NEXT:    vpextrb $1, %xmm0, %eax
; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $2, %xmm0, %eax
; AVX-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $3, %xmm0, %eax
; AVX-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $4, %xmm0, %eax
; AVX-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $5, %xmm0, %eax
; AVX-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $6, %xmm0, %eax
; AVX-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $7, %xmm0, %eax
; AVX-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $8, %xmm0, %eax
; AVX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $9, %xmm0, %eax
; AVX-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $10, %xmm0, %eax
; AVX-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $11, %xmm0, %eax
; AVX-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $12, %xmm0, %eax
; AVX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $13, %xmm0, %eax
; AVX-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $14, %xmm0, %eax
; AVX-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $15, %xmm0, %eax
; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0  = extractelement <16 x i8> %0, i32 0
  %x1  = extractelement <16 x i8> %0, i32 1
  %x2  = extractelement <16 x i8> %0, i32 2
  %x3  = extractelement <16 x i8> %0, i32 3
  %x4  = extractelement <16 x i8> %0, i32 4
  %x5  = extractelement <16 x i8> %0, i32 5
  %x6  = extractelement <16 x i8> %0, i32 6
  %x7  = extractelement <16 x i8> %0, i32 7
  %x8  = extractelement <16 x i8> %0, i32 8
  %x9  = extractelement <16 x i8> %0, i32 9
  %x10 = extractelement <16 x i8> %0, i32 10
  %x11 = extractelement <16 x i8> %0, i32 11
  %x12 = extractelement <16 x i8> %0, i32 12
  %x13 = extractelement <16 x i8> %0, i32 13
  %x14 = extractelement <16 x i8> %0, i32 14
  %x15 = extractelement <16 x i8> %0, i32 15
  %trunc0  = trunc i8 %x0  to i4
  %trunc1  = trunc i8 %x1  to i4
  %trunc2  = trunc i8 %x2  to i4
  %trunc3  = trunc i8 %x3  to i4
  %trunc4  = trunc i8 %x4  to i4
  %trunc5  = trunc i8 %x5  to i4
  %trunc6  = trunc i8 %x6  to i4
  %trunc7  = trunc i8 %x7  to i4
  %trunc8  = trunc i8 %x8  to i4
  %trunc9  = trunc i8 %x9  to i4
  %trunc10 = trunc i8 %x10 to i4
  %trunc11 = trunc i8 %x11 to i4
  %trunc12 = trunc i8 %x12 to i4
  %trunc13 = trunc i8 %x13 to i4
  %trunc14 = trunc i8 %x14 to i4
  %trunc15 = trunc i8 %x15 to i4
  %ext0  = zext i4 %trunc0  to i8
  %ext1  = zext i4 %trunc1  to i8
  %ext2  = zext i4 %trunc2  to i8
  %ext3  = zext i4 %trunc3  to i8
  %ext4  = zext i4 %trunc4  to i8
  %ext5  = zext i4 %trunc5  to i8
  %ext6  = zext i4 %trunc6  to i8
  %ext7  = zext i4 %trunc7  to i8
  %ext8  = zext i4 %trunc8  to i8
  %ext9  = zext i4 %trunc9  to i8
  %ext10 = zext i4 %trunc10 to i8
  %ext11 = zext i4 %trunc11 to i8
  %ext12 = zext i4 %trunc12 to i8
  %ext13 = zext i4 %trunc13 to i8
  %ext14 = zext i4 %trunc14 to i8
  %ext15 = zext i4 %trunc15 to i8
  %v0  = insertelement <16 x i8> undef, i8 %ext0,  i32 0
  %v1  = insertelement <16 x i8> %v0,   i8 %ext1,  i32 1
  %v2  = insertelement <16 x i8> %v1,   i8 %ext2,  i32 2
  %v3  = insertelement <16 x i8> %v2,   i8 %ext3,  i32 3
  %v4  = insertelement <16 x i8> %v3,   i8 %ext4,  i32 4
  %v5  = insertelement <16 x i8> %v4,   i8 %ext5,  i32 5
  %v6  = insertelement <16 x i8> %v5,   i8 %ext6,  i32 6
  %v7  = insertelement <16 x i8> %v6,   i8 %ext7,  i32 7
  %v8  = insertelement <16 x i8> %v7,   i8 %ext8,  i32 8
  %v9  = insertelement <16 x i8> %v8,   i8 %ext9,  i32 9
  %v10 = insertelement <16 x i8> %v9,   i8 %ext10, i32 10
  %v11 = insertelement <16 x i8> %v10,  i8 %ext11, i32 11
  %v12 = insertelement <16 x i8> %v11,  i8 %ext12, i32 12
  %v13 = insertelement <16 x i8> %v12,  i8 %ext13, i32 13
  %v14 = insertelement <16 x i8> %v13,  i8 %ext14, i32 14
  %v15 = insertelement <16 x i8> %v14,  i8 %ext15, i32 15
  ret <16 x i8> %v15
}

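; The same i64 upper-bit clear expressed by bitcasting to <4 x i32> and
; inserting zero into the high (odd) elements; AVX lowers this to a single
; blend against a zero vector.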
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64b:
; SSE:       # BB#0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    movd %eax, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper2xi64b:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper2xi64b:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
  %x32 = bitcast <2 x i64> %0 to <4 x i32>
  %r0 = insertelement <4 x i32> %x32, i32 zeroinitializer, i32 1
  %r1 = insertelement <4 x i32> %r0,  i32 zeroinitializer, i32 3
  %r = bitcast <4 x i32> %r1 to <2 x i64>
  ret <2 x i64> %r
}

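; Bitcast to <8 x i16> and zero the odd elements to clear each i32's upper
; half; SSE uses a pinsrw chain, AVX a single vpblendw against zero.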
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32b:
; SSE:       # BB#0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    pinsrw $1, %eax, %xmm0
; SSE-NEXT:    pinsrw $3, %eax, %xmm0
; SSE-NEXT:    pinsrw $5, %eax, %xmm0
; SSE-NEXT:    pinsrw $7, %eax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper4xi32b:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT:    retq
  %x16 = bitcast <4 x i32> %0 to <8 x i16>
  %r0 = insertelement <8 x i16> %x16, i16 zeroinitializer, i32 1
  %r1 = insertelement <8 x i16> %r0,  i16 zeroinitializer, i32 3
  %r2 = insertelement <8 x i16> %r1,  i16 zeroinitializer, i32 5
  %r3 = insertelement <8 x i16> %r2,  i16 zeroinitializer, i32 7
  %r = bitcast <8 x i16> %r3 to <4 x i32>
  ret <4 x i32> %r
}

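; Bitcast to <16 x i8> and zero the odd bytes to clear each i16's upper half;
; note the lengthy SSE mask/shift/or sequence for each byte insertion versus
; the vpinsrb chain on AVX.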
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllw $8, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslld $24, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $40, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $56, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
; SSE-NEXT:    pandn %xmm1, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16b:
; AVX:       # BB#0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x8 = bitcast <8 x i16> %0 to <16 x i8>
  %r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
  %r1 = insertelement <16 x i8> %r0, i8 zeroinitializer, i32 3
  %r2 = insertelement <16 x i8> %r1, i8 zeroinitializer, i32 5
  %r3 = insertelement <16 x i8> %r2, i8 zeroinitializer, i32 7
  %r4 = insertelement <16 x i8> %r3, i8 zeroinitializer, i32 9
  %r5 = insertelement <16 x i8> %r4, i8 zeroinitializer, i32 11
  %r6 = insertelement <16 x i8> %r5, i8 zeroinitializer, i32 13
  %r7 = insertelement <16 x i8> %r6, i8 zeroinitializer, i32 15
  %r = bitcast <16 x i8> %r7 to <8 x i16>
  ret <8 x i16> %r
}

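; Bitcast to <32 x i4> and zero the odd nibbles to clear each byte's upper
; half; the current lowering round-trips through the stack and masks each
; nibble in scalar registers.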
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8b:
; SSE:       # BB#0:
; SSE-NEXT:    pushq %r14
; SSE-NEXT:    pushq %rbx
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT:    movd %xmm0, %rcx
; SSE-NEXT:    movq %rcx, %r8
; SSE-NEXT:    movq %rcx, %r9
; SSE-NEXT:    movq %rcx, %r10
; SSE-NEXT:    movq %rcx, %rax
; SSE-NEXT:    movq %rcx, %rdx
; SSE-NEXT:    movq %rcx, %rsi
; SSE-NEXT:    movq %rcx, %rdi
; SSE-NEXT:    andb $15, %cl
; SSE-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movd %xmm1, %rcx
; SSE-NEXT:    shrq $56, %rdi
; SSE-NEXT:    andb $15, %dil
; SSE-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %r11
; SSE-NEXT:    shrq $48, %rsi
; SSE-NEXT:    andb $15, %sil
; SSE-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %r14
; SSE-NEXT:    shrq $40, %rdx
; SSE-NEXT:    andb $15, %dl
; SSE-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rdx
; SSE-NEXT:    shrq $32, %rax
; SSE-NEXT:    andb $15, %al
; SSE-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rax
; SSE-NEXT:    shrq $24, %r10
; SSE-NEXT:    andb $15, %r10b
; SSE-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rdi
; SSE-NEXT:    shrq $16, %r9
; SSE-NEXT:    andb $15, %r9b
; SSE-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rsi
; SSE-NEXT:    shrq $8, %r8
; SSE-NEXT:    andb $15, %r8b
; SSE-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rbx
; SSE-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    andb $15, %cl
; SSE-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $56, %rbx
; SSE-NEXT:    andb $15, %bl
; SSE-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $48, %rsi
; SSE-NEXT:    andb $15, %sil
; SSE-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $40, %rdi
; SSE-NEXT:    andb $15, %dil
; SSE-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $32, %rax
; SSE-NEXT:    andb $15, %al
; SSE-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $24, %rdx
; SSE-NEXT:    andb $15, %dl
; SSE-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $16, %r14
; SSE-NEXT:    andb $15, %r14b
; SSE-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $8, %r11
; SSE-NEXT:    andb $15, %r11b
; SSE-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    popq %rbx
; SSE-NEXT:    popq %r14
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8b:
; AVX:       # BB#0:
; AVX-NEXT:    pushq %rbp
; AVX-NEXT:    pushq %r15
; AVX-NEXT:    pushq %r14
; AVX-NEXT:    pushq %r13
; AVX-NEXT:    pushq %r12
; AVX-NEXT:    pushq %rbx
; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
; AVX-NEXT:    movq %rcx, %r8
; AVX-NEXT:    movq %rcx, %r9
; AVX-NEXT:    movq %rcx, %r10
; AVX-NEXT:    movq %rcx, %r11
; AVX-NEXT:    movq %rcx, %r14
; AVX-NEXT:    movq %rcx, %r15
; AVX-NEXT:    movq %rdx, %r12
; AVX-NEXT:    movq %rdx, %r13
; AVX-NEXT:    movq %rdx, %rdi
; AVX-NEXT:    movq %rdx, %rax
; AVX-NEXT:    movq %rdx, %rsi
; AVX-NEXT:    movq %rdx, %rbx
; AVX-NEXT:    movq %rdx, %rbp
; AVX-NEXT:    andb $15, %dl
; AVX-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movq %rcx, %rdx
; AVX-NEXT:    andb $15, %cl
; AVX-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $56, %rbp
; AVX-NEXT:    andb $15, %bpl
; AVX-NEXT:    movb %bpl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $48, %rbx
; AVX-NEXT:    andb $15, %bl
; AVX-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $40, %rsi
; AVX-NEXT:    andb $15, %sil
; AVX-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $32, %rax
; AVX-NEXT:    andb $15, %al
; AVX-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $24, %rdi
; AVX-NEXT:    andb $15, %dil
; AVX-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $16, %r13
; AVX-NEXT:    andb $15, %r13b
; AVX-NEXT:    movb %r13b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $8, %r12
; AVX-NEXT:    andb $15, %r12b
; AVX-NEXT:    movb %r12b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $56, %rdx
; AVX-NEXT:    andb $15, %dl
; AVX-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $48, %r15
; AVX-NEXT:    andb $15, %r15b
; AVX-NEXT:    movb %r15b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $40, %r14
; AVX-NEXT:    andb $15, %r14b
; AVX-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $32, %r11
; AVX-NEXT:    andb $15, %r11b
; AVX-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $24, %r10
; AVX-NEXT:    andb $15, %r10b
; AVX-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $16, %r9
; AVX-NEXT:    andb $15, %r9b
; AVX-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $8, %r8
; AVX-NEXT:    andb $15, %r8b
; AVX-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX-NEXT:    popq %rbx
; AVX-NEXT:    popq %r12
; AVX-NEXT:    popq %r13
; AVX-NEXT:    popq %r14
; AVX-NEXT:    popq %r15
; AVX-NEXT:    popq %rbp
; AVX-NEXT:    retq
  %x4  = bitcast <16 x i8> %0 to <32 x i4>
  %r0  = insertelement <32 x i4> %x4,  i4 zeroinitializer, i32 1
  %r1  = insertelement <32 x i4> %r0,  i4 zeroinitializer, i32 3
  %r2  = insertelement <32 x i4> %r1,  i4 zeroinitializer, i32 5
  %r3  = insertelement <32 x i4> %r2,  i4 zeroinitializer, i32 7
  %r4  = insertelement <32 x i4> %r3,  i4 zeroinitializer, i32 9
  %r5  = insertelement <32 x i4> %r4,  i4 zeroinitializer, i32 11
  %r6  = insertelement <32 x i4> %r5,  i4 zeroinitializer, i32 13
  %r7  = insertelement <32 x i4> %r6,  i4 zeroinitializer, i32 15
  %r8  = insertelement <32 x i4> %r7,  i4 zeroinitializer, i32 17
  %r9  = insertelement <32 x i4> %r8,  i4 zeroinitializer, i32 19
  %r10 = insertelement <32 x i4> %r9,  i4 zeroinitializer, i32 21
  %r11 = insertelement <32 x i4> %r10, i4 zeroinitializer, i32 23
  %r12 = insertelement <32 x i4> %r11, i4 zeroinitializer, i32 25
  %r13 = insertelement <32 x i4> %r12, i4 zeroinitializer, i32 27
  %r14 = insertelement <32 x i4> %r13, i4 zeroinitializer, i32 29
  %r15 = insertelement <32 x i4> %r14, i4 zeroinitializer, i32 31
  %r = bitcast <32 x i4> %r15 to <16 x i8>
  ret <16 x i8> %r
}

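; The same upper-bit clears expressed directly as an AND with a splat
; constant, starting with a 0x00000000FFFFFFFF mask per i64 element.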
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper2xi64c:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper2xi64c:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
  %r = and <2 x i64> <i64 4294967295, i64 4294967295>, %0
  ret <2 x i64> %r
}

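; AND with a 0xFFFF mask per i32 element.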
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper4xi32c:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT:    retq
  %r = and <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, %0
  ret <4 x i32> %r
}

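; AND with a 0xFF mask per i16 element.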
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16c:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = and <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
  ret <8 x i16> %r
}

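; AND with a 0xF mask per i8 element.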
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8c:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
  ret <16 x i8> %r
}
