; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

;
; PR6455 'Clear Upper Bits' Patterns
;

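; The "a" tests clear the upper bits of every element by extracting each
; scalar, truncating it to half width, and zero-extending it back.
; Here: keep the low 32 bits of each i64 element.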
define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64a:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper2xi64a:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper2xi64a:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
  %x0 = extractelement <2 x i64> %0, i32 0
  %x1 = extractelement <2 x i64> %0, i32 1
  %trunc0 = trunc i64 %x0 to i32
  %trunc1 = trunc i64 %x1 to i32
  %ext0 = zext i32 %trunc0 to i64
  %ext1 = zext i32 %trunc1 to i64
  %v0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
  %v1 = insertelement <2 x i64> %v0,   i64 %ext1, i32 1
  ret <2 x i64> %v1
}

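; Keep the low 16 bits of each i32 element via scalar trunc/zext.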
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32a:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper4xi32a:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT:    retq
  %x0 = extractelement <4 x i32> %0, i32 0
  %x1 = extractelement <4 x i32> %0, i32 1
  %x2 = extractelement <4 x i32> %0, i32 2
  %x3 = extractelement <4 x i32> %0, i32 3
  %trunc0 = trunc i32 %x0 to i16
  %trunc1 = trunc i32 %x1 to i16
  %trunc2 = trunc i32 %x2 to i16
  %trunc3 = trunc i32 %x3 to i16
  %ext0 = zext i16 %trunc0 to i32
  %ext1 = zext i16 %trunc1 to i32
  %ext2 = zext i16 %trunc2 to i32
  %ext3 = zext i16 %trunc3 to i32
  %v0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %v1 = insertelement <4 x i32> %v0,   i32 %ext1, i32 1
  %v2 = insertelement <4 x i32> %v1,   i32 %ext2, i32 2
  %v3 = insertelement <4 x i32> %v2,   i32 %ext3, i32 3
  ret <4 x i32> %v3
}

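; Keep the low 8 bits of each i16 element via scalar trunc/zext.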
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
; SSE:       # BB#0:
; SSE-NEXT:    pextrw $1, %xmm0, %eax
; SSE-NEXT:    pextrw $2, %xmm0, %r9d
; SSE-NEXT:    pextrw $3, %xmm0, %edx
; SSE-NEXT:    pextrw $4, %xmm0, %r8d
; SSE-NEXT:    pextrw $5, %xmm0, %edi
; SSE-NEXT:    pextrw $6, %xmm0, %esi
; SSE-NEXT:    pextrw $7, %xmm0, %ecx
; SSE-NEXT:    movd %ecx, %xmm1
; SSE-NEXT:    movd %edx, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    movd %eax, %xmm3
; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE-NEXT:    movd %esi, %xmm1
; SSE-NEXT:    movd %r9d, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT:    movd %r8d, %xmm1
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16a:
; AVX:       # BB#0:
; AVX-NEXT:    vpextrw $1, %xmm0, %eax
; AVX-NEXT:    vpextrw $2, %xmm0, %ecx
; AVX-NEXT:    vpextrw $3, %xmm0, %edx
; AVX-NEXT:    vpextrw $4, %xmm0, %esi
; AVX-NEXT:    vpextrw $5, %xmm0, %edi
; AVX-NEXT:    vpextrw $6, %xmm0, %r8d
; AVX-NEXT:    vpextrw $7, %xmm0, %r9d
; AVX-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $2, %ecx, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $3, %edx, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $4, %esi, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $6, %r8d, %xmm0, %xmm0
; AVX-NEXT:    vpinsrw $7, %r9d, %xmm0, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <8 x i16> %0, i32 0
  %x1 = extractelement <8 x i16> %0, i32 1
  %x2 = extractelement <8 x i16> %0, i32 2
  %x3 = extractelement <8 x i16> %0, i32 3
  %x4 = extractelement <8 x i16> %0, i32 4
  %x5 = extractelement <8 x i16> %0, i32 5
  %x6 = extractelement <8 x i16> %0, i32 6
  %x7 = extractelement <8 x i16> %0, i32 7
  %trunc0 = trunc i16 %x0 to i8
  %trunc1 = trunc i16 %x1 to i8
  %trunc2 = trunc i16 %x2 to i8
  %trunc3 = trunc i16 %x3 to i8
  %trunc4 = trunc i16 %x4 to i8
  %trunc5 = trunc i16 %x5 to i8
  %trunc6 = trunc i16 %x6 to i8
  %trunc7 = trunc i16 %x7 to i8
  %ext0 = zext i8 %trunc0 to i16
  %ext1 = zext i8 %trunc1 to i16
  %ext2 = zext i8 %trunc2 to i16
  %ext3 = zext i8 %trunc3 to i16
  %ext4 = zext i8 %trunc4 to i16
  %ext5 = zext i8 %trunc5 to i16
  %ext6 = zext i8 %trunc6 to i16
  %ext7 = zext i8 %trunc7 to i16
  %v0 = insertelement <8 x i16> undef, i16 %ext0, i32 0
  %v1 = insertelement <8 x i16> %v0,   i16 %ext1, i32 1
  %v2 = insertelement <8 x i16> %v1,   i16 %ext2, i32 2
  %v3 = insertelement <8 x i16> %v2,   i16 %ext3, i32 3
  %v4 = insertelement <8 x i16> %v3,   i16 %ext4, i32 4
  %v5 = insertelement <8 x i16> %v4,   i16 %ext5, i32 5
  %v6 = insertelement <8 x i16> %v5,   i16 %ext6, i32 6
  %v7 = insertelement <8 x i16> %v6,   i16 %ext7, i32 7
  ret <8 x i16> %v7
}

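; Keep the low 4 bits of each i8 element via scalar trunc to i4 and zext.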
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8a:
; SSE:       # BB#0:
; SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r9d
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %r8d
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %esi, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE-NEXT:    movd %ecx, %xmm2
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE-NEXT:    movd %edx, %xmm0
; SSE-NEXT:    movd %esi, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE-NEXT:    movd %edx, %xmm3
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSE-NEXT:    movd %r9d, %xmm0
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd %r8d, %xmm0
; SSE-NEXT:    movd %ecx, %xmm2
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE-NEXT:    movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8a:
; AVX:       # BB#0:
; AVX-NEXT:    vpextrb $0, %xmm0, %eax
; AVX-NEXT:    vmovd %eax, %xmm1
; AVX-NEXT:    vpextrb $1, %xmm0, %eax
; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $2, %xmm0, %eax
; AVX-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $3, %xmm0, %eax
; AVX-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $4, %xmm0, %eax
; AVX-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $5, %xmm0, %eax
; AVX-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $6, %xmm0, %eax
; AVX-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $7, %xmm0, %eax
; AVX-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $8, %xmm0, %eax
; AVX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $9, %xmm0, %eax
; AVX-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $10, %xmm0, %eax
; AVX-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $11, %xmm0, %eax
; AVX-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $12, %xmm0, %eax
; AVX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $13, %xmm0, %eax
; AVX-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $14, %xmm0, %eax
; AVX-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
; AVX-NEXT:    vpextrb $15, %xmm0, %eax
; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0  = extractelement <16 x i8> %0, i32 0
  %x1  = extractelement <16 x i8> %0, i32 1
  %x2  = extractelement <16 x i8> %0, i32 2
  %x3  = extractelement <16 x i8> %0, i32 3
  %x4  = extractelement <16 x i8> %0, i32 4
  %x5  = extractelement <16 x i8> %0, i32 5
  %x6  = extractelement <16 x i8> %0, i32 6
  %x7  = extractelement <16 x i8> %0, i32 7
  %x8  = extractelement <16 x i8> %0, i32 8
  %x9  = extractelement <16 x i8> %0, i32 9
  %x10 = extractelement <16 x i8> %0, i32 10
  %x11 = extractelement <16 x i8> %0, i32 11
  %x12 = extractelement <16 x i8> %0, i32 12
  %x13 = extractelement <16 x i8> %0, i32 13
  %x14 = extractelement <16 x i8> %0, i32 14
  %x15 = extractelement <16 x i8> %0, i32 15
  %trunc0  = trunc i8 %x0  to i4
  %trunc1  = trunc i8 %x1  to i4
  %trunc2  = trunc i8 %x2  to i4
  %trunc3  = trunc i8 %x3  to i4
  %trunc4  = trunc i8 %x4  to i4
  %trunc5  = trunc i8 %x5  to i4
  %trunc6  = trunc i8 %x6  to i4
  %trunc7  = trunc i8 %x7  to i4
  %trunc8  = trunc i8 %x8  to i4
  %trunc9  = trunc i8 %x9  to i4
  %trunc10 = trunc i8 %x10 to i4
  %trunc11 = trunc i8 %x11 to i4
  %trunc12 = trunc i8 %x12 to i4
  %trunc13 = trunc i8 %x13 to i4
  %trunc14 = trunc i8 %x14 to i4
  %trunc15 = trunc i8 %x15 to i4
  %ext0  = zext i4 %trunc0  to i8
  %ext1  = zext i4 %trunc1  to i8
  %ext2  = zext i4 %trunc2  to i8
  %ext3  = zext i4 %trunc3  to i8
  %ext4  = zext i4 %trunc4  to i8
  %ext5  = zext i4 %trunc5  to i8
  %ext6  = zext i4 %trunc6  to i8
  %ext7  = zext i4 %trunc7  to i8
  %ext8  = zext i4 %trunc8  to i8
  %ext9  = zext i4 %trunc9  to i8
  %ext10 = zext i4 %trunc10 to i8
  %ext11 = zext i4 %trunc11 to i8
  %ext12 = zext i4 %trunc12 to i8
  %ext13 = zext i4 %trunc13 to i8
  %ext14 = zext i4 %trunc14 to i8
  %ext15 = zext i4 %trunc15 to i8
  %v0  = insertelement <16 x i8> undef, i8 %ext0,  i32 0
  %v1  = insertelement <16 x i8> %v0,   i8 %ext1,  i32 1
  %v2  = insertelement <16 x i8> %v1,   i8 %ext2,  i32 2
  %v3  = insertelement <16 x i8> %v2,   i8 %ext3,  i32 3
  %v4  = insertelement <16 x i8> %v3,   i8 %ext4,  i32 4
  %v5  = insertelement <16 x i8> %v4,   i8 %ext5,  i32 5
  %v6  = insertelement <16 x i8> %v5,   i8 %ext6,  i32 6
  %v7  = insertelement <16 x i8> %v6,   i8 %ext7,  i32 7
  %v8  = insertelement <16 x i8> %v7,   i8 %ext8,  i32 8
  %v9  = insertelement <16 x i8> %v8,   i8 %ext9,  i32 9
  %v10 = insertelement <16 x i8> %v9,   i8 %ext10, i32 10
  %v11 = insertelement <16 x i8> %v10,  i8 %ext11, i32 11
  %v12 = insertelement <16 x i8> %v11,  i8 %ext12, i32 12
  %v13 = insertelement <16 x i8> %v12,  i8 %ext13, i32 13
  %v14 = insertelement <16 x i8> %v13,  i8 %ext14, i32 14
  %v15 = insertelement <16 x i8> %v14,  i8 %ext15, i32 15
  ret <16 x i8> %v15
}

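; The "b" tests clear the upper bits by bitcasting to a vector of half-width
; elements and inserting zero into every odd-index lane.
; Here: <2 x i64> viewed as <4 x i32>, zeroing elements 1 and 3.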
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64b:
; SSE:       # BB#0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    movd %eax, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[0,0]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[2,0]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper2xi64b:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper2xi64b:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
  %x32 = bitcast <2 x i64> %0 to <4 x i32>
  %r0 = insertelement <4 x i32> %x32, i32 zeroinitializer, i32 1
  %r1 = insertelement <4 x i32> %r0,  i32 zeroinitializer, i32 3
  %r = bitcast <4 x i32> %r1 to <2 x i64>
  ret <2 x i64> %r
}

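; <4 x i32> viewed as <8 x i16>, zeroing the odd i16 elements.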
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32b:
; SSE:       # BB#0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    pinsrw $1, %eax, %xmm0
; SSE-NEXT:    pinsrw $3, %eax, %xmm0
; SSE-NEXT:    pinsrw $5, %eax, %xmm0
; SSE-NEXT:    pinsrw $7, %eax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper4xi32b:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT:    retq
  %x16 = bitcast <4 x i32> %0 to <8 x i16>
  %r0 = insertelement <8 x i16> %x16, i16 zeroinitializer, i32 1
  %r1 = insertelement <8 x i16> %r0,  i16 zeroinitializer, i32 3
  %r2 = insertelement <8 x i16> %r1,  i16 zeroinitializer, i32 5
  %r3 = insertelement <8 x i16> %r2,  i16 zeroinitializer, i32 7
  %r = bitcast <8 x i16> %r3 to <4 x i32>
  ret <4 x i32> %r
}

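; <8 x i16> viewed as <16 x i8>, zeroing the odd i8 elements.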
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    movd %eax, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllw $8, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslld $24, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $40, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psllq $56, %xmm3
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4,5,6]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2,3,4]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0,1,2]
; SSE-NEXT:    pandn %xmm3, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
; SSE-NEXT:    pandn %xmm1, %xmm2
; SSE-NEXT:    por %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16b:
; AVX:       # BB#0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x8 = bitcast <8 x i16> %0 to <16 x i8>
  %r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
  %r1 = insertelement <16 x i8> %r0, i8 zeroinitializer, i32 3
  %r2 = insertelement <16 x i8> %r1, i8 zeroinitializer, i32 5
  %r3 = insertelement <16 x i8> %r2, i8 zeroinitializer, i32 7
  %r4 = insertelement <16 x i8> %r3, i8 zeroinitializer, i32 9
  %r5 = insertelement <16 x i8> %r4, i8 zeroinitializer, i32 11
  %r6 = insertelement <16 x i8> %r5, i8 zeroinitializer, i32 13
  %r7 = insertelement <16 x i8> %r6, i8 zeroinitializer, i32 15
  %r = bitcast <16 x i8> %r7 to <8 x i16>
  ret <8 x i16> %r
}

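; <16 x i8> viewed as <32 x i4>, zeroing the odd i4 nibbles.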
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8b:
; SSE:       # BB#0:
; SSE-NEXT:    pushq %r14
; SSE-NEXT:    pushq %rbx
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT:    movd %xmm0, %rcx
; SSE-NEXT:    movq %rcx, %r8
; SSE-NEXT:    movq %rcx, %r9
; SSE-NEXT:    movq %rcx, %r10
; SSE-NEXT:    movq %rcx, %rax
; SSE-NEXT:    movq %rcx, %rdx
; SSE-NEXT:    movq %rcx, %rsi
; SSE-NEXT:    movq %rcx, %rdi
; SSE-NEXT:    andb $15, %cl
; SSE-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movd %xmm1, %rcx
; SSE-NEXT:    shrq $56, %rdi
; SSE-NEXT:    andb $15, %dil
; SSE-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %r11
; SSE-NEXT:    shrq $48, %rsi
; SSE-NEXT:    andb $15, %sil
; SSE-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %r14
; SSE-NEXT:    shrq $40, %rdx
; SSE-NEXT:    andb $15, %dl
; SSE-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rdx
; SSE-NEXT:    shrq $32, %rax
; SSE-NEXT:    andb $15, %al
; SSE-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rax
; SSE-NEXT:    shrq $24, %r10
; SSE-NEXT:    andb $15, %r10b
; SSE-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rdi
; SSE-NEXT:    shrq $16, %r9
; SSE-NEXT:    andb $15, %r9b
; SSE-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rsi
; SSE-NEXT:    shrq $8, %r8
; SSE-NEXT:    andb $15, %r8b
; SSE-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq %rcx, %rbx
; SSE-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    andb $15, %cl
; SSE-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $56, %rbx
; SSE-NEXT:    andb $15, %bl
; SSE-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $48, %rsi
; SSE-NEXT:    andb $15, %sil
; SSE-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $40, %rdi
; SSE-NEXT:    andb $15, %dil
; SSE-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $32, %rax
; SSE-NEXT:    andb $15, %al
; SSE-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $24, %rdx
; SSE-NEXT:    andb $15, %dl
; SSE-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $16, %r14
; SSE-NEXT:    andb $15, %r14b
; SSE-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    shrq $8, %r11
; SSE-NEXT:    andb $15, %r11b
; SSE-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    popq %rbx
; SSE-NEXT:    popq %r14
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8b:
; AVX:       # BB#0:
; AVX-NEXT:    pushq %rbp
; AVX-NEXT:    pushq %r15
; AVX-NEXT:    pushq %r14
; AVX-NEXT:    pushq %r13
; AVX-NEXT:    pushq %r12
; AVX-NEXT:    pushq %rbx
; AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
; AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
; AVX-NEXT:    movq %rcx, %r8
; AVX-NEXT:    movq %rcx, %r9
; AVX-NEXT:    movq %rcx, %r10
; AVX-NEXT:    movq %rcx, %r11
; AVX-NEXT:    movq %rcx, %r14
; AVX-NEXT:    movq %rcx, %r15
; AVX-NEXT:    movq %rdx, %r12
; AVX-NEXT:    movq %rdx, %r13
; AVX-NEXT:    movq %rdx, %rdi
; AVX-NEXT:    movq %rdx, %rax
; AVX-NEXT:    movq %rdx, %rsi
; AVX-NEXT:    movq %rdx, %rbx
; AVX-NEXT:    movq %rdx, %rbp
; AVX-NEXT:    andb $15, %dl
; AVX-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movq %rcx, %rdx
; AVX-NEXT:    andb $15, %cl
; AVX-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $56, %rbp
; AVX-NEXT:    andb $15, %bpl
; AVX-NEXT:    movb %bpl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $48, %rbx
; AVX-NEXT:    andb $15, %bl
; AVX-NEXT:    movb %bl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $40, %rsi
; AVX-NEXT:    andb $15, %sil
; AVX-NEXT:    movb %sil, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $32, %rax
; AVX-NEXT:    andb $15, %al
; AVX-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $24, %rdi
; AVX-NEXT:    andb $15, %dil
; AVX-NEXT:    movb %dil, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $16, %r13
; AVX-NEXT:    andb $15, %r13b
; AVX-NEXT:    movb %r13b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $8, %r12
; AVX-NEXT:    andb $15, %r12b
; AVX-NEXT:    movb %r12b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $56, %rdx
; AVX-NEXT:    andb $15, %dl
; AVX-NEXT:    movb %dl, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $48, %r15
; AVX-NEXT:    andb $15, %r15b
; AVX-NEXT:    movb %r15b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $40, %r14
; AVX-NEXT:    andb $15, %r14b
; AVX-NEXT:    movb %r14b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $32, %r11
; AVX-NEXT:    andb $15, %r11b
; AVX-NEXT:    movb %r11b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $24, %r10
; AVX-NEXT:    andb $15, %r10b
; AVX-NEXT:    movb %r10b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $16, %r9
; AVX-NEXT:    andb $15, %r9b
; AVX-NEXT:    movb %r9b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    shrq $8, %r8
; AVX-NEXT:    andb $15, %r8b
; AVX-NEXT:    movb %r8b, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    movb $0, -{{[0-9]+}}(%rsp)
; AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX-NEXT:    popq %rbx
; AVX-NEXT:    popq %r12
; AVX-NEXT:    popq %r13
; AVX-NEXT:    popq %r14
; AVX-NEXT:    popq %r15
; AVX-NEXT:    popq %rbp
; AVX-NEXT:    retq
  %x4  = bitcast <16 x i8> %0 to <32 x i4>
  %r0  = insertelement <32 x i4> %x4,  i4 zeroinitializer, i32 1
  %r1  = insertelement <32 x i4> %r0,  i4 zeroinitializer, i32 3
  %r2  = insertelement <32 x i4> %r1,  i4 zeroinitializer, i32 5
  %r3  = insertelement <32 x i4> %r2,  i4 zeroinitializer, i32 7
  %r4  = insertelement <32 x i4> %r3,  i4 zeroinitializer, i32 9
  %r5  = insertelement <32 x i4> %r4,  i4 zeroinitializer, i32 11
  %r6  = insertelement <32 x i4> %r5,  i4 zeroinitializer, i32 13
  %r7  = insertelement <32 x i4> %r6,  i4 zeroinitializer, i32 15
  %r8  = insertelement <32 x i4> %r7,  i4 zeroinitializer, i32 17
  %r9  = insertelement <32 x i4> %r8,  i4 zeroinitializer, i32 19
  %r10 = insertelement <32 x i4> %r9,  i4 zeroinitializer, i32 21
  %r11 = insertelement <32 x i4> %r10, i4 zeroinitializer, i32 23
  %r12 = insertelement <32 x i4> %r11, i4 zeroinitializer, i32 25
  %r13 = insertelement <32 x i4> %r12, i4 zeroinitializer, i32 27
  %r14 = insertelement <32 x i4> %r13, i4 zeroinitializer, i32 29
  %r15 = insertelement <32 x i4> %r14, i4 zeroinitializer, i32 31
  %r = bitcast <32 x i4> %r15 to <16 x i8>
  ret <16 x i8> %r
}

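; The "c" tests clear the upper bits directly with a vector 'and' against a
; constant mask.
; Here: mask each i64 element with 4294967295 (0x00000000FFFFFFFF).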
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: _clearupper2xi64c:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _clearupper2xi64c:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT:    retq
  %r = and <2 x i64> <i64 4294967295, i64 4294967295>, %0
  ret <2 x i64> %r
}

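; Mask each i32 element with 65535 (0x0000FFFF).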
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper4xi32c:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT:    retq
  %r = and <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>, %0
  ret <4 x i32> %r
}

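; Mask each i16 element with 255 (0x00FF).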
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper8xi16c:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = and <8 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
  ret <8 x i16> %r
}

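; Mask each i8 element with 15 (0x0F).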
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _clearupper16xi8c:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
  ret <16 x i8> %r
}