; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX2

7define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
8; X32-SSE-LABEL: trunc_ashr_v4i64:
9; X32-SSE:       # BB#0:
10; X32-SSE-NEXT:    psrad $31, %xmm1
11; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
12; X32-SSE-NEXT:    psrad $31, %xmm0
13; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
14; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
15; X32-SSE-NEXT:    retl
16;
17; X64-SSE-LABEL: trunc_ashr_v4i64:
18; X64-SSE:       # BB#0:
19; X64-SSE-NEXT:    psrad $31, %xmm1
20; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
21; X64-SSE-NEXT:    psrad $31, %xmm0
22; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
23; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
24; X64-SSE-NEXT:    retq
25;
26; X64-AVX1-LABEL: trunc_ashr_v4i64:
27; X64-AVX1:       # BB#0:
28; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
29; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
30; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
31; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
32; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
33; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
34; X64-AVX1-NEXT:    vzeroupper
35; X64-AVX1-NEXT:    retq
36;
37; X64-AVX2-LABEL: trunc_ashr_v4i64:
38; X64-AVX2:       # BB#0:
39; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
40; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
41; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
42; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
43; X64-AVX2-NEXT:    vzeroupper
44; X64-AVX2-NEXT:    retq
45  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
46  %2 = trunc <4 x i64> %1 to <4 x i32>
47  ret <4 x i32> %2
48}
; trunc(ashr(x, 31)): each i32 becomes all-zeros or all-ones, so truncation
; to i16 reduces to a single saturating pack of the two halves. As above,
; packsswb on sign-splat elements produces the same bits as packssdw would.
; Regenerate CHECK lines with utils/update_llc_test_checks.py on change.
define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; X32-SSE-LABEL: trunc_ashr_v8i32:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    psrad $31, %xmm1
; X32-SSE-NEXT:    psrad $31, %xmm0
; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
; X32-SSE-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v8i32:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    psrad $31, %xmm1
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX1-LABEL: trunc_ashr_v8i32:
; X64-AVX1:       # BB#0:
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT:    vzeroupper
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: trunc_ashr_v8i32:
; X64-AVX2:       # BB#0:
; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}
; Mixed sources: one half is an ashr-by-31 sign-splat, the other a pcmpgtd
; comparison mask (also all-zeros/all-ones per element), so the concat+trunc
; again folds to a single pack. The X32 run matches the constant pool via
; {{\.LCPI.*}}, the X64 runs via RIP-relative {{.*}}(%rip).
; Regenerate CHECK lines with utils/update_llc_test_checks.py on change.
define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X32-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    psrad $31, %xmm0
; X32-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
; X32-SSE-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-AVX:       # BB#0:
; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
  %3 = sext <4 x i1> %2 to <4 x i32>
  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}