; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64

; Test that vector shifts are converted to proper SSE2 vector shifts when the
; shift amounts are all the same.

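; shift1a: a constant splat shift amount should become a single psrlq by
; immediate.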
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrlq $32, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1a:
; X64:       # BB#0: # %entry
; X64-NEXT:    psrlq $32, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
  store <2 x i64> %lshr, <2 x i64>* %dst
  ret void
}

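; shift1b: a variable shift amount splatted into both i64 lanes still lowers to
; a single psrlq, with the count taken from an XMM register.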
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-LABEL: shift1b:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; X32-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
; X32-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X32-NEXT:    psrlq %xmm2, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1b:
; X64:       # BB#0: # %entry
; X64-NEXT:    movd %rsi, %xmm1
; X64-NEXT:    psrlq %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
  %lshr = lshr <2 x i64> %val, %1
  store <2 x i64> %lshr, <2 x i64>* %dst
  ret void
}

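; shift2a: a constant splat shift of <4 x i32> lowers to psrld by immediate.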
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrld $17, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2a:
; X64:       # BB#0: # %entry
; X64-NEXT:    psrld $17, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
  store <4 x i32> %lshr, <4 x i32>* %dst
  ret void
}

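; shift2b: a variable i32 shift amount splatted to all four lanes feeds a
; single psrld, with the scalar count moved into an XMM register via movd.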
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    psrld %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2b:
; X64:       # BB#0: # %entry
; X64-NEXT:    movd %esi, %xmm1
; X64-NEXT:    psrld %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %lshr = lshr <4 x i32> %val, %3
  store <4 x i32> %lshr, <4 x i32>* %dst
  ret void
}

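; shift3a: a constant splat shift of <8 x i16> lowers to psrlw by immediate.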
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrlw $5, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3a:
; X64:       # BB#0: # %entry
; X64-NEXT:    psrlw $5, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
  store <8 x i16> %lshr, <8 x i16>* %dst
  ret void
}

; shift3b: make sure the i16 shift amount is properly zero-extended before it
; is used as the psrlw count.
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
; X32:       # BB#0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movd %ecx, %xmm1
; X32-NEXT:    psrlw %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3b:
; X64:       # BB#0: # %entry
; X64-NEXT:    movzwl %si, %eax
; X64-NEXT:    movd %eax, %xmm1
; X64-NEXT:    psrlw %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %lshr = lshr <8 x i16> %val, %7
  store <8 x i16> %lshr, <8 x i16>* %dst
  ret void
}