; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

; These tests just check that the plumbing is in place for @llvm.bitreverse. The
; actual output is massive at the moment as llvm.bitreverse is not yet legal.

declare i32 @llvm.bitreverse.i32(i32) readnone
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone

; fold (bitreverse undef) -> undef
define i32 @test_undef() nounwind {
; X86-LABEL: test_undef:
; X86:       # %bb.0:
; X86-NEXT:    retl
;
; X64-LABEL: test_undef:
; X64:       # %bb.0:
; X64-NEXT:    retq
  %b = call i32 @llvm.bitreverse.i32(i32 undef)
  ret i32 %b
}

; fold (bitreverse (bitreverse x)) -> x
define i32 @test_bitreverse_bitreverse(i32 %a0) nounwind {
; X86-LABEL: test_bitreverse_bitreverse:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_bitreverse_bitreverse:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    retq
  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
  %c = call i32 @llvm.bitreverse.i32(i32 %b)
  ret i32 %c
}

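; Demanded/known bits should propagate through bitreverse: bit i of the input
; maps to bit (31 - i) of the result, so the OR below sets the sign bit of each
; element (which becomes bit 0 after the reverse) and the AND then clears bit 0.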
define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
; X86-LABEL: test_demandedbits_bitreverse:
; X86:       # %bb.0:
; X86-NEXT:    pxor %xmm1, %xmm1
; X86-NEXT:    movdqa %xmm0, %xmm2
; X86-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; X86-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; X86-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; X86-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; X86-NEXT:    packuswb %xmm2, %xmm0
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $4, %xmm1
; X86-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X86-NEXT:    pand %xmm2, %xmm1
; X86-NEXT:    pand %xmm2, %xmm0
; X86-NEXT:    psllw $4, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $2, %xmm1
; X86-NEXT:    movdqa {{.*#+}} xmm2 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT:    pand %xmm2, %xmm1
; X86-NEXT:    pand %xmm2, %xmm0
; X86-NEXT:    psllw $2, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psrlw $1, %xmm1
; X86-NEXT:    movdqa {{.*#+}} xmm2 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X86-NEXT:    pand %xmm2, %xmm1
; X86-NEXT:    pand %xmm2, %xmm0
; X86-NEXT:    paddb %xmm0, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_demandedbits_bitreverse:
; X64:       # %bb.0:
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm2
; X64-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; X64-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
; X64-NEXT:    vpsrlw $4, %xmm0, %xmm0
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; X64-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
; X64-NEXT:    vpor %xmm0, %xmm2, %xmm0
; X64-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-NEXT:    retq
  %b = or <4 x i32> %a0, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
  %c = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %b)
  %d = and <4 x i32> %c, <i32 -2, i32 -2, i32 -2, i32 -2>
  ret <4 x i32> %d
}