; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; These tests pin down the lowering of vector selects with constant,
; 64-bit-aligned masks: each should become a single blend-style instruction
; (plus at most one register move), not a per-lane sequence.

; Constant mask <true, true, false, false>: take the low two i32 lanes from
; %A and the high two from %B. Expected lowering per the CHECK lines: movsd
; on SSE2, pblendw on SSE4.1/AVX1, vpblendd on AVX2.
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test1:
; SSE2:       # BB#0:
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT:    movapd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test1:
; SSE41:       # BB#0:
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test1:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test1:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT:    retq
  %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %B
  ret <4 x i32> %select
}

; Inverted mask <false, false, true, true>: low two i32 lanes from %B, high
; two from %A. Operands commuted in the blend, so no extra move is needed
; even on SSE2.
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
; SSE2:       # BB#0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test2:
; SSE41:       # BB#0:
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: test2:
; AVX1:       # BB#0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test2:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    retq
  %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x i32> %A, <4 x i32> %B
  ret <4 x i32> %select
}

; Same mask as test1 but on <4 x float>: SSE4.1/AVX use the floating-point
; blendpd/vblendpd instead of the integer blends above.
define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
; SSE2-LABEL: test3:
; SSE2:       # BB#0:
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE2-NEXT:    movapd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test3:
; SSE41:       # BB#0:
; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT:    retq
;
; AVX-LABEL: test3:
; AVX:       # BB#0:
; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT:    retq
  %select = select <4 x i1><i1 true, i1 true, i1 false, i1 false>, <4 x float> %A, <4 x float> %B
  ret <4 x float> %select
}

; Same mask as test2 but on <4 x float>: commuted blendpd/vblendpd, and the
; SSE2 lowering again avoids the extra movapd.
define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
; SSE2-LABEL: test4:
; SSE2:       # BB#0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test4:
; SSE41:       # BB#0:
; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       # BB#0:
; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    retq
  %select = select <4 x i1><i1 false, i1 false, i1 true, i1 true>, <4 x float> %A, <4 x float> %B
  ret <4 x float> %select
}