; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX

; PR38821

; The function builds an <8 x i16> from seven scalar i16 loads of
; data[0..6] (data[0] is inserted into both lane 0 and lane 1), then
; bitcasts the result to <2 x i64>.  Per the CHECK lines below: with
; SSE2 the SLP vectorizer leaves all seven scalar loads and eight
; insertelements in place; with AVX/AVX2 it forms a <2 x i16> load at
; data[0], a scalar load at data[2], and a <4 x i16> load at data[3],
; recombined with shufflevectors.
define <2 x i64> @load_00123456(ptr nocapture noundef readonly %data) {
; SSE-LABEL: @load_00123456(
; SSE-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[DATA:%.*]], i64 1
; SSE-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 2
; SSE-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 3
; SSE-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 4
; SSE-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 5
; SSE-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 6
; SSE-NEXT:    [[T0:%.*]] = load i16, ptr [[DATA]], align 2
; SSE-NEXT:    [[T1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; SSE-NEXT:    [[T2:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
; SSE-NEXT:    [[T3:%.*]] = load i16, ptr [[ARRAYIDX3]], align 2
; SSE-NEXT:    [[T4:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2
; SSE-NEXT:    [[T5:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; SSE-NEXT:    [[T6:%.*]] = load i16, ptr [[ARRAYIDX6]], align 2
; SSE-NEXT:    [[VECINIT0_I_I:%.*]] = insertelement <8 x i16> undef, i16 [[T0]], i64 0
; SSE-NEXT:    [[VECINIT1_I_I:%.*]] = insertelement <8 x i16> [[VECINIT0_I_I]], i16 [[T0]], i64 1
; SSE-NEXT:    [[VECINIT2_I_I:%.*]] = insertelement <8 x i16> [[VECINIT1_I_I]], i16 [[T1]], i64 2
; SSE-NEXT:    [[VECINIT3_I_I:%.*]] = insertelement <8 x i16> [[VECINIT2_I_I]], i16 [[T2]], i64 3
; SSE-NEXT:    [[VECINIT4_I_I:%.*]] = insertelement <8 x i16> [[VECINIT3_I_I]], i16 [[T3]], i64 4
; SSE-NEXT:    [[VECINIT5_I_I:%.*]] = insertelement <8 x i16> [[VECINIT4_I_I]], i16 [[T4]], i64 5
; SSE-NEXT:    [[VECINIT6_I_I:%.*]] = insertelement <8 x i16> [[VECINIT5_I_I]], i16 [[T5]], i64 6
; SSE-NEXT:    [[VECINIT7_I_I:%.*]] = insertelement <8 x i16> [[VECINIT6_I_I]], i16 [[T6]], i64 7
; SSE-NEXT:    [[T7:%.*]] = bitcast <8 x i16> [[VECINIT7_I_I]] to <2 x i64>
; SSE-NEXT:    ret <2 x i64> [[T7]]
;
; AVX-LABEL: @load_00123456(
; AVX-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[DATA:%.*]], i64 2
; AVX-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i16, ptr [[DATA]], i64 3
; AVX-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[DATA]], align 2
; AVX-NEXT:    [[T2:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr [[ARRAYIDX3]], align 2
; AVX-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP1]], <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; AVX-NEXT:    [[VECINIT2_I_I2:%.*]] = shufflevector <8 x i16> [[TMP3]], <8 x i16> [[TMP3]], <8 x i32> <i32 0, i32 8, i32 9, i32 3, i32 4, i32 5, i32 6, i32 7>
; AVX-NEXT:    [[VECINIT3_I_I:%.*]] = insertelement <8 x i16> [[VECINIT2_I_I2]], i16 [[T2]], i64 3
; AVX-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
; AVX-NEXT:    [[VECINIT7_I_I1:%.*]] = shufflevector <8 x i16> [[VECINIT3_I_I]], <8 x i16> [[TMP4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
; AVX-NEXT:    [[T7:%.*]] = bitcast <8 x i16> [[VECINIT7_I_I1]] to <2 x i64>
; AVX-NEXT:    ret <2 x i64> [[T7]]
;
  %arrayidx1 = getelementptr inbounds i16, ptr %data, i64 1
  %arrayidx2 = getelementptr inbounds i16, ptr %data, i64 2
  %arrayidx3 = getelementptr inbounds i16, ptr %data, i64 3
  %arrayidx4 = getelementptr inbounds i16, ptr %data, i64 4
  %arrayidx5 = getelementptr inbounds i16, ptr %data, i64 5
  %arrayidx6 = getelementptr inbounds i16, ptr %data, i64 6

  ; Seven scalar loads of data[0..6].
  %t0 = load i16, ptr %data, align 2
  %t1 = load i16, ptr %arrayidx1, align 2
  %t2 = load i16, ptr %arrayidx2, align 2
  %t3 = load i16, ptr %arrayidx3, align 2
  %t4 = load i16, ptr %arrayidx4, align 2
  %t5 = load i16, ptr %arrayidx5, align 2
  %t6 = load i16, ptr %arrayidx6, align 2

  ; Lane pattern 0,0,1,2,3,4,5,6 (t0 is inserted twice, hence the
  ; test name load_00123456).
  %vecinit0.i.i = insertelement <8 x i16> undef, i16 %t0, i64 0
  %vecinit1.i.i = insertelement <8 x i16> %vecinit0.i.i, i16 %t0, i64 1
  %vecinit2.i.i = insertelement <8 x i16> %vecinit1.i.i, i16 %t1, i64 2
  %vecinit3.i.i = insertelement <8 x i16> %vecinit2.i.i, i16 %t2, i64 3
  %vecinit4.i.i = insertelement <8 x i16> %vecinit3.i.i, i16 %t3, i64 4
  %vecinit5.i.i = insertelement <8 x i16> %vecinit4.i.i, i16 %t4, i64 5
  %vecinit6.i.i = insertelement <8 x i16> %vecinit5.i.i, i16 %t5, i64 6
  %vecinit7.i.i = insertelement <8 x i16> %vecinit6.i.i, i16 %t6, i64 7
  %t7 = bitcast <8 x i16> %vecinit7.i.i to <2 x i64>
  ret <2 x i64> %t7
}