; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s

; Lowering of a 3x3 column-major strided load: each of the 3 columns is
; loaded as a separate <3 x float> at offset (column * %stride), then the
; columns are recombined into the flat <9 x float> result via shuffles.
define <9 x float> @strided_load_3x3(float* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_3x3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr float, float* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast float* [[VEC_GEP]] to <3 x float>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <3 x float>, <3 x float>* [[VEC_CAST]], align 4
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr float, float* [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast float* [[VEC_GEP2]] to <3 x float>*
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <3 x float>, <3 x float>* [[VEC_CAST3]], align 4
; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr float, float* [[IN]], i64 [[VEC_START5]]
; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast float* [[VEC_GEP6]] to <3 x float>*
; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <3 x float>, <3 x float>* [[VEC_CAST7]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <3 x float> [[COL_LOAD]], <3 x float> [[COL_LOAD4]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x float> [[COL_LOAD8]], <3 x float> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <6 x float> [[TMP0]], <6 x float> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
; CHECK-NEXT:    ret <9 x float> [[TMP2]]
;
entry:
  %load = call <9 x float> @llvm.matrix.column.major.load(float* %in, i64 %stride, i1 false, i32 3, i32 3)
  ret <9 x float> %load
}

declare <9 x float> @llvm.matrix.column.major.load(float*, i64, i1, i32, i32)

; A 9x1 matrix has a single column, so the strided load lowers to one
; <9 x float> load at offset (0 * %stride) with no recombining shuffles.
define <9 x float> @strided_load_9x1(float* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_9x1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr float, float* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast float* [[VEC_GEP]] to <9 x float>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <9 x float>, <9 x float>* [[VEC_CAST]], align 4
; CHECK-NEXT:    ret <9 x float> [[COL_LOAD]]
;
entry:
  %load = call <9 x float> @llvm.matrix.column.major.load(float* %in, i64 %stride, i1 false, i32 9, i32 1)
  ret <9 x float> %load
}

declare <8 x float> @llvm.matrix.column.major.load.v8f32(float*, i64, i1, i32, i32)

; 4x2 case: two <4 x float> column loads at offsets (0 * %stride) and
; (1 * %stride), concatenated into the <8 x float> result by one shuffle.
define <8 x float> @strided_load_4x2(float* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_4x2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr float, float* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast float* [[VEC_GEP]] to <4 x float>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x float>, <4 x float>* [[VEC_CAST]], align 4
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr float, float* [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast float* [[VEC_GEP2]] to <4 x float>*
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <4 x float>, <4 x float>* [[VEC_CAST3]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x float> [[COL_LOAD]], <4 x float> [[COL_LOAD4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x float> [[TMP0]]
;
entry:
  %load = call <8 x float> @llvm.matrix.column.major.load.v8f32(float* %in, i64 %stride, i1 false, i32 4, i32 2)
  ret <8 x float> %load
}