; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -lower-matrix-intrinsics -S < %s | FileCheck %s
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s

; Lowering a strided 3x3 load: each of the 3 columns is loaded as a
; <3 x double> at offset i * %stride, and the columns are combined into the
; <9 x double> result with shufflevectors.
define <9 x double> @strided_load_3x3(double* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_3x3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <3 x double>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <3 x double>, <3 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <3 x double>*
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <3 x double>, <3 x double>* [[VEC_CAST3]], align 8
; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[IN]], i64 [[VEC_START5]]
; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <3 x double>*
; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <3 x double>, <3 x double>* [[VEC_CAST7]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD4]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD8]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 undef, i32 undef>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
; CHECK-NEXT:    ret <9 x double> [[TMP2]]
;
entry:
  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i64(double* %in, i64 %stride, i1 false, i32 3, i32 3)
  ret <9 x double> %load
}

declare <9 x double> @llvm.matrix.column.major.load.v9f64.i64(double*, i64, i1, i32, i32)

; A 9x1 matrix is a single column, so it is loaded with one wide
; <9 x double> load and no shuffles.
define <9 x double> @strided_load_9x1(double* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_9x1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <9 x double>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <9 x double>, <9 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    ret <9 x double> [[COL_LOAD]]
;
entry:
  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i64(double* %in, i64 %stride, i1 false, i32 9, i32 1)
  ret <9 x double> %load
}

declare <8 x double> @llvm.matrix.column.major.load.v8f64.i64(double*, i64, i1, i32, i32)

; A 4x2 load produces two <4 x double> column loads that are combined by a
; single shuffle.
define <8 x double> @strided_load_4x2(double* %in, i64 %stride) {
; CHECK-LABEL: @strided_load_4x2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <4 x double>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x double>, <4 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <4 x double>*
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <4 x double>, <4 x double>* [[VEC_CAST3]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> [[COL_LOAD4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x double> [[TMP0]]
;
entry:
  %load = call <8 x double> @llvm.matrix.column.major.load.v8f64.i64(double* %in, i64 %stride, i1 false, i32 4, i32 2)
  ret <8 x double> %load
}

declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(double*, i32, i1, i32, i32)

; The stride can also be narrower than i64; with an i32 stride the offset
; arithmetic and the GEPs use i32.
define <8 x double> @strided_load_4x2_stride_i32(double* %in, i32 %stride) {
; CHECK-LABEL: @strided_load_4x2_stride_i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i32 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[IN:%.*]], i32 [[VEC_START]]
; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <4 x double>*
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x double>, <4 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i32 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[IN]], i32 [[VEC_START1]]
; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <4 x double>*
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <4 x double>, <4 x double>* [[VEC_CAST3]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> [[COL_LOAD4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x double> [[TMP0]]
;
entry:
  %load = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(double* %in, i32 %stride, i1 false, i32 4, i32 2)
  ret <8 x double> %load
}