// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"]
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["compressed", "compressed"]
}>

//
// Test with various forms of the two most elementary reshape
// operations: expand/collapse.
//
module {

  // Dense reference: expand a dense 12-vector into a dense 3x4 matrix.
  func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> {
    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
    return %0 : tensor<3x4xf64>
  }

  // Expand a sparse 12-vector into a dense 3x4 matrix.
  func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> {
    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
    return %0 : tensor<3x4xf64>
  }

  // Expand a dense 12-vector into a sparse 3x4 matrix.
  func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> {
    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
    return %0 : tensor<3x4xf64, #SparseMatrix>
  }

  // Expand a sparse 12-vector into a sparse 3x4 matrix.
  func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
    return %0 : tensor<3x4xf64, #SparseMatrix>
  }

  // Dense reference: collapse a dense 3x4 matrix into a dense 12-vector.
  func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> {
    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64>
    return %0 : tensor<12xf64>
  }

  // Collapse a sparse 3x4 matrix into a dense 12-vector.
  func.func @collapse_from_sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> {
    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64>
    return %0 : tensor<12xf64>
  }

  // Collapse a dense 3x4 matrix into a sparse 12-vector.
  func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> {
    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64, #SparseVector>
    return %0 : tensor<12xf64, #SparseVector>
  }

  // Collapse a sparse 3x4 matrix into a sparse 12-vector.
  func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
    return %0 : tensor<12xf64, #SparseVector>
  }

  //
  // Main driver.
  //
  func.func @entry() {
    %c0 = arith.constant 0 : index
    %df = arith.constant -1.0 : f64

    // Setup test vectors and matrices.
    %v = arith.constant dense <[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
                                 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]> : tensor<12xf64>
    %m = arith.constant dense <[ [ 1.1, 1.2, 1.3, 1.4 ],
                                 [ 2.1, 2.2, 2.3, 2.4 ],
                                 [ 3.1, 3.2, 3.3, 3.4 ]]> : tensor<3x4xf64>
    %sv = sparse_tensor.convert %v : tensor<12xf64> to tensor<12xf64, #SparseVector>
    %sm = sparse_tensor.convert %m : tensor<3x4xf64> to tensor<3x4xf64, #SparseMatrix>

    // Call the kernels.
    %expand0 = call @expand_dense(%v) : (tensor<12xf64>) -> tensor<3x4xf64>
    %expand1 = call @expand_from_sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64>
    %expand2 = call @expand_to_sparse(%v) : (tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix>
    %expand3 = call @expand_sparse2sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix>

    %collapse0 = call @collapse_dense(%m) : (tensor<3x4xf64>) -> tensor<12xf64>
    %collapse1 = call @collapse_from_sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64>
    %collapse2 = call @collapse_to_sparse(%m) : (tensor<3x4xf64>) -> tensor<12xf64, #SparseVector>
    %collapse3 = call @collapse_sparse2sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector>

    //
    // Verify result.
    //
    // CHECK: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) )
    // CHECK-NEXT: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) )
    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1, -1 )
    // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1, -1 )
    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, -1, -1, -1, -1 )
    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, -1, -1, -1, -1 )
    //
    %m0 = vector.transfer_read %expand0[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
    vector.print %m0 : vector<3x4xf64>
    %m1 = vector.transfer_read %expand1[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
    vector.print %m1 : vector<3x4xf64>
    %a2 = sparse_tensor.values %expand2 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
    %m2 = vector.transfer_read %a2[%c0], %df: memref<?xf64>, vector<16xf64>
    vector.print %m2 : vector<16xf64>
    %a3 = sparse_tensor.values %expand3 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
    %m3 = vector.transfer_read %a3[%c0], %df: memref<?xf64>, vector<16xf64>
    vector.print %m3 : vector<16xf64>

    %v0 = vector.transfer_read %collapse0[%c0], %df: tensor<12xf64>, vector<12xf64>
    vector.print %v0 : vector<12xf64>
    %v1 = vector.transfer_read %collapse1[%c0], %df: tensor<12xf64>, vector<12xf64>
    vector.print %v1 : vector<12xf64>
    %b2 = sparse_tensor.values %collapse2 : tensor<12xf64, #SparseVector> to memref<?xf64>
    %v2 = vector.transfer_read %b2[%c0], %df: memref<?xf64>, vector<16xf64>
    vector.print %v2 : vector<16xf64>
    %b3 = sparse_tensor.values %collapse3 : tensor<12xf64, #SparseVector> to memref<?xf64>
    %v3 = vector.transfer_read %b3[%c0], %df: memref<?xf64>, vector<16xf64>
    vector.print %v3 : vector<16xf64>

    // Release sparse resources.
    bufferization.dealloc_tensor %sv : tensor<12xf64, #SparseVector>
    bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
    bufferization.dealloc_tensor %expand2 : tensor<3x4xf64, #SparseMatrix>
    bufferization.dealloc_tensor %expand3 : tensor<3x4xf64, #SparseMatrix>
    bufferization.dealloc_tensor %collapse2 : tensor<12xf64, #SparseVector>
    bufferization.dealloc_tensor %collapse3 : tensor<12xf64, #SparseVector>

    // Release dense resources.
    bufferization.dealloc_tensor %expand1 : tensor<3x4xf64>
    bufferization.dealloc_tensor %collapse1 : tensor<12xf64>

    return
  }
}