// RUN: mlir-opt %s \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --linalg-bufferize --convert-linalg-to-loops \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
// RUN:   --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed" ]
}>

#redsum = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>, // A
    affine_map<(i,j,k) -> (i,j,k)>, // B
    affine_map<(i,j,k) -> (i,j)>    // X (out)
  ],
  iterator_types = ["parallel", "parallel", "reduction"],
  doc = "X(i,j) = SUM_k A(i,j,k) * B(i,j,k)"
}

module {
  func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
               %argb: tensor<?x?x?xi32, #SparseTensor>)
                   -> tensor<?x?xi32, #SparseMatrix> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?x?xi32, #SparseTensor>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?x?xi32, #SparseTensor>
    %xinit = sparse_tensor.init [%d0, %d1] : tensor<?x?xi32, #SparseMatrix>
    %0 = linalg.generic #redsum
      ins(%arga, %argb: tensor<?x?x?xi32, #SparseTensor>,
                        tensor<?x?x?xi32, #SparseTensor>)
      outs(%xinit: tensor<?x?xi32, #SparseMatrix>) {
        ^bb(%a: i32, %b: i32, %x: i32):
          %0 = arith.muli %a, %b : i32
          %1 = arith.addi %x, %0 : i32
          linalg.yield %1 : i32
    } -> tensor<?x?xi32, #SparseMatrix>
    return %0 : tensor<?x?xi32, #SparseMatrix>
  }

  // Driver method to call and verify tensor kernel.
  func @entry() {
    %c0 = arith.constant 0 : index
    %i0 = arith.constant -1 : i32

    // Setup very sparse 3-d tensors.
    %t1 = arith.constant sparse<
       [ [1,1,3], [2,0,0], [2,2,1], [2,2,2], [2,2,3] ], [ 1, 2, 3, 4, 5 ]
    > : tensor<3x3x4xi32>
    %t2 = arith.constant sparse<
       [ [1,0,0], [1,1,3], [2,2,1], [2,2,3] ], [ 6, 7, 8, 9 ]
    > : tensor<3x3x4xi32>
    %st1 = sparse_tensor.convert %t1
      : tensor<3x3x4xi32> to tensor<?x?x?xi32, #SparseTensor>
    %st2 = sparse_tensor.convert %t2
      : tensor<3x3x4xi32> to tensor<?x?x?xi32, #SparseTensor>

    // Call kernel.
    %0 = call @redsum(%st1, %st2)
      : (tensor<?x?x?xi32, #SparseTensor>,
         tensor<?x?x?xi32, #SparseTensor>) -> tensor<?x?xi32, #SparseMatrix>

    //
    // Verify results. Only two entries stored in result. Correct structure.
    //
    // CHECK: ( 7, 69, -1, -1 )
    // CHECK-NEXT: ( ( 0, 0, 0 ), ( 0, 7, 0 ), ( 0, 0, 69 ) )
    //
    %val = sparse_tensor.values %0
      : tensor<?x?xi32, #SparseMatrix> to memref<?xi32>
    %vv = vector.transfer_read %val[%c0], %i0: memref<?xi32>, vector<4xi32>
    vector.print %vv : vector<4xi32>
    %dm = sparse_tensor.convert %0
      : tensor<?x?xi32, #SparseMatrix> to tensor<?x?xi32>
    %db = bufferization.to_memref %dm : memref<?x?xi32>
    %vm = vector.transfer_read %db[%c0, %c0], %i0: memref<?x?xi32>, vector<3x3xi32>
    vector.print %vm : vector<3x3xi32>

    // Release the resources.
    sparse_tensor.release %st1 : tensor<?x?x?xi32, #SparseTensor>
    sparse_tensor.release %st2 : tensor<?x?x?xi32, #SparseTensor>
    sparse_tensor.release %0 : tensor<?x?xi32, #SparseMatrix>
    memref.dealloc %db : memref<?x?xi32>
    return
  }
}