// RUN: mlir-opt %s \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --linalg-bufferize --convert-linalg-to-loops \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
// RUN:   --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed" ]
}>

#redsum = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>, // A
    affine_map<(i,j,k) -> (i,j,k)>, // B
    affine_map<(i,j,k) -> (i,j)>    // X (out)
  ],
  iterator_types = ["parallel", "parallel", "reduction"],
  doc = "X(i,j) = SUM_k A(i,j,k) * B(i,j,k)"
}

module {
  // Kernel that computes X(i,j) = SUM_k A(i,j,k) * B(i,j,k) with all-sparse
  // operands and a sparse output.
  func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
               %argb: tensor<?x?x?xi32, #SparseTensor>)
                   -> tensor<?x?xi32, #SparseMatrix> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?x?xi32, #SparseTensor>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?x?xi32, #SparseTensor>
    %xinit = sparse_tensor.init [%d0, %d1] : tensor<?x?xi32, #SparseMatrix>
    %0 = linalg.generic #redsum
      ins(%arga, %argb: tensor<?x?x?xi32, #SparseTensor>,
                        tensor<?x?x?xi32, #SparseTensor>)
      outs(%xinit: tensor<?x?xi32, #SparseMatrix>) {
        ^bb(%a: i32, %b: i32, %x: i32):
          %0 = arith.muli %a, %b : i32
          %1 = arith.addi %x, %0 : i32
          linalg.yield %1 : i32
    } -> tensor<?x?xi32, #SparseMatrix>
    return %0 : tensor<?x?xi32, #SparseMatrix>
  }

  // Driver method to call and verify tensor kernel.
  func @entry() {
    %c0 = arith.constant 0 : index
    %i0 = arith.constant -1 : i32

    // Setup very sparse 3-d tensors.
    %t1 = arith.constant sparse<
       [ [1,1,3], [2,0,0], [2,2,1], [2,2,2], [2,2,3] ],
         [ 1, 2, 3, 4, 5 ]
    > : tensor<3x3x4xi32>
    %t2 = arith.constant sparse<
       [ [1,0,0], [1,1,3], [2,2,1], [2,2,3] ],
         [ 6, 7, 8, 9 ]
    > : tensor<3x3x4xi32>
    %st1 = sparse_tensor.convert %t1
      : tensor<3x3x4xi32> to tensor<?x?x?xi32, #SparseTensor>
    %st2 = sparse_tensor.convert %t2
      : tensor<3x3x4xi32> to tensor<?x?x?xi32, #SparseTensor>

    // Call kernel.
    %0 = call @redsum(%st1, %st2)
      : (tensor<?x?x?xi32, #SparseTensor>,
         tensor<?x?x?xi32, #SparseTensor>) -> tensor<?x?xi32, #SparseMatrix>

    //
    // Verify results. Only two entries stored in result. Correct structure.
    //
    // CHECK: ( 7, 69, -1, -1 )
    // CHECK-NEXT: ( ( 0, 0, 0 ), ( 0, 7, 0 ), ( 0, 0, 69 ) )
    //
    %val = sparse_tensor.values %0
      : tensor<?x?xi32, #SparseMatrix> to memref<?xi32>
    %vv = vector.transfer_read %val[%c0], %i0: memref<?xi32>, vector<4xi32>
    vector.print %vv : vector<4xi32>
    %dm = sparse_tensor.convert %0
      : tensor<?x?xi32, #SparseMatrix> to tensor<?x?xi32>
    %db = bufferization.to_memref %dm : memref<?x?xi32>
    %vm = vector.transfer_read %db[%c0, %c0], %i0: memref<?x?xi32>, vector<3x3xi32>
    vector.print %vm : vector<3x3xi32>

    // Release the resources.
    sparse_tensor.release %st1 : tensor<?x?x?xi32, #SparseTensor>
    sparse_tensor.release %st2 : tensor<?x?x?xi32, #SparseTensor>
    sparse_tensor.release %0 : tensor<?x?xi32, #SparseMatrix>
    memref.dealloc %db : memref<?x?xi32>
    return
  }
}
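
// Note: a quick hand check of the expected output above, worked out from the
// constant test data rather than produced by the pipeline itself. The only
// coordinates present in both inputs are [1,1,3] and [2,2,1], [2,2,3], so
//   X(1,1) = 1 * 7         = 7
//   X(2,2) = 3 * 8 + 5 * 9 = 69
// and all other entries remain zero, which matches the two stored values
// ( 7, 69 ) and the dense matrix printed by the CHECK lines (the trailing
// -1 entries come from the out-of-bounds padding value of the 4-wide read).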