// RUN: mlir-opt %s \
// RUN: --sparsification --sparse-tensor-conversion \
// RUN: --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Repeat the run with SIMDization enabled; the printed result must be identical.
//
// RUN: mlir-opt %s \
// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
// RUN: --convert-vector-to-scf --convert-scf-to-std \
// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// Opaque filename handle passed to the C runner utilities.
!Filename = type !llvm.ptr<i8>

// Doubly compressed (CSR-of-CSR style) storage for the input matrix.
#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

// Trait for a full reduction of a 2-d tensor into a 0-d scalar tensor.
#trait_sum_reduce = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A (input matrix)
    affine_map<(i,j) -> ()>     // x (scalar output)
  ],
  iterator_types = ["reduction", "reduction"],
  doc = "x += A(i,j)"
}

//
// Integration test: a kernel annotated as sparse is lowered to actual
// sparse code, a matching sparse storage scheme is initialized from an
// external matrix file, and the result is executed via the JIT compiler.
//
module {
  //
  // A kernel that folds every stored entry of a sparse matrix into a
  // single scalar via addition.
  //
  func @kernel_sum_reduce(%arga: tensor<?x?xf64, #SparseMatrix>,
                          %argx: tensor<f64> {linalg.inplaceable = true}) -> tensor<f64> {
    %result = linalg.generic #trait_sum_reduce
      ins(%arga: tensor<?x?xf64, #SparseMatrix>)
      outs(%argx: tensor<f64>) {
        ^bb(%elem: f64, %acc: f64):
          %sum = arith.addf %acc, %elem : f64
          linalg.yield %sum : f64
    } -> tensor<f64>
    return %result : tensor<f64>
  }

  // Runtime helper (from the C runner utils) that resolves TENSOR<n> to a path.
  func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver: reads the matrix named by TENSOR0, runs the sparse
  // kernel, and prints the reduced scalar for FileCheck verification.
  //
  func @entry() {
    %zero = arith.constant 0.0 : f64
    %idx0 = arith.constant 0 : index

    // Allocate the scalar accumulator and clear it.
    %outbuf = memref.alloc() : memref<f64>
    memref.store %zero, %outbuf[] : memref<f64>
    %outtensor = bufferization.to_tensor %outbuf : memref<f64>

    // Construct the sparse storage scheme from the external matrix file.
    %fname = call @getTensorFilename(%idx0) : (index) -> (!Filename)
    %matrix = sparse_tensor.new %fname : !Filename to tensor<?x?xf64, #SparseMatrix>

    // Run the sparse kernel.
    %reduced = call @kernel_sum_reduce(%matrix, %outtensor)
       : (tensor<?x?xf64, #SparseMatrix>, tensor<f64>) -> tensor<f64>

    // Print the result for verification.
    //
    // CHECK: 30.2
    //
    %resmem = bufferization.to_memref %reduced : memref<f64>
    %resval = memref.load %resmem[] : memref<f64>
    vector.print %resval : f64

    // Release the scalar buffer and the sparse tensor storage.
    memref.dealloc %outbuf : memref<f64>
    sparse_tensor.release %matrix : tensor<?x?xf64, #SparseMatrix>

    return
  }
}