// RUN: mlir-opt %s --sparse-compiler | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

!Filename = !llvm.ptr<i8>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#trait_sum_reduce = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A
    affine_map<(i,j) -> ()>     // x (out)
  ],
  iterator_types = ["reduction", "reduction"],
  doc = "x += A(i,j)"
}

//
// Integration test that lowers a kernel annotated as sparse to
// actual sparse code, initializes a matching sparse storage scheme
// from file, and runs the resulting code with the JIT compiler.
//
module {
  //
  // A kernel that sum-reduces a matrix to a single scalar.
  //
  func.func @kernel_sum_reduce(%arga: tensor<?x?xf64, #SparseMatrix>,
                               %argx: tensor<f64>) -> tensor<f64> {
    %0 = linalg.generic #trait_sum_reduce
      ins(%arga: tensor<?x?xf64, #SparseMatrix>)
      outs(%argx: tensor<f64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.addf %x, %a : f64
        linalg.yield %0 : f64
    } -> tensor<f64>
    return %0 : tensor<f64>
  }

  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver that reads matrix from file and calls the sparse kernel.
  //
  func.func @entry() {
    %d0 = arith.constant 0.0 : f64
    %c0 = arith.constant 0 : index

    // Setup memory for a single reduction scalar,
    // initialized to zero.
    %x = tensor.from_elements %d0 : tensor<f64>

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>

    // Call the kernel.
    %0 = call @kernel_sum_reduce(%a, %x)
       : (tensor<?x?xf64, #SparseMatrix>, tensor<f64>) -> tensor<f64>

    // Print the result for verification.
    //
    // CHECK: 30.2
    //
    %v = tensor.extract %0[] : tensor<f64>
    vector.print %v : f64

    // Release the resources.
    bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>

    return
  }
}