// RUN: mlir-opt %s --sparse-compiler | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric_complex.mtx" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

!Filename = !llvm.ptr<i8>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#trait_sum_reduce = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A
    affine_map<(i,j) -> ()>     // x (out)
  ],
  iterator_types = ["reduction", "reduction"],
  doc = "x += A(i,j)"
}

//
// Integration test that lowers a kernel annotated as sparse to
// actual sparse code, initializes a matching sparse storage scheme
// from file, and runs the resulting code with the JIT compiler.
//
module {
  //
  // A kernel that sum-reduces a matrix to a single scalar.
  //
  func.func @kernel_sum_reduce(%arga: tensor<?x?xcomplex<f64>, #SparseMatrix>,
                               %argx: tensor<complex<f64>>) -> tensor<complex<f64>> {
    %0 = linalg.generic #trait_sum_reduce
      ins(%arga: tensor<?x?xcomplex<f64>, #SparseMatrix>)
      outs(%argx: tensor<complex<f64>>) {
      ^bb(%a: complex<f64>, %x: complex<f64>):
        %0 = complex.add %x, %a : complex<f64>
        linalg.yield %0 : complex<f64>
    } -> tensor<complex<f64>>
    return %0 : tensor<complex<f64>>
  }

  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver that reads matrix from file and calls the sparse kernel.
  //
  func.func @entry() {
    //%d0 = arith.constant 0.0 : complex<f64>
    %d0 = complex.constant [0.0 : f64, 0.0 : f64] : complex<f64>
    %c0 = arith.constant 0 : index

    // Setup memory for a single reduction scalar,
    // initialized to zero.
    // TODO: tensor.from_elements does not support complex.
    %alloc = bufferization.alloc_tensor() : tensor<complex<f64>>
    %x = tensor.insert %d0 into %alloc[] : tensor<complex<f64>>

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xcomplex<f64>, #SparseMatrix>

    // Call the kernel.
    %0 = call @kernel_sum_reduce(%a, %x)
      : (tensor<?x?xcomplex<f64>, #SparseMatrix>, tensor<complex<f64>>) -> tensor<complex<f64>>

    // Print the result for verification.
    //
    // CHECK: 30.2
    // CHECK-NEXT: 22.2
    //
    %v = tensor.extract %0[] : tensor<complex<f64>>
    %real = complex.re %v : complex<f64>
    %imag = complex.im %v : complex<f64>
    vector.print %real : f64
    vector.print %imag : f64

    // Release the resources.
    bufferization.dealloc_tensor %a : tensor<?x?xcomplex<f64>, #SparseMatrix>

    return
  }
}