1// RUN: mlir-opt %s --sparse-compiler | \
2// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
3// RUN: mlir-cpu-runner \
4// RUN:  -e entry -entry-point-result=void  \
5// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
6// RUN: FileCheck %s
7//
8// Do the same run, but now with SIMDization as well. This should not change the outcome.
9//
10// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4" | \
11// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
12// RUN: mlir-cpu-runner \
13// RUN:  -e entry -entry-point-result=void  \
14// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
15// RUN: FileCheck %s
16
// Opaque pointer type used to pass file names to the sparse runtime library.
!Filename = !llvm.ptr<i8>

// Doubly compressed sparse row (DCSR) storage: both dimensions use the
// "compressed" level format, iterated in row-major (i,j) order.
#DCSR = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>
}>

// Trait for the element-wise self-multiplication kernel below. A single
// indexing map suffices because the sole operand X is both input and output.
#eltwise_mult = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) *= X(i,j)"
}
31
32//
33// Integration test that lowers a kernel annotated as sparse to
34// actual sparse code, initializes a matching sparse storage scheme
35// from file, and runs the resulting code with the JIT compiler.
36//
module {
  //
  // A kernel that multiplies a sparse matrix A with itself
  // in an element-wise fashion. In this operation, we have
  // a sparse tensor as output, but although the values of the
  // sparse tensor change, its nonzero structure remains the same.
  //
  // Note that %argx appears only in outs(): the linalg.generic op
  // reads each stored value %x and yields its square back into the
  // same output tensor (X(i,j) *= X(i,j)).
  //
  func.func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR>)
    -> tensor<?x?xf64, #DCSR> {
    %0 = linalg.generic #eltwise_mult
      outs(%argx: tensor<?x?xf64, #DCSR>) {
      ^bb(%x: f64):
        // Square each stored (nonzero) value.
        %0 = arith.mulf %x, %x : f64
        linalg.yield %0 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }

  // Runtime support routine: maps an index to a tensor file name
  // (resolved from the TENSOR0 environment variable set in the RUN lines).
  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver that reads matrix from file and calls the sparse kernel.
  //
  func.func @entry() {
    %d0 = arith.constant 0.0 : f64   // padding value for the vector read
    %c0 = arith.constant 0 : index

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %x = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #DCSR>

    // Call kernel.
    %0 = call @kernel_eltwise_mult(%x) : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>

    // Print the result for verification: dump the values array of the
    // DCSR storage, which holds the nine stored entries, each squared.
    //
    // CHECK: ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
    //
    %m = sparse_tensor.values %0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
    vector.print %v : vector<9xf64>

    // Release the resources.
    // NOTE(review): only %x is deallocated; this presumes the kernel
    // updates the tensor in place so %0 aliases %x — confirm no leak of %0.
    bufferization.dealloc_tensor %x : tensor<?x?xf64, #DCSR>

    return
  }
}
85