// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
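// With both level types "compressed", this is a doubly compressed sparse
// row (DCSR) format: only rows containing at least one nonzero are
// materialized, and within each stored row only the nonzero columns and
// their values are kept.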

//
// Traits for 2-d tensor (aka matrix) operations.
//
#trait_scale = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) * 2.0"
}
#trait_scale_inpl = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) *= 2.0"
}
#trait_op = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>,  // B (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) OP B(i,j)"
}
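
// The traits above only fix the iteration space; the sparse compiler
// derives the actual loop structure from the operand encodings and from
// the body of each linalg.generic below. In particular, the element-wise
// addition must co-iterate over the union of the two sparsity patterns,
// while the multiplication only needs their intersection, since a product
// with an implicit zero is itself zero.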

module {
  // Scales a sparse matrix into a new sparse matrix.
  func.func @matrix_scale(%arga: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %s = arith.constant 2.0 : f64
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
    %xm = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_scale
      ins(%arga: tensor<?x?xf64, #DCSR>)
      outs(%xm: tensor<?x?xf64, #DCSR>) {
        ^bb(%a: f64, %x: f64):
          %1 = arith.mulf %a, %s : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }
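
  // A sketch (not the exact generated IR) of what the sparse compiler
  // emits for @matrix_scale above: loops over stored entries only.
  //
  //   for each stored row i of A:
  //     for each stored entry (i,j) in row i:
  //       X(i,j) = A(i,j) * 2.0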

  // Scales a sparse matrix in place.
  func.func @matrix_scale_inplace(%argx: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %s = arith.constant 2.0 : f64
    %0 = linalg.generic #trait_scale_inpl
      outs(%argx: tensor<?x?xf64, #DCSR>) {
        ^bb(%x: f64):
          %1 = arith.mulf %x, %s : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }
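
  // Because %argx appears only as an "outs" operand, the kernel above
  // updates the stored values of its input in place; the returned tensor
  // aliases %argx rather than a fresh allocation.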

  // Adds two sparse matrices element-wise into a new sparse matrix.
  func.func @matrix_add(%arga: tensor<?x?xf64, #DCSR>,
                        %argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_op
      ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
      outs(%xv: tensor<?x?xf64, #DCSR>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = arith.addf %a, %b : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }
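
  // Sketch of the co-iteration for @matrix_add above, which walks the
  // union of the two sparsity patterns within each row:
  //
  //   while A or B has entries left in row i:
  //     if both store column j:    X(i,j) = A(i,j) + B(i,j)
  //     else if only A stores j:   X(i,j) = A(i,j)
  //     else (only B stores j):    X(i,j) = B(i,j)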

  // Multiplies two sparse matrices element-wise into a new sparse matrix.
  func.func @matrix_mul(%arga: tensor<?x?xf64, #DCSR>,
                        %argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_op
      ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
      outs(%xv: tensor<?x?xf64, #DCSR>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = arith.mulf %a, %b : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }
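
  // For @matrix_mul above, co-iteration narrows to the intersection of
  // the two patterns: a value is produced only where both A and B store
  // an entry at (i,j), since x * 0 = 0 needs no stored result.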

  // Dumps a sparse matrix in dense form.
  func.func @dump(%arg0: tensor<?x?xf64, #DCSR>) {
    %d0 = arith.constant 0.0 : f64
    %c0 = arith.constant 0 : index
    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
    %1 = vector.transfer_read %dm[%c0, %c0], %d0 : tensor<?x?xf64>, vector<4x8xf64>
    vector.print %1 : vector<4x8xf64>
    return
  }
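
  // The dump first converts to a dense tensor; vector.transfer_read with
  // pad value 0.0 then reads the full 4x8 contents (including the
  // implicit zeros) into a vector for printing.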

  // Driver method to call and verify matrix kernels.
  func.func @entry() {
    // Set up sparse matrices.
    %m1 = arith.constant sparse<
       [ [0,0], [0,1], [1,7], [2,2], [2,4], [2,7], [3,0], [3,2], [3,3] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    > : tensor<4x8xf64>
    %m2 = arith.constant sparse<
       [ [0,0], [0,7], [1,0], [1,6], [2,1], [2,7] ],
         [ 6.0, 5.0, 4.0, 3.0, 2.0, 1.0 ]
    > : tensor<4x8xf64>
    %sm1 = sparse_tensor.convert %m1 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>
    // TODO: Use %sm1 when we support sparse tensor copies.
    %sm1_dup = sparse_tensor.convert %m1 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>
    %sm2 = sparse_tensor.convert %m2 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>
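    // Note that each convert materializes a fresh DCSR tensor from its
    // dense constant, so %sm1 and %sm1_dup are independent copies.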

    // Call sparse matrix kernels.
    %0 = call @matrix_scale(%sm1)
      : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
    %1 = call @matrix_scale_inplace(%sm1_dup)
      : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
    %2 = call @matrix_add(%1, %sm2)
      : (tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
    %3 = call @matrix_mul(%1, %sm2)
      : (tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>

    //
    // Verify the results.
    //
    // CHECK:      ( ( 1, 2, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 3 ), ( 0, 0, 4, 0, 5, 0, 0, 6 ), ( 7, 0, 8, 9, 0, 0, 0, 0 ) )
    // CHECK-NEXT: ( ( 6, 0, 0, 0, 0, 0, 0, 5 ), ( 4, 0, 0, 0, 0, 0, 3, 0 ), ( 0, 2, 0, 0, 0, 0, 0, 1 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ) )
    // CHECK-NEXT: ( ( 2, 4, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 6 ), ( 0, 0, 8, 0, 10, 0, 0, 12 ), ( 14, 0, 16, 18, 0, 0, 0, 0 ) )
    // CHECK-NEXT: ( ( 2, 4, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 6 ), ( 0, 0, 8, 0, 10, 0, 0, 12 ), ( 14, 0, 16, 18, 0, 0, 0, 0 ) )
    // CHECK-NEXT: ( ( 8, 4, 0, 0, 0, 0, 0, 5 ), ( 4, 0, 0, 0, 0, 0, 3, 6 ), ( 0, 2, 8, 0, 10, 0, 0, 13 ), ( 14, 0, 16, 18, 0, 0, 0, 0 ) )
    // CHECK-NEXT: ( ( 12, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 12 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ) )
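    //
    // The fifth vector shows the union of the two nonzero patterns
    // produced by the addition; the sixth shows the much sparser
    // intersection produced by the multiplication.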
    //
    call @dump(%sm1) : (tensor<?x?xf64, #DCSR>) -> ()
    call @dump(%sm2) : (tensor<?x?xf64, #DCSR>) -> ()
    call @dump(%0) : (tensor<?x?xf64, #DCSR>) -> ()
    call @dump(%1) : (tensor<?x?xf64, #DCSR>) -> ()
    call @dump(%2) : (tensor<?x?xf64, #DCSR>) -> ()
    call @dump(%3) : (tensor<?x?xf64, #DCSR>) -> ()

    // Release the resources (%1 aliases %sm1_dup, so it needs no
    // separate deallocation).
    bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %sm1_dup : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %sm2 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %0 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %2 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %3 : tensor<?x?xf64, #DCSR>
    return
  }
}