1// RUN: mlir-opt %s --sparse-compiler | \
2// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
3// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
4// RUN: FileCheck %s
5
// CSR storage: the row dimension is kept dense, the column dimension is
// compressed (row-major dimension order).
#CSR = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>
}>

// DCSR storage: both dimensions are compressed (row-major order), so
// completely empty rows occupy no storage at all.
#DCSR = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>
}>
15
16module {
17  //
18  // Computes C = A x B with all matrices dense.
19  //
20  func.func @matmul1(%A: tensor<4x8xf64>, %B: tensor<8x4xf64>,
21                     %C: tensor<4x4xf64>) -> tensor<4x4xf64> {
22    %D = linalg.matmul
23      ins(%A, %B: tensor<4x8xf64>, tensor<8x4xf64>)
24      outs(%C: tensor<4x4xf64>) -> tensor<4x4xf64>
25    return %D: tensor<4x4xf64>
26  }
27
28  //
29  // Computes C = A x B with all matrices sparse (SpMSpM) in CSR.
30  //
31  func.func @matmul2(%A: tensor<4x8xf64, #CSR>,
32                     %B: tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR> {
33    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
34    %D = linalg.matmul
35      ins(%A, %B: tensor<4x8xf64, #CSR>, tensor<8x4xf64, #CSR>)
36         outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
37    return %D: tensor<4x4xf64, #CSR>
38  }
39
40  //
41  // Computes C = A x B with all matrices sparse (SpMSpM) in DCSR.
42  //
43  func.func @matmul3(%A: tensor<4x8xf64, #DCSR>,
44                     %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
45    %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
46    %D = linalg.matmul
47      ins(%A, %B: tensor<4x8xf64, #DCSR>, tensor<8x4xf64, #DCSR>)
48         outs(%C: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
49    return %D: tensor<4x4xf64, #DCSR>
50  }
51
  //
  // Main driver: exercises each matmul kernel with dense x dense,
  // sparse x dense, and sparse x sparse operands, printing every result
  // for FileCheck verification.
  //
  func.func @entry() {
    %c0 = arith.constant 0 : index
    // -1.0 serves as the pad value for the vector.transfer_read ops below;
    // it also marks the unused trailing lanes when the nonzero value arrays
    // are printed at the end.
    %d1 = arith.constant -1.0 : f64

    // Initialize various matrices, dense for stress testing,
    // and sparse to verify correct nonzero structure.
    %da = arith.constant dense<[
        [ 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1 ],
        [ 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2 ],
        [ 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3 ],
        [ 1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4 ]
    ]> : tensor<4x8xf64>
    %db = arith.constant dense<[
        [ 10.1, 11.1, 12.1, 13.1 ],
        [ 10.2, 11.2, 12.2, 13.2 ],
        [ 10.3, 11.3, 12.3, 13.3 ],
        [ 10.4, 11.4, 12.4, 13.4 ],
        [ 10.5, 11.5, 12.5, 13.5 ],
        [ 10.6, 11.6, 12.6, 13.6 ],
        [ 10.7, 11.7, 12.7, 13.7 ],
        [ 10.8, 11.8, 12.8, 13.8 ]
    ]> : tensor<8x4xf64>
    // %sa row 1 is entirely zero, which distinguishes CSR from DCSR storage.
    %sa = arith.constant dense<[
        [ 0.0, 2.1, 0.0, 0.0, 0.0, 6.1, 0.0, 0.0 ],
        [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
        [ 0.0, 2.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
        [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 ]
    ]> : tensor<4x8xf64>
    %sb = arith.constant dense<[
        [ 0.0, 0.0, 0.0, 1.0 ],
        [ 0.0, 0.0, 2.0, 0.0 ],
        [ 0.0, 3.0, 0.0, 0.0 ],
        [ 4.0, 0.0, 0.0, 0.0 ],
        [ 0.0, 0.0, 0.0, 0.0 ],
        [ 0.0, 5.0, 0.0, 0.0 ],
        [ 0.0, 0.0, 6.0, 0.0 ],
        [ 0.0, 0.0, 7.0, 8.0 ]
    ]> : tensor<8x4xf64>
    %zero = arith.constant dense<0.0> : tensor<4x4xf64>

    // Convert all these matrices to sparse format.
    %a1 = sparse_tensor.convert %da : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
    %a2 = sparse_tensor.convert %da : tensor<4x8xf64> to tensor<4x8xf64, #DCSR>
    %a3 = sparse_tensor.convert %sa : tensor<4x8xf64> to tensor<4x8xf64, #CSR>
    %a4 = sparse_tensor.convert %sa : tensor<4x8xf64> to tensor<4x8xf64, #DCSR>
    %b1 = sparse_tensor.convert %db : tensor<8x4xf64> to tensor<8x4xf64, #CSR>
    %b2 = sparse_tensor.convert %db : tensor<8x4xf64> to tensor<8x4xf64, #DCSR>
    %b3 = sparse_tensor.convert %sb : tensor<8x4xf64> to tensor<8x4xf64, #CSR>
    %b4 = sparse_tensor.convert %sb : tensor<8x4xf64> to tensor<8x4xf64, #DCSR>

    // Call kernels with dense.
    %0 = call @matmul1(%da, %db, %zero)
       : (tensor<4x8xf64>, tensor<8x4xf64>, tensor<4x4xf64>) -> tensor<4x4xf64>
    %1 = call @matmul2(%a1, %b1)
       : (tensor<4x8xf64, #CSR>,
          tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
    %2 = call @matmul3(%a2, %b2)
       : (tensor<4x8xf64, #DCSR>,
          tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

    // Call kernels with one sparse.
    %3 = call @matmul1(%sa, %db, %zero)
       : (tensor<4x8xf64>, tensor<8x4xf64>, tensor<4x4xf64>) -> tensor<4x4xf64>
    %4 = call @matmul2(%a3, %b1)
       : (tensor<4x8xf64, #CSR>,
          tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
    %5 = call @matmul3(%a4, %b2)
       : (tensor<4x8xf64, #DCSR>,
          tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

    // Call kernels with sparse.
    %6 = call @matmul1(%sa, %sb, %zero)
       : (tensor<4x8xf64>, tensor<8x4xf64>, tensor<4x4xf64>) -> tensor<4x4xf64>
    %7 = call @matmul2(%a3, %b3)
       : (tensor<4x8xf64, #CSR>,
          tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
    %8 = call @matmul3(%a4, %b4)
       : (tensor<4x8xf64, #DCSR>,
          tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

    // Verify: the dense, CSR, and DCSR results of the dense x dense product
    // must all agree (printed as 4x4 vectors below).
    //
    // CHECK:    ( ( 388.76, 425.56, 462.36, 499.16 ),
    // CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
    // CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
    // CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
    //
    %v0 = vector.transfer_read %0[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v0 : vector<4x4xf64>

    //
    // CHECK:    ( ( 388.76, 425.56, 462.36, 499.16 ),
    // CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
    // CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
    // CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
    //
    %c1 = sparse_tensor.convert %1 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
    %v1 = vector.transfer_read %c1[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v1 : vector<4x4xf64>

    //
    // CHECK:    ( ( 388.76, 425.56, 462.36, 499.16 ),
    // CHECK-SAME: ( 397.12, 434.72, 472.32, 509.92 ),
    // CHECK-SAME: ( 405.48, 443.88, 482.28, 520.68 ),
    // CHECK-SAME: ( 413.84, 453.04, 492.24, 531.44 ) )
    //
    %c2 = sparse_tensor.convert %2 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
    %v2 = vector.transfer_read %c2[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v2 : vector<4x4xf64>

    // Verify the sparse x dense products; row 1 is zero because %sa row 1
    // is zero.
    //
    // CHECK:    ( ( 86.08, 94.28, 102.48, 110.68 ),
    // CHECK-SAME: ( 0, 0, 0, 0 ),
    // CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
    // CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
    //
    %v3 = vector.transfer_read %3[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v3 : vector<4x4xf64>

    //
    // CHECK:    ( ( 86.08, 94.28, 102.48, 110.68 ),
    // CHECK-SAME: ( 0, 0, 0, 0 ),
    // CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
    // CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
    //
    %c4 = sparse_tensor.convert %4 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
    %v4 = vector.transfer_read %c4[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v4 : vector<4x4xf64>

    //
    // CHECK:    ( ( 86.08, 94.28, 102.48, 110.68 ),
    // CHECK-SAME: ( 0, 0, 0, 0 ),
    // CHECK-SAME: ( 23.46, 25.76, 28.06, 30.36 ),
    // CHECK-SAME: ( 10.8, 11.8, 12.8, 13.8 ) )
    //
    %c5 = sparse_tensor.convert %5 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
    %v5 = vector.transfer_read %c5[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v5 : vector<4x4xf64>

    // Verify the sparse x sparse products.
    //
    // CHECK: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
    //
    %v6 = vector.transfer_read %6[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v6 : vector<4x4xf64>

    //
    // CHECK: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
    //
    %c7 = sparse_tensor.convert %7 : tensor<4x4xf64, #CSR> to tensor<4x4xf64>
    %v7 = vector.transfer_read %c7[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v7 : vector<4x4xf64>

    //
    // CHECK: ( ( 0, 30.5, 4.2, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 4.6, 0 ), ( 0, 0, 7, 8 ) )
    //
    %c8 = sparse_tensor.convert %8 : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
    %v8 = vector.transfer_read %c8[%c0, %c0], %d1 : tensor<4x4xf64>, vector<4x4xf64>
    vector.print %v8 : vector<4x4xf64>

    //
    // Sanity check on nonzeros: the sparse x sparse result stores exactly
    // five nonzero values; the trailing -1 entries are the transfer_read
    // pad value filling the lanes past the stored values.
    //
    // CHECK: ( 30.5, 4.2, 4.6, 7, 8, -1, -1, -1 )
    // CHECK: ( 30.5, 4.2, 4.6, 7, 8, -1, -1, -1 )
    //
    %val7 = sparse_tensor.values %7 : tensor<4x4xf64, #CSR> to memref<?xf64>
    %val8 = sparse_tensor.values %8 : tensor<4x4xf64, #DCSR> to memref<?xf64>
    %nz7 = vector.transfer_read %val7[%c0], %d1 : memref<?xf64>, vector<8xf64>
    %nz8 = vector.transfer_read %val8[%c0], %d1 : memref<?xf64>, vector<8xf64>
    vector.print %nz7 : vector<8xf64>
    vector.print %nz8 : vector<8xf64>

    // Release the resources. Only sparse tensors are deallocated explicitly;
    // NOTE(review): the dense results (%0, %3, %6) are presumably cleaned up
    // by the bufferization pipeline — confirm no leak under the sanitizer.
    bufferization.dealloc_tensor %a1 : tensor<4x8xf64, #CSR>
    bufferization.dealloc_tensor %a2 : tensor<4x8xf64, #DCSR>
    bufferization.dealloc_tensor %a3 : tensor<4x8xf64, #CSR>
    bufferization.dealloc_tensor %a4 : tensor<4x8xf64, #DCSR>
    bufferization.dealloc_tensor %b1 : tensor<8x4xf64, #CSR>
    bufferization.dealloc_tensor %b2 : tensor<8x4xf64, #DCSR>
    bufferization.dealloc_tensor %b3 : tensor<8x4xf64, #CSR>
    bufferization.dealloc_tensor %b4 : tensor<8x4xf64, #DCSR>
    bufferization.dealloc_tensor %1 : tensor<4x4xf64, #CSR>
    bufferization.dealloc_tensor %2 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %4 : tensor<4x4xf64, #CSR>
    bufferization.dealloc_tensor %5 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %7 : tensor<4x4xf64, #CSR>
    bufferization.dealloc_tensor %8 : tensor<4x4xf64, #DCSR>

    return
  }
244}
245