// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

#trait_op1 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = OP a(i)"
}

#trait_op2 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>,  // b (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP b(i)"
}

module {
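  // Kernel: elementwise addition over the union of two sparse patterns,
  // written as x(i) = a(i) - (-b(i)) to exercise complex.neg and complex.sub.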
  func.func @cops(%arga: tensor<?xcomplex<f64>, #SparseVector>,
                  %argb: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op2
       ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                         tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %b: complex<f64>, %x: complex<f64>):
          %1 = complex.neg %b : complex<f64>
          %2 = complex.sub %a, %1 : complex<f64>
          linalg.yield %2 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

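  // Kernel: elementwise complex sine, x(i) = sin(a(i)), applied to the
  // stored (nonzero) entries only.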
  func.func @csin(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.sin %a : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

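  // Kernel: elementwise complex square root, x(i) = sqrt(a(i)).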
  func.func @complex_sqrt(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.sqrt %a : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

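  // Kernel: elementwise complex hyperbolic tangent, x(i) = tanh(a(i)).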
  func.func @complex_tanh(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.tanh %a : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

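  // Kernel: x(i) = expm1(log1p(a(i))), which composes to the identity,
  // so the output values should match the input values.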
  func.func @clog1p_expm1(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.log1p %a : complex<f64>
          %2 = complex.expm1 %1 : complex<f64>
          linalg.yield %2 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

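  // Kernel: divide each stored entry by the complex constant 2.0 + 0.0i.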
  func.func @cdiv(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %c = complex.constant [2.0 : f64, 0.0 : f64] : complex<f64>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.div %a, %c : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

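  // Kernel: complex magnitude, mapping a complex sparse vector to a real
  // sparse vector with x(i) = |a(i)|.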
  func.func @cabs(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xf64, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: f64):
          %1 = complex.abs %a : complex<f64>
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

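  // Prints the first %d stored values of a complex sparse vector,
  // emitting the real and imaginary parts on separate lines.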
  func.func @dumpc(%arg0: tensor<?xcomplex<f64>, #SparseVector>, %d: index) {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f64>, #SparseVector> to memref<?xcomplex<f64>>
    scf.for %i = %c0 to %d step %c1 {
       %v = memref.load %mem[%i] : memref<?xcomplex<f64>>
       %real = complex.re %v : complex<f64>
       %imag = complex.im %v : complex<f64>
       vector.print %real : f64
       vector.print %imag : f64
    }
    return
  }

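  // Prints the first three stored values of a real sparse vector as a vector.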
  func.func @dumpf(%arg0: tensor<?xf64, #SparseVector>) {
    %c0 = arith.constant 0 : index
    %d0 = arith.constant 0.0 : f64
    %values = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
    %0 = vector.transfer_read %values[%c0], %d0: memref<?xf64>, vector<3xf64>
    vector.print %0 : vector<3xf64>
    return
  }

  // Driver method to call and verify complex kernels.
  func.func @entry() {
    // Setup sparse vectors.
    %v1 = arith.constant sparse<
       [ [0], [28], [31] ],
         [ (-5.13, 2.0), (3.0, 4.0), (5.0, 6.0) ] > : tensor<32xcomplex<f64>>
    %v2 = arith.constant sparse<
       [ [1], [28], [31] ],
         [ (1.0, 0.0), (-2.0, 0.0), (3.0, 0.0) ] > : tensor<32xcomplex<f64>>
    %sv1 = sparse_tensor.convert %v1 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>
    %sv2 = sparse_tensor.convert %v2 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>
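    // Note: %sv1 stores nonzeros at indices 0, 28, 31 and %sv2 at 1, 28, 31,
    // so the union used by @cops has four entries while the unary kernels
    // produce three.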

    // Call sparse vector kernels.
    %0 = call @cops(%sv1, %sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>,
          tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %1 = call @csin(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %2 = call @complex_sqrt(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %3 = call @complex_tanh(%sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %4 = call @clog1p_expm1(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %5 = call @cdiv(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %6 = call @cabs(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xf64, #SparseVector>

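    // The dumps below print, in order, the results of @cops, @csin,
    // @complex_sqrt, @complex_tanh, @clog1p_expm1, @cdiv, and @cabs.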
    //
    // Verify the results.
    //
    %d3 = arith.constant 3 : index
    %d4 = arith.constant 4 : index
    // CHECK: -5.13
    // CHECK-NEXT: 2
    // CHECK-NEXT: 1
    // CHECK-NEXT: 0
    // CHECK-NEXT: 1
    // CHECK-NEXT: 4
    // CHECK-NEXT: 8
    // CHECK-NEXT: 6
    call @dumpc(%0, %d4) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: 3.43887
    // CHECK-NEXT: 1.47097
    // CHECK-NEXT: 3.85374
    // CHECK-NEXT: -27.0168
    // CHECK-NEXT: -193.43
    // CHECK-NEXT: 57.2184
    call @dumpc(%1, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: 0.433635
    // CHECK-NEXT: 2.30609
    // CHECK-NEXT: 2
    // CHECK-NEXT: 1
    // CHECK-NEXT: 2.53083
    // CHECK-NEXT: 1.18538
    call @dumpc(%2, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: 0.761594
    // CHECK-NEXT: 0
    // CHECK-NEXT: -0.964028
    // CHECK-NEXT: 0
    // CHECK-NEXT: 0.995055
    // CHECK-NEXT: 0
    call @dumpc(%3, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: -5.13
    // CHECK-NEXT: 2
    // CHECK-NEXT: 3
    // CHECK-NEXT: 4
    // CHECK-NEXT: 5
    // CHECK-NEXT: 6
    call @dumpc(%4, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: -2.565
    // CHECK-NEXT: 1
    // CHECK-NEXT: 1.5
    // CHECK-NEXT: 2
    // CHECK-NEXT: 2.5
    // CHECK-NEXT: 3
    call @dumpc(%5, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: ( 5.50608, 5, 7.81025 )
    call @dumpf(%6) : (tensor<?xf64, #SparseVector>) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %sv2 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %0 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %1 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %3 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %4 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %5 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %6 : tensor<?xf64, #SparseVector>
    return
  }
}