1// RUN: mlir-opt %s --sparse-compiler | \
2// RUN: mlir-cpu-runner \
3// RUN:  -e entry -entry-point-result=void  \
4// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
5// RUN: FileCheck %s
6
7#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
8
9#trait_op1 = {
10  indexing_maps = [
11    affine_map<(i) -> (i)>,  // a (in)
12    affine_map<(i) -> (i)>   // x (out)
13  ],
14  iterator_types = ["parallel"],
15  doc = "x(i) = OP a(i)"
16}
17
18#trait_op2 = {
19  indexing_maps = [
20    affine_map<(i) -> (i)>,  // a (in)
21    affine_map<(i) -> (i)>,  // b (in)
22    affine_map<(i) -> (i)>   // x (out)
23  ],
24  iterator_types = ["parallel"],
25  doc = "x(i) = a(i) OP b(i)"
26}
27
28module {
29  func.func @cops(%arga: tensor<?xcomplex<f64>, #SparseVector>,
30                  %argb: tensor<?xcomplex<f64>, #SparseVector>)
31                 -> tensor<?xcomplex<f64>, #SparseVector> {
32    %c0 = arith.constant 0 : index
33    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
34    %xv = sparse_tensor.init [%d] : tensor<?xcomplex<f64>, #SparseVector>
35    %0 = linalg.generic #trait_op2
36       ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
37                         tensor<?xcomplex<f64>, #SparseVector>)
38        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
39        ^bb(%a: complex<f64>, %b: complex<f64>, %x: complex<f64>):
40          %1 = complex.neg %b : complex<f64>
41          %2 = complex.sub %a, %1 : complex<f64>
42          linalg.yield %2 : complex<f64>
43    } -> tensor<?xcomplex<f64>, #SparseVector>
44    return %0 : tensor<?xcomplex<f64>, #SparseVector>
45  }
46
47  func.func @csin(%arga: tensor<?xcomplex<f64>, #SparseVector>)
48                 -> tensor<?xcomplex<f64>, #SparseVector> {
49    %c0 = arith.constant 0 : index
50    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
51    %xv = sparse_tensor.init [%d] : tensor<?xcomplex<f64>, #SparseVector>
52    %0 = linalg.generic #trait_op1
53       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
54        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
55        ^bb(%a: complex<f64>, %x: complex<f64>):
56          %1 = complex.sin %a : complex<f64>
57          linalg.yield %1 : complex<f64>
58    } -> tensor<?xcomplex<f64>, #SparseVector>
59    return %0 : tensor<?xcomplex<f64>, #SparseVector>
60  }
61
62  func.func @cdiv(%arga: tensor<?xcomplex<f64>, #SparseVector>)
63                 -> tensor<?xcomplex<f64>, #SparseVector> {
64    %c0 = arith.constant 0 : index
65    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
66    %xv = sparse_tensor.init [%d] : tensor<?xcomplex<f64>, #SparseVector>
67    %c = complex.constant [2.0 : f64, 0.0 : f64] : complex<f64>
68    %0 = linalg.generic #trait_op1
69       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
70        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
71        ^bb(%a: complex<f64>, %x: complex<f64>):
72          %1 = complex.div %a, %c  : complex<f64>
73          linalg.yield %1 : complex<f64>
74    } -> tensor<?xcomplex<f64>, #SparseVector>
75    return %0 : tensor<?xcomplex<f64>, #SparseVector>
76  }
77
78  func.func @cabs(%arga: tensor<?xcomplex<f64>, #SparseVector>)
79                 -> tensor<?xf64, #SparseVector> {
80    %c0 = arith.constant 0 : index
81    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
82    %xv = sparse_tensor.init [%d] : tensor<?xf64, #SparseVector>
83    %0 = linalg.generic #trait_op1
84       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
85        outs(%xv: tensor<?xf64, #SparseVector>) {
86        ^bb(%a: complex<f64>, %x: f64):
87          %1 = complex.abs %a : complex<f64>
88          linalg.yield %1 : f64
89    } -> tensor<?xf64, #SparseVector>
90    return %0 : tensor<?xf64, #SparseVector>
91  }
92
93  func.func @dumpc(%arg0: tensor<?xcomplex<f64>, #SparseVector>, %d: index) {
94    %c0 = arith.constant 0 : index
95    %c1 = arith.constant 1 : index
96    %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f64>, #SparseVector> to memref<?xcomplex<f64>>
97    scf.for %i = %c0 to %d step %c1 {
98       %v = memref.load %mem[%i] : memref<?xcomplex<f64>>
99       %real = complex.re %v : complex<f64>
100       %imag = complex.im %v : complex<f64>
101       vector.print %real : f64
102       vector.print %imag : f64
103    }
104    return
105  }
106
107  func.func @dumpf(%arg0: tensor<?xf64, #SparseVector>) {
108    %c0 = arith.constant 0 : index
109    %d0 = arith.constant 0.0 : f64
110    %values = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
111    %0 = vector.transfer_read %values[%c0], %d0: memref<?xf64>, vector<3xf64>
112    vector.print %0 : vector<3xf64>
113    return
114  }
115
116  // Driver method to call and verify complex kernels.
  // Driver: builds two sparse complex vectors, runs each kernel once,
  // and prints the stored results for FileCheck to verify. The CHECK
  // lines below are FileCheck directives; their exact text and order
  // are part of the test and must not be edited casually.
  func.func @entry() {
    // Setup sparse vectors: three stored entries each. Their nonzero
    // indices overlap at 28 and 31 but differ at 0 (v1) vs. 1 (v2).
    %v1 = arith.constant sparse<
       [ [0], [28], [31] ],
         [ (-5.13, 2.0), (3.0, 4.0), (5.0, 6.0) ] > : tensor<32xcomplex<f64>>
    %v2 = arith.constant sparse<
       [ [1], [28], [31] ],
         [ (1.0, 0.0), (-2.0, 0.0), (3.0, 0.0) ] > : tensor<32xcomplex<f64>>
    %sv1 = sparse_tensor.convert %v1 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>
    %sv2 = sparse_tensor.convert %v2 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>

    // Call sparse vector kernels.
    %0 = call @cops(%sv1, %sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>,
          tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %1 = call @csin(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %2 = call @cdiv(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xf64, #SparseVector>'s complex variant -> tensor<?xcomplex<f64>, #SparseVector>
    %3 = call @cabs(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xf64, #SparseVector>

    //
    // Verify the results. The binary kernel %0 stores entries at the
    // union of indices {0, 1, 28, 31}, hence 4 (real, imag) pairs are
    // dumped; the unary kernels keep the 3 entries of %sv1.
    //
    %d3 = arith.constant 3 : index
    %d4 = arith.constant 4 : index
    // CHECK: -5.13
    // CHECK-NEXT: 2
    // CHECK-NEXT: 1
    // CHECK-NEXT: 0
    // CHECK-NEXT: 1
    // CHECK-NEXT: 4
    // CHECK-NEXT: 8
    // CHECK-NEXT: 6
    call @dumpc(%0, %d4) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: 3.43887
    // CHECK-NEXT: 1.47097
    // CHECK-NEXT: 3.85374
    // CHECK-NEXT: -27.0168
    // CHECK-NEXT: -193.43
    // CHECK-NEXT: 57.2184
    call @dumpc(%1, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: -2.565
    // CHECK-NEXT: 1
    // CHECK-NEXT: 1.5
    // CHECK-NEXT: 2
    // CHECK-NEXT: 2.5
    // CHECK-NEXT: 3
    call @dumpc(%2, %d3) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    // CHECK-NEXT: ( 5.50608, 5, 7.81025 )
    call @dumpf(%3) : (tensor<?xf64, #SparseVector>) -> ()

    // Release the resources.
    sparse_tensor.release %sv1 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.release %sv2 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.release %0 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.release %1 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.release %2 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.release %3 : tensor<?xf64, #SparseVector>
    return
  }
179}
180