// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"]
}>

#SparseVector64 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 64,
  indexBitWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 32,
  indexBitWidth = 32
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed"]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

// CHECK-LABEL: func @sparse_dim1d(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
//       CHECK: return %[[D]] : index
func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
  %c = arith.constant 0 : index
  %0 = tensor.dim %arg0, %c : tensor<?xf64, #SparseVector>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 2 : index
//       CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
//       CHECK: return %[[D]] : index
func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
  // A query for dimension 1 of the tensor type must be permuted
  // into a query for dimension 2 of the stored sparse tensor
  // scheme, since the latter honors the dimOrdering.
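  // For example, with dimOrdering (i,j,k) -> (k,i,j), tensor
  // dimensions 0, 1, 2 are found at stored dimensions 1, 2, 0.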
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<?x?x?xf64, #SparseTensor>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d_const(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 20 : index
//       CHECK: return %[[C]] : index
func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
  // A query for dimension 1 of the tensor type folds directly
  // into the right constant (even though it corresponds to
  // dimension 2 in the stored sparse tensor scheme).
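  // Here that constant is 20, the size of dimension 1 in the
  // static 10x20x30 shape, as the CHECK lines above verify.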
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<10x20x30xf64, #SparseTensor>
  return %0 : index
}

// CHECK-LABEL: func @sparse_new1d(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
  return %0 : tensor<128xf64, #SparseVector>
}

// CHECK-LABEL: func @sparse_new2d(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
  return %0 : tensor<?x?xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_new3d(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
  return %0 : tensor<?x?x?xf32, #SparseTensor>
}

// CHECK-LABEL: func @sparse_init(
//  CHECK-SAME: %[[I:.*]]: index,
//  CHECK-SAME: %[[J:.*]]: index) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
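  // The dynamic sizes are stored into the stack-allocated sizes buffer
  // and passed to newSparseTensor together with a null pointer (no input
  // data), as the CHECK lines above verify.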
  %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf64, #SparseMatrix>
  return %0 : tensor<?x?xf64, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_release(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
//       CHECK: return
func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
  sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_nop_convert(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//       CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_hidden_nop_cast(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//       CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_nop_cast(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
//       CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_convert_1d(
//  CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
//       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
//       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] {
//       CHECK:   %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xi32>
//       CHECK:   memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex>
//       CHECK:   call @addEltI32(%[[C]], %[[E]], %[[T]], %[[Z]])
//       CHECK: }
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
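  // A dense-to-sparse conversion is lowered to an element-wise loop that
  // passes each value to addEltI32 on the object created by the first
  // newSparseTensor call; a second newSparseTensor call then constructs
  // the annotated tensor (see the CHECK lines above).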
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
  return %0 : tensor<?xi32, #SparseVector>
}

// CHECK-LABEL: func @sparse_convert_1d_ss(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
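  // A sparse-to-sparse conversion between different overhead bit widths
  // is lowered to two newSparseTensor calls: the first reads the 64-bit
  // source into an intermediate object, the second builds the 32-bit
  // destination from that intermediate (see the CHECK lines above).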
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// CHECK-LABEL: func @sparse_convert_2d(
//  CHECK-SAME: %[[A:.*]]: tensor<2x4xf64>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
//       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
//       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
//       CHECK:   scf.for %[[J:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
//       CHECK:     %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]]] : tensor<2x4xf64>
//       CHECK:     memref.store %[[I]], %[[M]][%[[C0]]] : memref<2xindex>
//       CHECK:     memref.store %[[J]], %[[M]][%[[C1]]] : memref<2xindex>
//       CHECK:     call @addEltF64(%[[C]], %[[E]], %[[T]], %[[Z]])
//       CHECK:   }
//       CHECK: }
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
  return %0 : tensor<2x4xf64, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_constant() -> !llvm.ptr<i8> {
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
//       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
//       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C2]] step %[[C1]] {
//       CHECK:   memref.store %{{.*}}, %[[M]][%[[C0]]] : memref<2xindex>
//       CHECK:   memref.store %{{.*}}, %[[M]][%[[C1]]] : memref<2xindex>
//       CHECK:   %[[V:.*]] = tensor.extract %{{.*}}[%[[I]]] : tensor<2xf32>
//       CHECK:   call @addEltF32(%{{.*}}, %[[V]], %[[N]], %{{.*}})
//       CHECK: }
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix> {
  // Initialize a dense tensor constant with two nonzero entries.
  %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
  // Convert the dense constant to an annotated sparse tensor.
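  // Note that the lowering iterates only over the two stored entries
  // (loop bound %[[C2]] in the CHECK lines above), not over the full
  // 8x7 index space.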
  %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #SparseMatrix>
  return %1 : tensor<8x7xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_convert_3d(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?x?xf64>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[U1:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?x?x?xf64>
//   CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
//   CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
//   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
//       CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
//       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref<?xindex>
//       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U1]] step %[[C1]] {
//       CHECK:   scf.for %[[J:.*]] = %[[C0]] to %[[U2]] step %[[C1]] {
//       CHECK:     scf.for %[[K:.*]] = %[[C0]] to %[[U3]] step %[[C1]] {
//       CHECK:       %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]], %[[K]]] : tensor<?x?x?xf64>
//       CHECK:       memref.store %[[I]], %[[M]][%[[C0]]] : memref<3xindex>
//       CHECK:       memref.store %[[J]], %[[M]][%[[C1]]] : memref<3xindex>
//       CHECK:       memref.store %[[K]], %[[M]][%[[C2]]] : memref<3xindex>
//       CHECK:       call @addEltF64(%[[C]], %[[E]], %[[N]], %[[Z]])
//       CHECK:     }
//       CHECK:   }
//       CHECK: }
//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
//       CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
  return %0 : tensor<?x?x?xf64, #SparseTensor>
}

// CHECK-LABEL: func @sparse_pointers(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparsePointers(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
//       CHECK: return %[[T]] : memref<?xindex>
func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_pointers64(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
//       CHECK: return %[[T]] : memref<?xi64>
func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_pointers32(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
//       CHECK: return %[[T]] : memref<?xi32>
func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_indices(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparseIndices(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
//       CHECK: return %[[T]] : memref<?xindex>
func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_indices64(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
//       CHECK: return %[[T]] : memref<?xi64>
func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_indices32(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[C:.*]] = arith.constant 0 : index
//       CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
//       CHECK: return %[[T]] : memref<?xi32>
func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_valuesf64(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
//       CHECK: return %[[T]] : memref<?xf64>
func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
  return %0 : memref<?xf64>
}

// CHECK-LABEL: func @sparse_valuesf32(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
//       CHECK: return %[[T]] : memref<?xf32>
func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf32, #SparseVector> to memref<?xf32>
  return %0 : memref<?xf32>
}

// CHECK-LABEL: func @sparse_valuesi32(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[T:.*]] = call @sparseValuesI32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi32>
//       CHECK: return %[[T]] : memref<?xi32>
func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi32, #SparseVector> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_valuesi16(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[T:.*]] = call @sparseValuesI16(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi16>
//       CHECK: return %[[T]] : memref<?xi16>
func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi16, #SparseVector> to memref<?xi16>
  return %0 : memref<?xi16>
}

// CHECK-LABEL: func @sparse_valuesi8(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
//       CHECK: %[[T:.*]] = call @sparseValuesI8(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi8>
//       CHECK: return %[[T]] : memref<?xi8>
func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi8, #SparseVector> to memref<?xi8>
  return %0 : memref<?xi8>
}

// CHECK-LABEL: func @sparse_reconstruct(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
//       CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_reconstruct_ins(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
//       CHECK: call @endInsert(%[[A]]) : (!llvm.ptr<i8>) -> ()
//       CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 hasInserts : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_insert(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
//  CHECK-SAME: %[[B:.*]]: memref<?xindex>,
//  CHECK-SAME: %[[C:.*]]: f32) {
//       CHECK: call @lexInsertF32(%[[A]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, memref<?xindex>, f32) -> ()
//       CHECK: return
func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
                    %arg1: memref<?xindex>,
                    %arg2: f32) {
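  // The lexicographic insertion is lowered to a call of the
  // type-specialized runtime entry point lexInsertF32.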
  sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf32, #SparseVector>, memref<?xindex>, f32
  return
}

// CHECK-LABEL: func @sparse_expansion()
//    %[[S:.*]] = call @sparseDimSize
//    %[[V:.*]] = memref.alloca(%[[S]]) : memref<?xf64>
//    %[[F:.*]] = memref.alloca(%[[S]]) : memref<?xi1>
//    %[[A:.*]] = memref.alloca(%[[S]]) : memref<?xindex>
//    linalg.fill(%{{.*}}, %[[V]]) : f64, memref<?xf64>
//    linalg.fill(%{{.*}}, %[[F]]) : i1, memref<?xi1>
//       CHECK: return
func @sparse_expansion() {
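  // Expansion introduces temporary values/filled/added buffers whose
  // size is obtained from a sparseDimSize query, with the values and
  // filled buffers initialized by linalg.fill (see the lines above).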
  %c = arith.constant 8 : index
  %0 = sparse_tensor.init [%c, %c] : tensor<8x8xf64, #SparseMatrix>
  %values, %filled, %added, %count = sparse_tensor.expand %0
    : tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}

// CHECK-LABEL: func @sparse_compression(
//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
//       CHECK: call @expInsertF64(%[[A]],
//       CHECK: return
func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
                         %arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                         %arg4: memref<?xindex>, %arg5: index) {
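  // Compressing the expanded buffers back into the sparse tensor is
  // lowered to a call of the type-specialized runtime entry point
  // expInsertF64.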
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
    : tensor<8x8xf64, #SparseMatrix>, memref<?xindex>, memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}