// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s

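// This file checks the lowering of sparse-to-dense sparse_tensor.convert
// ops: each conversion calls @newSparseTensor with the "to iterator"
// action code (the constant 5 below), then copies every stored element
// into a zero-initialized dense buffer inside an scf.while loop. The
// encodings cover a compressed 1-d vector, a CSR-style matrix (dense
// outer, compressed inner dimension), and a 3-d tensor whose dimension
// ordering permutes (i,j,k) to (k,i,j).
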
#SparseVector = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"]
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed"]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

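// Static 1-d case: the size of the single dimension folds to the
// constant 13, the i32 element type appears as type code 4, and the
// dense result buffer is zero-filled before the element-copy loop.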
// CHECK-LABEL: func @sparse_convert_1d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<13xi32>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I13:.*]] = arith.constant 13 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I13]], %[[SizesS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
//   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<13xi32>
//   CHECK-DAG: linalg.fill(%[[zeroI32]], %[[M]]) : i32, memref<13xi32>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<13xi32>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
//       CHECK: return %[[T]] : tensor<13xi32>
func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
  %0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
  return %0 : tensor<13xi32>
}

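// Dynamic 1-d case: the unknown dimension size is queried from the
// sparse storage via @sparseDimSize and feeds both the sizes array and
// the memref.alloc of the dense result.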
// CHECK-LABEL: func @sparse_convert_1d_dyn(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?xi32>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
//   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?xi32>
//   CHECK-DAG: linalg.fill(%[[zeroI32]], %[[M]]) : i32, memref<?xi32>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<?xi32>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
//       CHECK: return %[[T]] : tensor<?xi32>
func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
  return %0 : tensor<?xi32>
}

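// Static 2-d case: both sizes are constants, and the iterator now yields
// two coordinates per element, loaded separately before the store.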
// CHECK-LABEL: func @sparse_convert_2d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x4xf64>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
//       CHECK: return %[[T]] : tensor<2x4xf64>
func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
  return %0 : tensor<2x4xf64>
}

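// Same conversion with a dynamic dimension 0, obtained by a single
// @sparseDimSize call at index 0.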
// CHECK-LABEL: func @sparse_convert_2d_dyn0(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x4xf64>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
//       CHECK: return %[[T]] : tensor<?x4xf64>
func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
  return %0 : tensor<?x4xf64>
}

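// Same conversion with a dynamic dimension 1; only that dimension is
// queried at runtime.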
// CHECK-LABEL: func @sparse_convert_2d_dyn1(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x?xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI1]]) : memref<2x?xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x?xf64>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x?xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
//       CHECK: return %[[T]] : tensor<2x?xf64>
func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
  return %0 : tensor<2x?xf64>
}

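// Fully dynamic 2-d case: both dimensions are queried and both sizes
// are passed to memref.alloc.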
// CHECK-LABEL: func @sparse_convert_2d_dyn2(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]], %[[SizeI1]]) : memref<?x?xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x?xf64>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x?xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
//       CHECK: return %[[T]] : tensor<?x?xf64>
func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
  return %0 : tensor<?x?xf64>
}

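// 3-d case: an identity permutation is passed to the runtime even though
// #SparseTensor stores its dimensions in (k,i,j) order, so the generated
// loop indexes the dense output directly with the returned coordinates.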
// CHECK-LABEL: func @sparse_convert_3d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x3x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[I3:.*]] = arith.constant 3 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<3xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<3xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<3xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<3xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I2]]] : memref<3xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I3]], %[[SizesS]][%[[I1]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I2]]] : memref<3xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[PermS]][%[[I2]]] : memref<3xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x3x4xf64>
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<3xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<3xindex>
//       CHECK:   %[[Iv2:.*]] = memref.load %[[IndS]][%[[I2]]] : memref<3xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]], %[[Iv2]]] : memref<2x3x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
//       CHECK: return %[[T]] : tensor<2x3x4xf64>
func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
  return %0 : tensor<2x3x4xf64>
}