// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s

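// These tests verify the lowering of sparse_tensor.convert to dense result
// types through calls into the sparse runtime: the converted code allocates a
// zero-initialized dense buffer, obtains an element iterator over the source
// tensor from newSparseTensor, and scatters each stored element into the
// buffer inside an scf.while loop.
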
#SparseVector = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"]
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed"]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

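// Conversion of a statically shaped 1-D vector. Note that with --cse a single
// i32 constant 6 (hence the name ElemTpActionToIter) doubles as both the i32
// element-type code and the "convert to iterator" action code passed to
// newSparseTensor.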
// CHECK-LABEL: func @sparse_convert_1d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<13xi32>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I13:.*]] = arith.constant 13 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I13]], %[[SizesS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
//   CHECK-DAG: %[[ElemTpActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTpActionToIter]], %[[ElemTpActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<13xi32>
//   CHECK-DAG: linalg.fill ins(%[[zeroI32]] : i32) outs(%[[M]] : memref<13xi32>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<13xi32>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<13xi32>
//       CHECK: return %[[T]] : tensor<13xi32>
func.func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13xi32> {
  %0 = sparse_tensor.convert %arg0 : tensor<13xi32, #SparseVector> to tensor<13xi32>
  return %0 : tensor<13xi32>
}

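// Same conversion for a 1-D vector of dynamic size: the dimension size is
// queried from the runtime via sparseDimSize and used both in the sizes array
// passed to newSparseTensor and for the dynamic buffer allocation.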
// CHECK-LABEL: func @sparse_convert_1d_dyn(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?xi32>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
//   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
//   CHECK-DAG: %[[ElemTpActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTpActionToIter]], %[[ElemTpActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?xi32>
//   CHECK-DAG: linalg.fill ins(%[[zeroI32]] : i32) outs(%[[M]] : memref<?xi32>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<?xi32>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?xi32>
//       CHECK: return %[[T]] : tensor<?xi32>
func.func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
  return %0 : tensor<?xi32>
}

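// Conversion of a statically shaped 2-D matrix. For f64 the element-type code
// no longer coincides with the action code, so only the "convert to iterator"
// action constant 6 is matched by name below.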
// CHECK-LABEL: func @sparse_convert_2d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill ins(%[[E0]] : f64) outs(%[[M]] : memref<2x4xf64>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x4xf64>
//       CHECK: return %[[T]] : tensor<2x4xf64>
func.func @sparse_convert_2d(%arg0: tensor<2x4xf64, #SparseMatrix>) -> tensor<2x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64, #SparseMatrix> to tensor<2x4xf64>
  return %0 : tensor<2x4xf64>
}

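// 2-D conversion with a dynamic outer dimension (?x4): only dimension 0 is
// queried via sparseDimSize.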
// CHECK-LABEL: func @sparse_convert_2d_dyn0(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill ins(%[[E0]] : f64) outs(%[[M]] : memref<?x4xf64>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x4xf64>
//       CHECK: return %[[T]] : tensor<?x4xf64>
func.func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
  return %0 : tensor<?x4xf64>
}

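// 2-D conversion with a dynamic inner dimension (2x?): only dimension 1 is
// queried via sparseDimSize.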
// CHECK-LABEL: func @sparse_convert_2d_dyn1(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x?xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI1]]) : memref<2x?xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill ins(%[[E0]] : f64) outs(%[[M]] : memref<2x?xf64>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x?xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x?xf64>
//       CHECK: return %[[T]] : tensor<2x?xf64>
func.func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
  return %0 : tensor<2x?xf64>
}

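// 2-D conversion with both dimensions dynamic (?x?): both sizes are queried
// via sparseDimSize and passed to the allocation.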
// CHECK-LABEL: func @sparse_convert_2d_dyn2(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
//   CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]], %[[SizeI1]]) : memref<?x?xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill ins(%[[E0]] : f64) outs(%[[M]] : memref<?x?xf64>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x?xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<?x?xf64>
//       CHECK: return %[[T]] : tensor<?x?xf64>
func.func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
  return %0 : tensor<?x?xf64>
}

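// Conversion of a statically shaped 3-D tensor whose encoding permutes the
// storage order to (k,i,j). As the identity permutation and the direct stores
// below show, the iterator still delivers indices in the original (i,j,k)
// dimension order, so no reshuffling is needed when writing into the dense
// buffer.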
// CHECK-LABEL: func @sparse_convert_3d(
//  CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x3x4xf64>
//   CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
//   CHECK-DAG: %[[I3:.*]] = arith.constant 3 : index
//   CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
//   CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<3xi8>
//   CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<3xi8> to memref<?xi8>
//   CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<3xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<3xi8>
//   CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I2]]] : memref<3xi8>
//   CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I3]], %[[SizesS]][%[[I1]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I2]]] : memref<3xindex>
//   CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<3xindex>
//   CHECK-DAG: memref.store %[[I2]], %[[PermS]][%[[I2]]] : memref<3xindex>
//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
//   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
//   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
//   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
//   CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
//   CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
//   CHECK-DAG: linalg.fill ins(%[[E0]] : f64) outs(%[[M]] : memref<2x3x4xf64>)
//       CHECK: scf.while : () -> () {
//       CHECK:   %[[Cond:.*]] = func.call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
//       CHECK:   scf.condition(%[[Cond]])
//       CHECK: } do {
//       CHECK:   %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<3xindex>
//       CHECK:   %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<3xindex>
//       CHECK:   %[[Iv2:.*]] = memref.load %[[IndS]][%[[I2]]] : memref<3xindex>
//       CHECK:   %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
//       CHECK:   memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]], %[[Iv2]]] : memref<2x3x4xf64>
//       CHECK:   scf.yield
//       CHECK: }
//       CHECK: %[[T:.*]] = bufferization.to_tensor %[[M]] : memref<2x3x4xf64>
//       CHECK: return %[[T]] : tensor<2x3x4xf64>
func.func @sparse_convert_3d(%arg0: tensor<2x3x4xf64, #SparseTensor>) -> tensor<2x3x4xf64> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseTensor> to tensor<2x3x4xf64>
  return %0 : tensor<2x3x4xf64>
}