// First use with `kViaCOO` for sparse2sparse conversion (the old way).
// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=1" \
// RUN:   --canonicalize --cse | FileCheck %s
//
// Now again with `kAuto` (the new default).
// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=0" \
// RUN:   --canonicalize --cse | FileCheck %s -check-prefix=CHECKAUTO

#SparseVector = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"]
}>

#SparseVector64 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 64,
  indexBitWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 32,
  indexBitWidth = 32
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed"]
}>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

// CHECK-LABEL: func @sparse_dim1d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
func.func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
  %c = arith.constant 0 : index
  %0 = tensor.dim %arg0, %c : tensor<?xf64, #SparseVector>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 2 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
func.func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
  // Querying for dimension 1 in the tensor type needs to be
  // permuted into querying for dimension 2 in the stored sparse
  // tensor scheme, since the latter honors the dimOrdering.
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<?x?x?xf64, #SparseTensor>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d_const(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 20 : index
// CHECK: return %[[C]] : index
func.func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
  // Querying for dimension 1 in the tensor type can be directly
  // folded into the right value (even though it corresponds
  // to dimension 2 in the stored sparse tensor scheme).
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<10x20x30xf64, #SparseTensor>
  return %0 : index
}

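// The `sparse_tensor.new` cases below construct a sparse tensor from a
// file: each lowers to a single `newSparseTensor` call that takes the
// opaque file pointer as its last argument, with the action constant 1
// (captured as `FromFile`) selecting the read-from-file path.
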
// CHECK-LABEL: func @sparse_new1d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
  return %0 : tensor<128xf64, #SparseVector>
}

// CHECK-LABEL: func @sparse_new2d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
  return %0 : tensor<?x?xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_new3d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
  return %0 : tensor<?x?x?xf32, #SparseTensor>
}

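// Allocating a sparse tensor with `bufferization.alloc_tensor` likewise
// lowers to `newSparseTensor`, but with action constant 0 (captured as
// `Empty`) and a null pointer as the last argument, since there is no
// source to construct from.
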
// CHECK-LABEL: func @sparse_init(
// CHECK-SAME: %[[I:.*]]: index,
// CHECK-SAME: %[[J:.*]]: index) -> !llvm.ptr<i8>
// CHECK-DAG: %[[Empty:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
// CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #SparseMatrix>
  %1 = sparse_tensor.load %0 : tensor<?x?xf64, #SparseMatrix>
  return %1 : tensor<?x?xf64, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_release(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return
func.func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
  bufferization.dealloc_tensor %arg0 : tensor<128xf64, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_nop_convert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_hidden_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func.func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

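// The dense-to-sparse conversions that follow all use the same scheme:
// an empty COO tensor is created first (action constant 4, `EmptyCOO`),
// a loop nest extracts elements from the dense source and passes them to
// the type-specialized `addElt*` runtime calls, and a final
// `newSparseTensor` call with action constant 2 (`FromCOO`) materializes
// the destination before the temporary COO is deleted.
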
// CHECK-LABEL: func @sparse_convert_1d(
// CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8> {
// CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[I0:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
// CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[BUF:.*]] = memref.alloca() : memref<i32>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xi32>
// CHECK: %[[N:.*]] = arith.cmpi ne, %[[E]], %[[I0]] : i32
// CHECK: scf.if %[[N]] {
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex>
// CHECK: memref.store %[[E]], %[[BUF]][] : memref<i32>
// CHECK: call @addEltI32(%[[C]], %[[BUF]], %[[T]], %[[Z]])
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOI32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
  return %0 : tensor<?xi32, #SparseVector>
}

// CHECK-LABEL: func @sparse_convert_complex(
// CHECK-SAME: %[[A:.*]]: tensor<100xcomplex<f64>>) -> !llvm.ptr<i8> {
// CHECK-DAG: %[[CC:.*]] = complex.constant [0.000000e+00, 0.000000e+00] : complex<f64>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C100:.*]] = arith.constant 100 : index
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C100]] step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<100xcomplex<f64>>
// CHECK: %[[N:.*]] = complex.neq %[[E]], %[[CC]] : complex<f64>
// CHECK: scf.if %[[N]] {
// CHECK: memref.store %[[I]], %{{.*}}[%[[C0]]] : memref<1xindex>
// CHECK: call @addEltC64
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor
// CHECK: call @delSparseTensorCOOC64
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100xcomplex<f64>, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<100xcomplex<f64>> to tensor<100xcomplex<f64>, #SparseVector>
  return %0 : tensor<100xcomplex<f64>, #SparseVector>
}

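// The sparse-to-sparse conversion below is checked under both strategies
// from the RUN lines: with `kViaCOO` (CHECK prefix) the source is first
// exported to COO (action constant 5, `ToCOO`) and re-imported with
// `FromCOO`, whereas with `kAuto` (CHECKAUTO prefix) a single call with
// action constant 3 (`SparseToSparse`) performs the conversion directly.
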
// CHECK-LABEL: func @sparse_convert_1d_ss(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
// CHECKAUTO-LABEL: func @sparse_convert_1d_ss(
// CHECKAUTO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECKAUTO-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
// CHECKAUTO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECKAUTO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECKAUTO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECKAUTO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECKAUTO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECKAUTO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECKAUTO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
// CHECKAUTO: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// CHECK-LABEL: func @sparse_convert_2d(
// CHECK-SAME: %[[A:.*]]: tensor<2x4xf64>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
// CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[BUF:.*]] = memref.alloca() : memref<f64>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
// CHECK: scf.for %[[J:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]]] : tensor<2x4xf64>
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<2xindex>
// CHECK: memref.store %[[J]], %[[M]][%[[C1]]] : memref<2xindex>
// CHECK: memref.store %[[E]], %[[BUF]][] : memref<f64>
// CHECK: call @addEltF64(%[[C]], %[[BUF]], %[[T]], %[[Z]])
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF64(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
  return %0 : tensor<2x4xf64, #SparseMatrix>
}

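// Converting a sparse constant attribute is cheaper than converting a
// general dense tensor: the loop in the next test runs over the two
// stored entries of the constant only (trip count 2), not over the full
// 8x7 iteration space.
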
// CHECK-LABEL: func @sparse_constant() -> !llvm.ptr<i8> {
// CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[BUF:.*]] = memref.alloca() : memref<f32>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C2]] step %[[C1]] {
// CHECK: memref.store %{{.*}}, %[[M]][%[[C0]]] : memref<2xindex>
// CHECK: memref.store %{{.*}}, %[[M]][%[[C1]]] : memref<2xindex>
// CHECK: %[[V:.*]] = tensor.extract %{{.*}}[%[[I]]] : tensor<2xf32>
// CHECK: memref.store %[[V]], %[[BUF]][] : memref<f32>
// CHECK: call @addEltF32(%{{.*}}, %[[BUF]], %[[N]], %{{.*}})
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF32(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix> {
  // Initialize a tensor.
  %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
  // Convert the tensor to a sparse tensor.
  %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #SparseMatrix>
  return %1 : tensor<8x7xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_convert_3d(
// CHECK-SAME: %[[A:.*]]: tensor<?x?x?xf64>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[U1:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[BUF:.*]] = memref.alloca() : memref<f64>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U1]] step %[[C1]] {
// CHECK: scf.for %[[J:.*]] = %[[C0]] to %[[U2]] step %[[C1]] {
// CHECK: scf.for %[[K:.*]] = %[[C0]] to %[[U3]] step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]], %[[K]]] : tensor<?x?x?xf64>
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<3xindex>
// CHECK: memref.store %[[J]], %[[M]][%[[C1]]] : memref<3xindex>
// CHECK: memref.store %[[K]], %[[M]][%[[C2]]] : memref<3xindex>
// CHECK: memref.store %[[E]], %[[BUF]][] : memref<f64>
// CHECK: call @addEltF64(%[[C]], %[[BUF]], %[[N]], %[[Z]])
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
// CHECK: call @delSparseTensorCOOF64(%[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
  return %0 : tensor<?x?x?xf64, #SparseTensor>
}

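// The `sparse_tensor.pointers` and `sparse_tensor.indices` accessors below
// lower to runtime calls whose names reflect the pointer/index bit width of
// the encoding: the default `index` width maps to `sparsePointers0` and
// `sparseIndices0`, while explicit 64-bit and 32-bit widths map to the
// corresponding `64` and `32` variants.
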
// CHECK-LABEL: func @sparse_pointers(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_pointers64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
func.func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_pointers32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_indices64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
func.func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_indices32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func.func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}

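// Similarly, `sparse_tensor.values` lowers to a runtime call specialized on
// the element type (F64, F32, I32, I16, and I8 in the cases below).
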
// CHECK-LABEL: func @sparse_valuesf64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
func.func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
  return %0 : memref<?xf64>
}

// CHECK-LABEL: func @sparse_valuesf32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK: return %[[T]] : memref<?xf32>
func.func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf32, #SparseVector> to memref<?xf32>
  return %0 : memref<?xf32>
}

// CHECK-LABEL: func @sparse_valuesi32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func.func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi32, #SparseVector> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_valuesi16(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI16(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi16>
// CHECK: return %[[T]] : memref<?xi16>
func.func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi16, #SparseVector> to memref<?xi16>
  return %0 : memref<?xi16>
}

// CHECK-LABEL: func @sparse_valuesi8(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI8(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi8>
// CHECK: return %[[T]] : memref<?xi8>
func.func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi8, #SparseVector> to memref<?xi8>
  return %0 : memref<?xi8>
}

// CHECK-LABEL: func @sparse_reconstruct(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func.func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_reconstruct_ins(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: call @endInsert(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return %[[A]] : !llvm.ptr<i8>
func.func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 hasInserts : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_insert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
// CHECK-SAME: %[[B:.*]]: memref<?xindex>,
// CHECK-SAME: %[[C:.*]]: memref<f32>) {
// CHECK: call @lexInsertF32(%[[A]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f32>) -> ()
// CHECK: return
func.func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
                         %arg1: memref<?xindex>,
                         %arg2: memref<f32>) {
  sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf32, #SparseVector>, memref<?xindex>, memref<f32>
  return
}

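// The expand/compress pair below exercises the expanded access pattern:
// `sparse_tensor.expand` allocates the values, filled, and added buffers
// (sized by a dimension size queried from the runtime) and zero-initializes
// the first two, while `sparse_tensor.compress` hands the buffers to
// `expInsertF64` and then deallocates them.
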
// CHECK-LABEL: func @sparse_expansion()
// CHECK: %[[S:.*]] = call @sparseDimSize
// CHECK: %[[A:.*]] = memref.alloc(%[[S]]) : memref<?xf64>
// CHECK: %[[B:.*]] = memref.alloc(%[[S]]) : memref<?xi1>
// CHECK: %[[C:.*]] = memref.alloc(%[[S]]) : memref<?xindex>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[A]] : memref<?xf64>)
// CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
// CHECK: return %[[C]] : memref<?xindex>
func.func @sparse_expansion() -> memref<?xindex> {
  %0 = bufferization.alloc_tensor() : tensor<8x8xf64, #SparseMatrix>
  %values, %filled, %added, %count = sparse_tensor.expand %0
    : tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return %added : memref<?xindex>
}

// CHECK-LABEL: func @sparse_compression(
// CHECK-SAME: %[[A:.*0]]: !llvm.ptr<i8>,
// CHECK-SAME: %[[B:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[C:.*2]]: memref<?xf64>,
// CHECK-SAME: %[[D:.*3]]: memref<?xi1>,
// CHECK-SAME: %[[E:.*4]]: memref<?xindex>,
// CHECK: call @expInsertF64(%[[A]],
// CHECK-DAG: memref.dealloc %[[C]] : memref<?xf64>
// CHECK-DAG: memref.dealloc %[[D]] : memref<?xi1>
// CHECK-DAG: memref.dealloc %[[E]] : memref<?xindex>
// CHECK: return
func.func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
                              %arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                              %arg4: memref<?xindex>, %arg5: index) {
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
    : tensor<8x8xf64, #SparseMatrix>, memref<?xindex>, memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}

// CHECK-LABEL: func @sparse_out1(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
// CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
// CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
// CHECK-DAG: %[[Sort:.*]] = arith.constant false
// CHECK: %[[COO:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
// CHECK: call @outSparseTensorF64(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
// CHECK: call @delSparseTensorCOOF64(%[[COO]])
// CHECK: return
func.func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
  sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
  return
}

// CHECK-LABEL: func @sparse_out2(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
// CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
// CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
// CHECK-DAG: %[[Sort:.*]] = arith.constant true
// CHECK: %[[COO:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
// CHECK: call @outSparseTensorF32(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
// CHECK: call @delSparseTensorCOOF32(%[[COO]])
// CHECK: return
func.func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
  sparse_tensor.out %arg0, %arg1 : tensor<?x?x?xf32, #SparseTensor>, !llvm.ptr<i8>
  return
}

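// Note the sort flag passed to `outSparseTensor*` in the two cases above:
// it is false for the identity-ordered #SparseMatrix but true for the
// permuted #SparseTensor, presumably because the exported COO must be
// re-sorted into the original dimension order before being written out.
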
// CHECK-LABEL: func @sparse_and_dense_init(
// CHECK: %[[S:.*]] = call @newSparseTensor
// CHECK: %[[D:.*]] = bufferization.alloc_tensor
// CHECK: return %[[S]], %[[D]] : !llvm.ptr<i8>, tensor<?x?xf64>
func.func @sparse_and_dense_init(%arg0: index, %arg1: index)
  -> (tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>) {
  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #SparseMatrix>
  %1 = sparse_tensor.load %0 : tensor<?x?xf64, #SparseMatrix>
  %2 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64>
  return %1, %2 : tensor<?x?xf64, #SparseMatrix>, tensor<?x?xf64>
}