// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{ dimLevelType = ["compressed"] }>

#SparseVector64 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 64,
  indexBitWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  dimLevelType = ["compressed"],
  pointerBitWidth = 32,
  indexBitWidth = 32
}>

#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"] }>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "compressed", "compressed"],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

// CHECK-LABEL: func @sparse_dim1d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
func @sparse_dim1d(%arg0: tensor<?xf64, #SparseVector>) -> index {
  %c = arith.constant 0 : index
  %0 = tensor.dim %arg0, %c : tensor<?xf64, #SparseVector>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 2 : index
// CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]])
// CHECK: return %[[D]] : index
func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index {
  // Querying for dimension 1 in the tensor type needs to be
  // permuted into querying for dimension 2 in the stored sparse
  // tensor scheme, since the latter honors the dimOrdering.
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<?x?x?xf64, #SparseTensor>
  return %0 : index
}

// CHECK-LABEL: func @sparse_dim3d_const(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 20 : index
// CHECK: return %[[C]] : index
func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index {
  // Querying for dimension 1 in the tensor type can be directly
  // folded into the right value (even though it corresponds
  // to dimension 2 in the stored sparse tensor scheme).
  %c = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c : tensor<10x20x30xf64, #SparseTensor>
  return %0 : index
}

// CHECK-LABEL: func @sparse_new1d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
  return %0 : tensor<128xf64, #SparseVector>
}

// CHECK-LABEL: func @sparse_new2d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
  return %0 : tensor<?x?xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_new3d(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
  return %0 : tensor<?x?x?xf32, #SparseTensor>
}

// CHECK-LABEL: func @sparse_init(
// CHECK-SAME: %[[I:.*]]: index,
// CHECK-SAME: %[[J:.*]]: index) -> !llvm.ptr<i8>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
// CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
  %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf64, #SparseMatrix>
  return %0 : tensor<?x?xf64, #SparseMatrix>
}
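
// The tests below verify that releasing a sparse tensor maps to a runtime call
// that frees the underlying storage, and that conversions and casts between
// compatible sparse types fold away into a plain forwarding of the pointer.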

// CHECK-LABEL: func @sparse_release(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return
func @sparse_release(%arg0: tensor<128xf64, #SparseVector>) {
  sparse_tensor.release %arg0 : tensor<128xf64, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_nop_convert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_hidden_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_nop_cast(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_nop_cast(%arg0: tensor<64xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = tensor.cast %arg0 : tensor<64xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_convert_1d(
// CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
// CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xi32>
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex>
// CHECK: call @addEltI32(%[[C]], %[[E]], %[[T]], %[[Z]])
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
  return %0 : tensor<?xi32, #SparseVector>
}

// CHECK-LABEL: func @sparse_convert_1d_ss(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}
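
// As in the 1-D case above, a dense-to-sparse conversion first creates a COO
// builder (newSparseTensor with a null pointer), loops over all elements to
// add them one by one via addElt, and then materializes the final sparse
// storage with a second newSparseTensor call.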

// CHECK-LABEL: func @sparse_convert_2d(
// CHECK-SAME: %[[A:.*]]: tensor<2x4xf64>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
// CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
// CHECK: scf.for %[[J:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]]] : tensor<2x4xf64>
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<2xindex>
// CHECK: memref.store %[[J]], %[[M]][%[[C1]]] : memref<2xindex>
// CHECK: call @addEltF64(%[[C]], %[[E]], %[[T]], %[[Z]])
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
  return %0 : tensor<2x4xf64, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_constant() -> !llvm.ptr<i8> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C2]] step %[[C1]] {
// CHECK: memref.store %{{.*}}, %[[M]][%[[C0]]] : memref<2xindex>
// CHECK: memref.store %{{.*}}, %[[M]][%[[C1]]] : memref<2xindex>
// CHECK: %[[V:.*]] = tensor.extract %{{.*}}[%[[I]]] : tensor<2xf32>
// CHECK: call @addEltF32(%{{.*}}, %[[V]], %[[N]], %{{.*}})
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix> {
  // Initialize a tensor.
  %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
  // Convert the tensor to a sparse tensor.
  %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #SparseMatrix>
  return %1 : tensor<8x7xf32, #SparseMatrix>
}

// CHECK-LABEL: func @sparse_convert_3d(
// CHECK-SAME: %[[A:.*]]: tensor<?x?x?xf64>) -> !llvm.ptr<i8>
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[U1:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
// CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
// CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref<?xindex>
// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U1]] step %[[C1]] {
// CHECK: scf.for %[[J:.*]] = %[[C0]] to %[[U2]] step %[[C1]] {
// CHECK: scf.for %[[K:.*]] = %[[C0]] to %[[U3]] step %[[C1]] {
// CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]], %[[J]], %[[K]]] : tensor<?x?x?xf64>
// CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<3xindex>
// CHECK: memref.store %[[J]], %[[M]][%[[C1]]] : memref<3xindex>
// CHECK: memref.store %[[K]], %[[M]][%[[C2]]] : memref<3xindex>
// CHECK: call @addEltF64(%[[C]], %[[E]], %[[N]], %[[Z]])
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
  return %0 : tensor<?x?x?xf64, #SparseTensor>
}

// CHECK-LABEL: func @sparse_pointers(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_pointers64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_pointers32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}
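
// The indices and values queries below follow the same pattern as the
// pointers queries above: each maps to a runtime entry point specialized
// for the overhead bit width or the element type of the tensor.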

// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}

// CHECK-LABEL: func @sparse_indices64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
// CHECK: return %[[T]] : memref<?xi64>
func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref<?xi64> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector64> to memref<?xi64>
  return %0 : memref<?xi64>
}

// CHECK-LABEL: func @sparse_indices32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector32> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_valuesf64(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
func @sparse_valuesf64(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xf64> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf64, #SparseVector> to memref<?xf64>
  return %0 : memref<?xf64>
}

// CHECK-LABEL: func @sparse_valuesf32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK: return %[[T]] : memref<?xf32>
func @sparse_valuesf32(%arg0: tensor<128xf32, #SparseVector>) -> memref<?xf32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xf32, #SparseVector> to memref<?xf32>
  return %0 : memref<?xf32>
}

// CHECK-LABEL: func @sparse_valuesi32(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func @sparse_valuesi32(%arg0: tensor<128xi32, #SparseVector>) -> memref<?xi32> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi32, #SparseVector> to memref<?xi32>
  return %0 : memref<?xi32>
}

// CHECK-LABEL: func @sparse_valuesi16(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI16(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi16>
// CHECK: return %[[T]] : memref<?xi16>
func @sparse_valuesi16(%arg0: tensor<128xi16, #SparseVector>) -> memref<?xi16> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi16, #SparseVector> to memref<?xi16>
  return %0 : memref<?xi16>
}

// CHECK-LABEL: func @sparse_valuesi8(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = call @sparseValuesI8(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xi8>
// CHECK: return %[[T]] : memref<?xi8>
func @sparse_valuesi8(%arg0: tensor<128xi8, #SparseVector>) -> memref<?xi8> {
  %0 = sparse_tensor.values %arg0 : tensor<128xi8, #SparseVector> to memref<?xi8>
  return %0 : memref<?xi8>
}

// CHECK-LABEL: func @sparse_reconstruct(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_reconstruct(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_reconstruct_ins(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>
// CHECK: call @endInsert(%[[A]]) : (!llvm.ptr<i8>) -> ()
// CHECK: return %[[A]] : !llvm.ptr<i8>
func @sparse_reconstruct_ins(%arg0: tensor<128xf32, #SparseVector>) -> tensor<128xf32, #SparseVector> {
  %0 = sparse_tensor.load %arg0 hasInserts : tensor<128xf32, #SparseVector>
  return %0 : tensor<128xf32, #SparseVector>
}

// CHECK-LABEL: func @sparse_insert(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
// CHECK-SAME: %[[B:.*]]: memref<?xindex>,
// CHECK-SAME: %[[C:.*]]: f32) {
// CHECK: call @lexInsertF32(%[[A]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, memref<?xindex>, f32) -> ()
// CHECK: return
func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
                    %arg1: memref<?xindex>,
                    %arg2: f32) {
  sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf32, #SparseVector>, memref<?xindex>, f32
  return
}

// CHECK-LABEL: func @sparse_expansion()
//   %[[S:.*]] = call @sparseDimSize
//   %[[V:.*]] = memref.alloca(%[[S]]) : memref<?xf64>
//   %[[F:.*]] = memref.alloca(%[[S]]) : memref<?xi1>
//   %[[A:.*]] = memref.alloca(%[[S]]) : memref<?xindex>
//   linalg.fill(%{{.*}}, %[[V]]) : f64, memref<?xf64>
//   linalg.fill(%{{.*}}, %[[F]]) : i1, memref<?xi1>
// CHECK: return
func @sparse_expansion() {
  %c = arith.constant 8 : index
  %0 = sparse_tensor.init [%c, %c] : tensor<8x8xf64, #SparseMatrix>
  %values, %filled, %added, %count = sparse_tensor.expand %0
    : tensor<8x8xf64, #SparseMatrix> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}

// CHECK-LABEL: func @sparse_compression(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
// CHECK: call @expInsertF64(%[[A]],
// CHECK: return
func @sparse_compression(%arg0: tensor<8x8xf64, #SparseMatrix>,
                         %arg1: memref<?xindex>, %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                         %arg4: memref<?xindex>, %arg5: index) {
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
    : tensor<8x8xf64, #SparseMatrix>, memref<?xindex>, memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}