// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>

//
// Traits for tensor operations.
//

// Unary vector kernel: x(i) = OP(a(i)).
#trait_vec_scale = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}

// Binary vector kernel: x(i) = a(i) OP b(i).
#trait_vec_op = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>,  // b (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}

// Binary matrix kernel: X(i,j) = A(i,j) OP B(i,j).
#trait_mat_op = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>,  // B (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) OP B(i,j)"
}

//
// Contains test cases for the sparse_tensor.binary operator (different cases when left/right/overlap
// is empty/identity, etc).
//

module {
  // Creates a new sparse vector using the minimum values from two input sparse vectors.
  // When there is no overlap, include the present value in the output
  // (left=identity, right=identity pass single-sided values through unchanged).
  func.func @vector_min(%arga: tensor<?xf64, #SparseVector>,
                        %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec_op
       ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = sparse_tensor.binary %a, %b : f64, f64 to f64
                overlap={
                  ^bb0(%a0: f64, %b0: f64):
                    // min(a0, b0) where both vectors have a stored entry.
                    %cmp = arith.cmpf "olt", %a0, %b0 : f64
                    %2 = arith.select %cmp, %a0, %b0: f64
                    sparse_tensor.yield %2 : f64
                }
                left=identity
                right=identity
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

  // Creates a new sparse vector by multiplying a sparse vector with a dense vector.
  // When there is no overlap, leave the result empty.
71a8308020SRiver Riddle func.func @vector_mul(%arga: tensor<?xf64, #SparseVector>, 722c332660SJim Kitchen %argb: tensor<?xf64>) -> tensor<?xf64, #SparseVector> { 732c332660SJim Kitchen %c = arith.constant 0 : index 742c332660SJim Kitchen %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector> 756232a8f3SMatthias Springer %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector> 762c332660SJim Kitchen %0 = linalg.generic #trait_vec_op 772c332660SJim Kitchen ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64>) 782c332660SJim Kitchen outs(%xv: tensor<?xf64, #SparseVector>) { 792c332660SJim Kitchen ^bb(%a: f64, %b: f64, %x: f64): 802c332660SJim Kitchen %1 = sparse_tensor.binary %a, %b : f64, f64 to f64 812c332660SJim Kitchen overlap={ 822c332660SJim Kitchen ^bb0(%a0: f64, %b0: f64): 832c332660SJim Kitchen %ret = arith.mulf %a0, %b0 : f64 842c332660SJim Kitchen sparse_tensor.yield %ret : f64 852c332660SJim Kitchen } 862c332660SJim Kitchen left={} 872c332660SJim Kitchen right={} 882c332660SJim Kitchen linalg.yield %1 : f64 892c332660SJim Kitchen } -> tensor<?xf64, #SparseVector> 902c332660SJim Kitchen return %0 : tensor<?xf64, #SparseVector> 912c332660SJim Kitchen } 922c332660SJim Kitchen 932c332660SJim Kitchen // Take a set difference of two sparse vectors. The result will include only those 942c332660SJim Kitchen // sparse elements present in the first, but not the second vector. 
95a8308020SRiver Riddle func.func @vector_setdiff(%arga: tensor<?xf64, #SparseVector>, 962c332660SJim Kitchen %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> { 972c332660SJim Kitchen %c = arith.constant 0 : index 982c332660SJim Kitchen %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector> 996232a8f3SMatthias Springer %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector> 1002c332660SJim Kitchen %0 = linalg.generic #trait_vec_op 1012c332660SJim Kitchen ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>) 1022c332660SJim Kitchen outs(%xv: tensor<?xf64, #SparseVector>) { 1032c332660SJim Kitchen ^bb(%a: f64, %b: f64, %x: f64): 1042c332660SJim Kitchen %1 = sparse_tensor.binary %a, %b : f64, f64 to f64 1052c332660SJim Kitchen overlap={} 1062c332660SJim Kitchen left=identity 1072c332660SJim Kitchen right={} 1082c332660SJim Kitchen linalg.yield %1 : f64 1092c332660SJim Kitchen } -> tensor<?xf64, #SparseVector> 1102c332660SJim Kitchen return %0 : tensor<?xf64, #SparseVector> 1112c332660SJim Kitchen } 1122c332660SJim Kitchen 1132c332660SJim Kitchen // Return the index of each entry 114a8308020SRiver Riddle func.func @vector_index(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> { 1152c332660SJim Kitchen %c = arith.constant 0 : index 1162c332660SJim Kitchen %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector> 1176232a8f3SMatthias Springer %xv = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector> 1182c332660SJim Kitchen %0 = linalg.generic #trait_vec_scale 1192c332660SJim Kitchen ins(%arga: tensor<?xf64, #SparseVector>) 1202c332660SJim Kitchen outs(%xv: tensor<?xi32, #SparseVector>) { 1212c332660SJim Kitchen ^bb(%a: f64, %x: i32): 1222c332660SJim Kitchen %idx = linalg.index 0 : index 1232c332660SJim Kitchen %1 = sparse_tensor.binary %a, %idx : f64, index to i32 1242c332660SJim Kitchen overlap={ 1252c332660SJim Kitchen ^bb0(%x0: f64, %i: index): 1262c332660SJim Kitchen %ret = 
arith.index_cast %i : index to i32 1272c332660SJim Kitchen sparse_tensor.yield %ret : i32 1282c332660SJim Kitchen } 1292c332660SJim Kitchen left={} 1302c332660SJim Kitchen right={} 1312c332660SJim Kitchen linalg.yield %1 : i32 1322c332660SJim Kitchen } -> tensor<?xi32, #SparseVector> 1332c332660SJim Kitchen return %0 : tensor<?xi32, #SparseVector> 1342c332660SJim Kitchen } 1352c332660SJim Kitchen 1362c332660SJim Kitchen // Adds two sparse matrices when they intersect. Where they don't intersect, 1372c332660SJim Kitchen // negate the 2nd argument's values; ignore 1st argument-only values. 138a8308020SRiver Riddle func.func @matrix_intersect(%arga: tensor<?x?xf64, #DCSR>, 1392c332660SJim Kitchen %argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> { 1402c332660SJim Kitchen %c0 = arith.constant 0 : index 1412c332660SJim Kitchen %c1 = arith.constant 1 : index 1422c332660SJim Kitchen %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR> 1432c332660SJim Kitchen %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR> 1446232a8f3SMatthias Springer %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR> 1452c332660SJim Kitchen %0 = linalg.generic #trait_mat_op 1462c332660SJim Kitchen ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>) 1472c332660SJim Kitchen outs(%xv: tensor<?x?xf64, #DCSR>) { 1482c332660SJim Kitchen ^bb(%a: f64, %b: f64, %x: f64): 1492c332660SJim Kitchen %1 = sparse_tensor.binary %a, %b: f64, f64 to f64 1502c332660SJim Kitchen overlap={ 1512c332660SJim Kitchen ^bb0(%x0: f64, %y0: f64): 1522c332660SJim Kitchen %ret = arith.addf %x0, %y0 : f64 1532c332660SJim Kitchen sparse_tensor.yield %ret : f64 1542c332660SJim Kitchen } 1552c332660SJim Kitchen left={} 1562c332660SJim Kitchen right={ 1572c332660SJim Kitchen ^bb0(%x1: f64): 1582c332660SJim Kitchen %lret = arith.negf %x1 : f64 1592c332660SJim Kitchen sparse_tensor.yield %lret : f64 1602c332660SJim Kitchen } 1612c332660SJim Kitchen linalg.yield %1 : f64 1622c332660SJim Kitchen } -> 
tensor<?x?xf64, #DCSR> 1632c332660SJim Kitchen return %0 : tensor<?x?xf64, #DCSR> 1642c332660SJim Kitchen } 1652c332660SJim Kitchen 16615d1cb45SPeiming Liu // Tensor addition (use semi-ring binary operation). 16715d1cb45SPeiming Liu func.func @add_tensor_1(%A: tensor<4x4xf64, #DCSR>, 16815d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { 16915d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR> 17015d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 17115d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 17215d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 17315d1cb45SPeiming Liu outs(%C: tensor<4x4xf64, #DCSR>) { 17415d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: f64) : 17515d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to f64 17615d1cb45SPeiming Liu overlap={ 17715d1cb45SPeiming Liu ^bb0(%x: f64, %y: f64): 17815d1cb45SPeiming Liu %ret = arith.addf %x, %y : f64 17915d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 18015d1cb45SPeiming Liu } 18115d1cb45SPeiming Liu left=identity 18215d1cb45SPeiming Liu right=identity 18315d1cb45SPeiming Liu linalg.yield %result : f64 18415d1cb45SPeiming Liu } -> tensor<4x4xf64, #DCSR> 18515d1cb45SPeiming Liu return %0 : tensor<4x4xf64, #DCSR> 18615d1cb45SPeiming Liu } 18715d1cb45SPeiming Liu 18815d1cb45SPeiming Liu // Same as @add_tensor_1, but use sparse_tensor.yield instead of identity to yield value. 
18915d1cb45SPeiming Liu func.func @add_tensor_2(%A: tensor<4x4xf64, #DCSR>, 19015d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { 19115d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR> 19215d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 19315d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 19415d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 19515d1cb45SPeiming Liu outs(%C: tensor<4x4xf64, #DCSR>) { 19615d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: f64) : 19715d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to f64 19815d1cb45SPeiming Liu overlap={ 19915d1cb45SPeiming Liu ^bb0(%x: f64, %y: f64): 20015d1cb45SPeiming Liu %ret = arith.addf %x, %y : f64 20115d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 20215d1cb45SPeiming Liu } 20315d1cb45SPeiming Liu left={ 20415d1cb45SPeiming Liu ^bb0(%x: f64): 20515d1cb45SPeiming Liu sparse_tensor.yield %x : f64 20615d1cb45SPeiming Liu } 20715d1cb45SPeiming Liu right={ 20815d1cb45SPeiming Liu ^bb0(%y: f64): 20915d1cb45SPeiming Liu sparse_tensor.yield %y : f64 21015d1cb45SPeiming Liu } 21115d1cb45SPeiming Liu linalg.yield %result : f64 21215d1cb45SPeiming Liu } -> tensor<4x4xf64, #DCSR> 21315d1cb45SPeiming Liu return %0 : tensor<4x4xf64, #DCSR> 21415d1cb45SPeiming Liu } 21515d1cb45SPeiming Liu 21615d1cb45SPeiming Liu // Performs triangular add/sub operation (using semi-ring binary op). 
21715d1cb45SPeiming Liu func.func @triangular(%A: tensor<4x4xf64, #DCSR>, 21815d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { 21915d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR> 22015d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 22115d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 22215d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 22315d1cb45SPeiming Liu outs(%C: tensor<4x4xf64, #DCSR>) { 22415d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: f64) : 22515d1cb45SPeiming Liu %row = linalg.index 0 : index 22615d1cb45SPeiming Liu %col = linalg.index 1 : index 22715d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to f64 22815d1cb45SPeiming Liu overlap={ 22915d1cb45SPeiming Liu ^bb0(%x: f64, %y: f64): 23015d1cb45SPeiming Liu %cmp = arith.cmpi "uge", %col, %row : index 23115d1cb45SPeiming Liu %upperTriangleResult = arith.addf %x, %y : f64 23215d1cb45SPeiming Liu %lowerTriangleResult = arith.subf %x, %y : f64 23315d1cb45SPeiming Liu %ret = arith.select %cmp, %upperTriangleResult, %lowerTriangleResult : f64 23415d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 23515d1cb45SPeiming Liu } 23615d1cb45SPeiming Liu left=identity 23715d1cb45SPeiming Liu right={ 23815d1cb45SPeiming Liu ^bb0(%y: f64): 23915d1cb45SPeiming Liu %cmp = arith.cmpi "uge", %col, %row : index 24015d1cb45SPeiming Liu %lowerTriangleResult = arith.negf %y : f64 24115d1cb45SPeiming Liu %ret = arith.select %cmp, %y, %lowerTriangleResult : f64 24215d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 24315d1cb45SPeiming Liu } 24415d1cb45SPeiming Liu linalg.yield %result : f64 24515d1cb45SPeiming Liu } -> tensor<4x4xf64, #DCSR> 24615d1cb45SPeiming Liu return %0 : tensor<4x4xf64, #DCSR> 24715d1cb45SPeiming Liu } 24815d1cb45SPeiming Liu 24915d1cb45SPeiming Liu // Perform sub operation (using semi-ring binary op) with a constant threshold. 
25015d1cb45SPeiming Liu func.func @sub_with_thres(%A: tensor<4x4xf64, #DCSR>, 25115d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { 25215d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR> 25315d1cb45SPeiming Liu // Defines out-block constant bounds. 25415d1cb45SPeiming Liu %thres_out_up = arith.constant 2.0 : f64 25515d1cb45SPeiming Liu %thres_out_lo = arith.constant -2.0 : f64 25615d1cb45SPeiming Liu 25715d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 25815d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 25915d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 26015d1cb45SPeiming Liu outs(%C: tensor<4x4xf64, #DCSR>) { 26115d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: f64) : 26215d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to f64 26315d1cb45SPeiming Liu overlap={ 26415d1cb45SPeiming Liu ^bb0(%x: f64, %y: f64): 26515d1cb45SPeiming Liu // Defines in-block constant bounds. 26615d1cb45SPeiming Liu %thres_up = arith.constant 1.0 : f64 26715d1cb45SPeiming Liu %thres_lo = arith.constant -1.0 : f64 26815d1cb45SPeiming Liu %result = arith.subf %x, %y : f64 26915d1cb45SPeiming Liu %cmp = arith.cmpf "oge", %result, %thres_up : f64 27015d1cb45SPeiming Liu %tmp = arith.select %cmp, %thres_up, %result : f64 27115d1cb45SPeiming Liu %cmp1 = arith.cmpf "ole", %tmp, %thres_lo : f64 27215d1cb45SPeiming Liu %ret = arith.select %cmp1, %thres_lo, %tmp : f64 27315d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 27415d1cb45SPeiming Liu } 27515d1cb45SPeiming Liu left={ 27615d1cb45SPeiming Liu ^bb0(%x: f64): 27715d1cb45SPeiming Liu // Uses out-block constant bounds. 
27815d1cb45SPeiming Liu %cmp = arith.cmpf "oge", %x, %thres_out_up : f64 27915d1cb45SPeiming Liu %tmp = arith.select %cmp, %thres_out_up, %x : f64 28015d1cb45SPeiming Liu %cmp1 = arith.cmpf "ole", %tmp, %thres_out_lo : f64 28115d1cb45SPeiming Liu %ret = arith.select %cmp1, %thres_out_lo, %tmp : f64 28215d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 28315d1cb45SPeiming Liu } 28415d1cb45SPeiming Liu right={ 28515d1cb45SPeiming Liu ^bb0(%y: f64): 28615d1cb45SPeiming Liu %ny = arith.negf %y : f64 28715d1cb45SPeiming Liu %cmp = arith.cmpf "oge", %ny, %thres_out_up : f64 28815d1cb45SPeiming Liu %tmp = arith.select %cmp, %thres_out_up, %ny : f64 28915d1cb45SPeiming Liu %cmp1 = arith.cmpf "ole", %tmp, %thres_out_lo : f64 29015d1cb45SPeiming Liu %ret = arith.select %cmp1, %thres_out_lo, %tmp : f64 29115d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 29215d1cb45SPeiming Liu } 29315d1cb45SPeiming Liu linalg.yield %result : f64 29415d1cb45SPeiming Liu } -> tensor<4x4xf64, #DCSR> 29515d1cb45SPeiming Liu return %0 : tensor<4x4xf64, #DCSR> 29615d1cb45SPeiming Liu } 29715d1cb45SPeiming Liu 29815d1cb45SPeiming Liu // Performs isEqual only on intersecting elements. 
29915d1cb45SPeiming Liu func.func @intersect_equal(%A: tensor<4x4xf64, #DCSR>, 30015d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> { 30115d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xi8, #DCSR> 30215d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 30315d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 30415d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 30515d1cb45SPeiming Liu outs(%C: tensor<4x4xi8, #DCSR>) { 30615d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: i8) : 30715d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to i8 30815d1cb45SPeiming Liu overlap={ 30915d1cb45SPeiming Liu ^bb0(%x: f64, %y: f64): 31015d1cb45SPeiming Liu %cmp = arith.cmpf "oeq", %x, %y : f64 31115d1cb45SPeiming Liu %ret = arith.extui %cmp : i1 to i8 31215d1cb45SPeiming Liu sparse_tensor.yield %ret : i8 31315d1cb45SPeiming Liu } 31415d1cb45SPeiming Liu left={} 31515d1cb45SPeiming Liu right={} 31615d1cb45SPeiming Liu linalg.yield %result : i8 31715d1cb45SPeiming Liu } -> tensor<4x4xi8, #DCSR> 31815d1cb45SPeiming Liu return %0 : tensor<4x4xi8, #DCSR> 31915d1cb45SPeiming Liu } 32015d1cb45SPeiming Liu 32115d1cb45SPeiming Liu // Keeps values on left, negate value on right, ignore value when overlapping. 
32215d1cb45SPeiming Liu func.func @only_left_right(%A: tensor<4x4xf64, #DCSR>, 32315d1cb45SPeiming Liu %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { 32415d1cb45SPeiming Liu %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR> 32515d1cb45SPeiming Liu %0 = linalg.generic #trait_mat_op 32615d1cb45SPeiming Liu ins(%A, %B: tensor<4x4xf64, #DCSR>, 32715d1cb45SPeiming Liu tensor<4x4xf64, #DCSR>) 32815d1cb45SPeiming Liu outs(%C: tensor<4x4xf64, #DCSR>) { 32915d1cb45SPeiming Liu ^bb0(%a: f64, %b: f64, %c: f64) : 33015d1cb45SPeiming Liu %result = sparse_tensor.binary %a, %b : f64, f64 to f64 33115d1cb45SPeiming Liu overlap={} 33215d1cb45SPeiming Liu left=identity 33315d1cb45SPeiming Liu right={ 33415d1cb45SPeiming Liu ^bb0(%y: f64): 33515d1cb45SPeiming Liu %ret = arith.negf %y : f64 33615d1cb45SPeiming Liu sparse_tensor.yield %ret : f64 33715d1cb45SPeiming Liu } 33815d1cb45SPeiming Liu linalg.yield %result : f64 33915d1cb45SPeiming Liu } -> tensor<4x4xf64, #DCSR> 34015d1cb45SPeiming Liu return %0 : tensor<4x4xf64, #DCSR> 34115d1cb45SPeiming Liu } 34215d1cb45SPeiming Liu 34315d1cb45SPeiming Liu // 34415d1cb45SPeiming Liu // Utility functions to dump the value of a tensor. 34515d1cb45SPeiming Liu // 34615d1cb45SPeiming Liu 347a8308020SRiver Riddle func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) { 3482c332660SJim Kitchen // Dump the values array to verify only sparse contents are stored. 3492c332660SJim Kitchen %c0 = arith.constant 0 : index 3502c332660SJim Kitchen %d0 = arith.constant -1.0 : f64 3512c332660SJim Kitchen %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64> 3522c332660SJim Kitchen %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64> 3532c332660SJim Kitchen vector.print %1 : vector<16xf64> 3542c332660SJim Kitchen // Dump the dense vector to verify structure is correct. 
3552c332660SJim Kitchen %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64> 356c66303c2SMatthias Springer %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64> 3572c332660SJim Kitchen vector.print %3 : vector<32xf64> 3582c332660SJim Kitchen return 3592c332660SJim Kitchen } 3602c332660SJim Kitchen 361a8308020SRiver Riddle func.func @dump_vec_i32(%arg0: tensor<?xi32, #SparseVector>) { 3622c332660SJim Kitchen // Dump the values array to verify only sparse contents are stored. 3632c332660SJim Kitchen %c0 = arith.constant 0 : index 3642c332660SJim Kitchen %d0 = arith.constant -1 : i32 3652c332660SJim Kitchen %0 = sparse_tensor.values %arg0 : tensor<?xi32, #SparseVector> to memref<?xi32> 3662c332660SJim Kitchen %1 = vector.transfer_read %0[%c0], %d0: memref<?xi32>, vector<24xi32> 3672c332660SJim Kitchen vector.print %1 : vector<24xi32> 3682c332660SJim Kitchen // Dump the dense vector to verify structure is correct. 3692c332660SJim Kitchen %dv = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32> 370c66303c2SMatthias Springer %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xi32>, vector<32xi32> 3712c332660SJim Kitchen vector.print %3 : vector<32xi32> 3722c332660SJim Kitchen return 3732c332660SJim Kitchen } 3742c332660SJim Kitchen 375a8308020SRiver Riddle func.func @dump_mat(%arg0: tensor<?x?xf64, #DCSR>) { 3762c332660SJim Kitchen %d0 = arith.constant 0.0 : f64 3772c332660SJim Kitchen %c0 = arith.constant 0 : index 3782c332660SJim Kitchen %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64> 379c66303c2SMatthias Springer %1 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<4x8xf64> 3802c332660SJim Kitchen vector.print %1 : vector<4x8xf64> 3812c332660SJim Kitchen return 3822c332660SJim Kitchen } 3832c332660SJim Kitchen 38415d1cb45SPeiming Liu func.func @dump_mat_4x4(%A: tensor<4x4xf64, #DCSR>) { 38515d1cb45SPeiming Liu %c0 = arith.constant 0 : index 
38615d1cb45SPeiming Liu %du = arith.constant -1.0 : f64 38715d1cb45SPeiming Liu 38815d1cb45SPeiming Liu %c = sparse_tensor.convert %A : tensor<4x4xf64, #DCSR> to tensor<4x4xf64> 389c66303c2SMatthias Springer %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xf64>, vector<4x4xf64> 39015d1cb45SPeiming Liu vector.print %v : vector<4x4xf64> 39115d1cb45SPeiming Liu 39215d1cb45SPeiming Liu %1 = sparse_tensor.values %A : tensor<4x4xf64, #DCSR> to memref<?xf64> 39315d1cb45SPeiming Liu %2 = vector.transfer_read %1[%c0], %du: memref<?xf64>, vector<16xf64> 39415d1cb45SPeiming Liu vector.print %2 : vector<16xf64> 39515d1cb45SPeiming Liu 39615d1cb45SPeiming Liu return 39715d1cb45SPeiming Liu } 39815d1cb45SPeiming Liu 39915d1cb45SPeiming Liu func.func @dump_mat_4x4_i8(%A: tensor<4x4xi8, #DCSR>) { 40015d1cb45SPeiming Liu %c0 = arith.constant 0 : index 40115d1cb45SPeiming Liu %du = arith.constant -1 : i8 40215d1cb45SPeiming Liu 40315d1cb45SPeiming Liu %c = sparse_tensor.convert %A : tensor<4x4xi8, #DCSR> to tensor<4x4xi8> 404c66303c2SMatthias Springer %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xi8>, vector<4x4xi8> 40515d1cb45SPeiming Liu vector.print %v : vector<4x4xi8> 40615d1cb45SPeiming Liu 40715d1cb45SPeiming Liu %1 = sparse_tensor.values %A : tensor<4x4xi8, #DCSR> to memref<?xi8> 40815d1cb45SPeiming Liu %2 = vector.transfer_read %1[%c0], %du: memref<?xi8>, vector<16xi8> 40915d1cb45SPeiming Liu vector.print %2 : vector<16xi8> 41015d1cb45SPeiming Liu 41115d1cb45SPeiming Liu return 41215d1cb45SPeiming Liu } 41315d1cb45SPeiming Liu 41415d1cb45SPeiming Liu // Driver method to call and verify kernels. 415a8308020SRiver Riddle func.func @entry() { 4162c332660SJim Kitchen %c0 = arith.constant 0 : index 4172c332660SJim Kitchen 4182c332660SJim Kitchen // Setup sparse vectors. 
4192c332660SJim Kitchen %v1 = arith.constant sparse< 4202c332660SJim Kitchen [ [0], [3], [11], [17], [20], [21], [28], [29], [31] ], 4212c332660SJim Kitchen [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ] 4222c332660SJim Kitchen > : tensor<32xf64> 4232c332660SJim Kitchen %v2 = arith.constant sparse< 4242c332660SJim Kitchen [ [1], [3], [4], [10], [16], [18], [21], [28], [29], [31] ], 4252c332660SJim Kitchen [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0 ] 4262c332660SJim Kitchen > : tensor<32xf64> 4272c332660SJim Kitchen %v3 = arith.constant dense< 4282c332660SJim Kitchen [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 4292c332660SJim Kitchen 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 0., 1.] 4302c332660SJim Kitchen > : tensor<32xf64> 4312c332660SJim Kitchen %sv1 = sparse_tensor.convert %v1 : tensor<32xf64> to tensor<?xf64, #SparseVector> 4322c332660SJim Kitchen %sv2 = sparse_tensor.convert %v2 : tensor<32xf64> to tensor<?xf64, #SparseVector> 4332c332660SJim Kitchen %dv3 = tensor.cast %v3 : tensor<32xf64> to tensor<?xf64> 4342c332660SJim Kitchen 4352c332660SJim Kitchen // Setup sparse matrices. 
4362c332660SJim Kitchen %m1 = arith.constant sparse< 4372c332660SJim Kitchen [ [0,0], [0,1], [1,7], [2,2], [2,4], [2,7], [3,0], [3,2], [3,3] ], 4382c332660SJim Kitchen [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ] 4392c332660SJim Kitchen > : tensor<4x8xf64> 4402c332660SJim Kitchen %m2 = arith.constant sparse< 4412c332660SJim Kitchen [ [0,0], [0,7], [1,0], [1,6], [2,1], [2,7] ], 4422c332660SJim Kitchen [6.0, 5.0, 4.0, 3.0, 2.0, 1.0 ] 4432c332660SJim Kitchen > : tensor<4x8xf64> 4442c332660SJim Kitchen %sm1 = sparse_tensor.convert %m1 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR> 4452c332660SJim Kitchen %sm2 = sparse_tensor.convert %m2 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR> 4462c332660SJim Kitchen 44715d1cb45SPeiming Liu %m3 = arith.constant dense< 44815d1cb45SPeiming Liu [ [ 1.0, 0.0, 3.0, 0.0], 44915d1cb45SPeiming Liu [ 0.0, 2.0, 0.0, 0.0], 45015d1cb45SPeiming Liu [ 0.0, 0.0, 0.0, 4.0], 45115d1cb45SPeiming Liu [ 3.0, 4.0, 0.0, 0.0] ]> : tensor<4x4xf64> 45215d1cb45SPeiming Liu %m4 = arith.constant dense< 45315d1cb45SPeiming Liu [ [ 1.0, 0.0, 1.0, 1.0], 45415d1cb45SPeiming Liu [ 0.0, 0.5, 0.0, 0.0], 45515d1cb45SPeiming Liu [ 1.0, 5.0, 2.0, 0.0], 45615d1cb45SPeiming Liu [ 2.0, 0.0, 0.0, 0.0] ]> : tensor<4x4xf64> 45715d1cb45SPeiming Liu 45815d1cb45SPeiming Liu %sm3 = sparse_tensor.convert %m3 : tensor<4x4xf64> to tensor<4x4xf64, #DCSR> 45915d1cb45SPeiming Liu %sm4 = sparse_tensor.convert %m4 : tensor<4x4xf64> to tensor<4x4xf64, #DCSR> 46015d1cb45SPeiming Liu 4612c332660SJim Kitchen // Call sparse vector kernels. 
4622c332660SJim Kitchen %0 = call @vector_min(%sv1, %sv2) 4632c332660SJim Kitchen : (tensor<?xf64, #SparseVector>, 4642c332660SJim Kitchen tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> 4652c332660SJim Kitchen %1 = call @vector_mul(%sv1, %dv3) 4662c332660SJim Kitchen : (tensor<?xf64, #SparseVector>, 4672c332660SJim Kitchen tensor<?xf64>) -> tensor<?xf64, #SparseVector> 4682c332660SJim Kitchen %2 = call @vector_setdiff(%sv1, %sv2) 4692c332660SJim Kitchen : (tensor<?xf64, #SparseVector>, 4702c332660SJim Kitchen tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> 4712c332660SJim Kitchen %3 = call @vector_index(%sv1) 4722c332660SJim Kitchen : (tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> 4732c332660SJim Kitchen 4742c332660SJim Kitchen // Call sparse matrix kernels. 4752c332660SJim Kitchen %5 = call @matrix_intersect(%sm1, %sm2) 4762c332660SJim Kitchen : (tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> 47715d1cb45SPeiming Liu %6 = call @add_tensor_1(%sm3, %sm4) 47815d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> 47915d1cb45SPeiming Liu %7 = call @add_tensor_2(%sm3, %sm4) 48015d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> 48115d1cb45SPeiming Liu %8 = call @triangular(%sm3, %sm4) 48215d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> 48315d1cb45SPeiming Liu %9 = call @sub_with_thres(%sm3, %sm4) 48415d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> 48515d1cb45SPeiming Liu %10 = call @intersect_equal(%sm3, %sm4) 48615d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> 48715d1cb45SPeiming Liu %11 = call @only_left_right(%sm3, %sm4) 48815d1cb45SPeiming Liu : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> 4892c332660SJim Kitchen 4902c332660SJim Kitchen 
// 4912c332660SJim Kitchen // Verify the results. 4922c332660SJim Kitchen // 4932c332660SJim Kitchen // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1 ) 4942c332660SJim Kitchen // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 ) 4952c332660SJim Kitchen // CHECK-NEXT: ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, -1, -1, -1, -1, -1, -1 ) 4962c332660SJim Kitchen // CHECK-NEXT: ( 0, 11, 0, 12, 13, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 15, 0, 16, 0, 0, 17, 0, 0, 0, 0, 0, 0, 18, 19, 0, 20 ) 4972c332660SJim Kitchen // CHECK-NEXT: ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9, -1, -1 ) 4982c332660SJim Kitchen // CHECK-NEXT: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 ) 4992c332660SJim Kitchen // CHECK-NEXT: ( 0, 6, 3, 28, 0, 6, 56, 72, 9, -1, -1, -1, -1, -1, -1, -1 ) 5002c332660SJim Kitchen // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 28, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 56, 72, 0, 9 ) 5012c332660SJim Kitchen // CHECK-NEXT: ( 1, 3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ) 5022c332660SJim Kitchen // CHECK-NEXT: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) 5032c332660SJim Kitchen // CHECK-NEXT: ( 0, 3, 11, 17, 20, 21, 28, 29, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ) 5042c332660SJim Kitchen // CHECK-NEXT: ( 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 17, 0, 0, 20, 21, 0, 0, 0, 0, 0, 0, 28, 29, 0, 31 ) 5052c332660SJim Kitchen // CHECK-NEXT: ( ( 7, 0, 0, 0, 0, 0, 0, -5 ), ( -4, 0, 0, 0, 0, 0, -3, 0 ), ( 0, -2, 0, 0, 0, 0, 0, 7 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ) ) 50615d1cb45SPeiming Liu // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) ) 50715d1cb45SPeiming Liu // CHECK-NEXT: ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, -1, -1, -1, -1, -1, -1 ) 50815d1cb45SPeiming Liu // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 
), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) ) 50915d1cb45SPeiming Liu // CHECK-NEXT: ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, -1, -1, -1, -1, -1, -1 ) 51015d1cb45SPeiming Liu // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( -1, -5, 2, 4 ), ( 1, 4, 0, 0 ) ) 51115d1cb45SPeiming Liu // CHECK-NEXT: ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4, -1, -1, -1, -1, -1, -1 ) 51215d1cb45SPeiming Liu // CHECK-NEXT: ( ( 0, 0, 1, -1 ), ( 0, 1, 0, 0 ), ( -1, -2, -2, 2 ), ( 1, 2, 0, 0 ) ) 51315d1cb45SPeiming Liu // CHECK-NEXT: ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2, -1, -1, -1, -1, -1, -1 ) 51415d1cb45SPeiming Liu // CHECK-NEXT: ( ( 1, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ) ) 51515d1cb45SPeiming Liu // CHECK-NEXT: ( 1, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ) 51615d1cb45SPeiming Liu // CHECK-NEXT: ( ( 0, 0, 0, -1 ), ( 0, 0, 0, 0 ), ( -1, -5, -2, 4 ), ( 0, 4, 0, 0 ) ) 51715d1cb45SPeiming Liu // CHECK-NEXT: ( -1, -1, -5, -2, 4, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ) 5182c332660SJim Kitchen // 5192c332660SJim Kitchen call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> () 5202c332660SJim Kitchen call @dump_vec(%sv2) : (tensor<?xf64, #SparseVector>) -> () 5212c332660SJim Kitchen call @dump_vec(%0) : (tensor<?xf64, #SparseVector>) -> () 5222c332660SJim Kitchen call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> () 5232c332660SJim Kitchen call @dump_vec(%2) : (tensor<?xf64, #SparseVector>) -> () 5242c332660SJim Kitchen call @dump_vec_i32(%3) : (tensor<?xi32, #SparseVector>) -> () 5252c332660SJim Kitchen call @dump_mat(%5) : (tensor<?x?xf64, #DCSR>) -> () 52615d1cb45SPeiming Liu call @dump_mat_4x4(%6) : (tensor<4x4xf64, #DCSR>) -> () 52715d1cb45SPeiming Liu call @dump_mat_4x4(%7) : (tensor<4x4xf64, #DCSR>) -> () 52815d1cb45SPeiming Liu call @dump_mat_4x4(%8) : (tensor<4x4xf64, #DCSR>) -> () 52915d1cb45SPeiming Liu call @dump_mat_4x4(%9) : (tensor<4x4xf64, #DCSR>) -> () 53015d1cb45SPeiming Liu call @dump_mat_4x4_i8(%10) : (tensor<4x4xi8, #DCSR>) -> () 
53115d1cb45SPeiming Liu call @dump_mat_4x4(%11) : (tensor<4x4xf64, #DCSR>) -> () 5322c332660SJim Kitchen 5332c332660SJim Kitchen // Release the resources. 534*27a431f5SMatthias Springer bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector> 535*27a431f5SMatthias Springer bufferization.dealloc_tensor %sv2 : tensor<?xf64, #SparseVector> 536*27a431f5SMatthias Springer bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #DCSR> 537*27a431f5SMatthias Springer bufferization.dealloc_tensor %sm2 : tensor<?x?xf64, #DCSR> 538*27a431f5SMatthias Springer bufferization.dealloc_tensor %sm3 : tensor<4x4xf64, #DCSR> 539*27a431f5SMatthias Springer bufferization.dealloc_tensor %sm4 : tensor<4x4xf64, #DCSR> 540*27a431f5SMatthias Springer bufferization.dealloc_tensor %0 : tensor<?xf64, #SparseVector> 541*27a431f5SMatthias Springer bufferization.dealloc_tensor %1 : tensor<?xf64, #SparseVector> 542*27a431f5SMatthias Springer bufferization.dealloc_tensor %2 : tensor<?xf64, #SparseVector> 543*27a431f5SMatthias Springer bufferization.dealloc_tensor %3 : tensor<?xi32, #SparseVector> 544*27a431f5SMatthias Springer bufferization.dealloc_tensor %5 : tensor<?x?xf64, #DCSR> 545*27a431f5SMatthias Springer bufferization.dealloc_tensor %6 : tensor<4x4xf64, #DCSR> 546*27a431f5SMatthias Springer bufferization.dealloc_tensor %7 : tensor<4x4xf64, #DCSR> 547*27a431f5SMatthias Springer bufferization.dealloc_tensor %8 : tensor<4x4xf64, #DCSR> 548*27a431f5SMatthias Springer bufferization.dealloc_tensor %9 : tensor<4x4xf64, #DCSR> 549*27a431f5SMatthias Springer bufferization.dealloc_tensor %10 : tensor<4x4xi8, #DCSR> 550*27a431f5SMatthias Springer bufferization.dealloc_tensor %11 : tensor<4x4xf64, #DCSR> 5512c332660SJim Kitchen return 5522c332660SJim Kitchen } 5532c332660SJim Kitchen} 554