// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#DCSR = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#DCSC = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>

#transpose_trait = {
  indexing_maps = [
    affine_map<(i,j) -> (j,i)>,  // A
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(j,i)"
}

module {

  //
  // Transposing a sparse row-wise matrix into another sparse row-wise
  // matrix introduces a cycle in the iteration graph. This complication
  // can be avoided by manually inserting a conversion of the incoming
  // matrix into a sparse column-wise matrix first.
  //
  func.func @sparse_transpose(%arga: tensor<3x4xf64, #DCSR>)
                                  -> tensor<4x3xf64, #DCSR> {
    %t = sparse_tensor.convert %arga
      : tensor<3x4xf64, #DCSR> to tensor<3x4xf64, #DCSC>

    %i = bufferization.alloc_tensor() : tensor<4x3xf64, #DCSR>
    %0 = linalg.generic #transpose_trait
       ins(%t: tensor<3x4xf64, #DCSC>)
      outs(%i: tensor<4x3xf64, #DCSR>) {
      ^bb(%a: f64, %x: f64):
        linalg.yield %a : f64
    } -> tensor<4x3xf64, #DCSR>

    // Release the temporary column-wise copy.
    bufferization.dealloc_tensor %t : tensor<3x4xf64, #DCSC>

    return %0 : tensor<4x3xf64, #DCSR>
  }

  //
  // Even better, the sparse compiler is able to insert such a
  // conversion automatically to resolve the cycle in the iteration graph!
  //
  func.func @sparse_transpose_auto(%arga: tensor<3x4xf64, #DCSR>)
                                       -> tensor<4x3xf64, #DCSR> {
    %i = bufferization.alloc_tensor() : tensor<4x3xf64, #DCSR>
    %0 = linalg.generic #transpose_trait
       ins(%arga: tensor<3x4xf64, #DCSR>)
      outs(%i: tensor<4x3xf64, #DCSR>) {
      ^bb(%a: f64, %x: f64):
        linalg.yield %a : f64
    } -> tensor<4x3xf64, #DCSR>
    return %0 : tensor<4x3xf64, #DCSR>
  }

  //
  // Main driver.
  //
  func.func @entry() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c4 = arith.constant 4 : index
    %du = arith.constant 0.0 : f64

    // Set up the input sparse matrix from a dense constant.
    %d = arith.constant dense<[
        [ 1.1, 1.2, 0.0, 1.4 ],
        [ 0.0, 0.0, 0.0, 0.0 ],
        [ 3.1, 0.0, 3.3, 3.4 ]
    ]> : tensor<3x4xf64>
    %a = sparse_tensor.convert %d : tensor<3x4xf64> to tensor<3x4xf64, #DCSR>

    // Call the kernels.
    %0 = call @sparse_transpose(%a)
      : (tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR>
    %1 = call @sparse_transpose_auto(%a)
      : (tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR>

    //
    // Verify the results by printing both transposed matrices row by row.
    //
    // CHECK:      ( 1.1, 0, 3.1 )
    // CHECK-NEXT: ( 1.2, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 3.3 )
    // CHECK-NEXT: ( 1.4, 0, 3.4 )
    //
    // CHECK-NEXT: ( 1.1, 0, 3.1 )
    // CHECK-NEXT: ( 1.2, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 3.3 )
    // CHECK-NEXT: ( 1.4, 0, 3.4 )
    //
    %x = sparse_tensor.convert %0 : tensor<4x3xf64, #DCSR> to tensor<4x3xf64>
    scf.for %i = %c0 to %c4 step %c1 {
      %v1 = vector.transfer_read %x[%i, %c0], %du : tensor<4x3xf64>, vector<3xf64>
      vector.print %v1 : vector<3xf64>
    }
    %y = sparse_tensor.convert %1 : tensor<4x3xf64, #DCSR> to tensor<4x3xf64>
    scf.for %i = %c0 to %c4 step %c1 {
      %v2 = vector.transfer_read %y[%i, %c0], %du : tensor<4x3xf64>, vector<3xf64>
      vector.print %v2 : vector<3xf64>
    }

    // Release resources.
    bufferization.dealloc_tensor %a : tensor<3x4xf64, #DCSR>
    bufferization.dealloc_tensor %0 : tensor<4x3xf64, #DCSR>
    bufferization.dealloc_tensor %1 : tensor<4x3xf64, #DCSR>

    return
  }
}