// Force this file to use the kDirect method for sparse2sparse.
// RUN: mlir-opt %s --sparse-compiler="s2s-strategy=2" | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#Tensor1 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "dense", "compressed" ]
}>

// NOTE: dense after compressed is not currently supported for the target
// of direct-sparse2sparse conversion. (It's fine for the source though.)
#Tensor2 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed", "dense" ]
}>

#Tensor3 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "dense", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (i,k,j)>
}>

module {
  //
  // Utilities for output and releasing memory.
  //
  func.func @dump(%arg0: tensor<2x3x4xf64>) {
    %c0 = arith.constant 0 : index
    %d0 = arith.constant -1.0 : f64
    %0 = vector.transfer_read %arg0[%c0, %c0, %c0], %d0 : tensor<2x3x4xf64>, vector<2x3x4xf64>
    vector.print %0 : vector<2x3x4xf64>
    return
  }
  func.func @dumpAndRelease_234(%arg0: tensor<2x3x4xf64>) {
    call @dump(%arg0) : (tensor<2x3x4xf64>) -> ()
    // Release the backing buffer of the dense tensor (assumed here to be
    // obtainable via bufferization.to_memref).
    %1 = bufferization.to_memref %arg0 : memref<2x3x4xf64>
    memref.dealloc %1 : memref<2x3x4xf64>
    return
  }

  //
  // Main driver.
  //
  func.func @entry() {
    //
    // Initialize a 3-dim dense tensor.
    //
    %src = arith.constant dense<[
       [ [  1.0,  2.0,  3.0,  4.0 ],
         [  5.0,  6.0,  7.0,  8.0 ],
         [  9.0, 10.0, 11.0, 12.0 ] ],
       [ [ 13.0, 14.0, 15.0, 16.0 ],
         [ 17.0, 18.0, 19.0, 20.0 ],
         [ 21.0, 22.0, 23.0, 24.0 ] ]
    ]> : tensor<2x3x4xf64>

    //
    // Convert dense tensor directly to various sparse tensors.
    //
    %s1 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor1>
    %s2 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor2>
    %s3 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor3>

    //
    // Convert sparse tensor directly to another sparse format.
    //
    %t13 = sparse_tensor.convert %s1 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64, #Tensor3>
    %t21 = sparse_tensor.convert %s2 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64, #Tensor1>
    %t23 = sparse_tensor.convert %s2 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64, #Tensor3>
    %t31 = sparse_tensor.convert %s3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor1>

    //
    // Convert sparse tensor back to dense.
    //
    %d13 = sparse_tensor.convert %t13 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64>
    %d21 = sparse_tensor.convert %t21 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64>
    %d23 = sparse_tensor.convert %t23 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64>
    %d31 = sparse_tensor.convert %t31 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64>

    //
    // Check round-trip equality. And release dense tensors.
    //
    // CHECK-COUNT-5: ( ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) ), ( ( 13, 14, 15, 16 ), ( 17, 18, 19, 20 ), ( 21, 22, 23, 24 ) ) )
    call @dump(%src) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d13) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d21) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d23) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d31) : (tensor<2x3x4xf64>) -> ()

    //
    // Release sparse tensors.
    //
    bufferization.dealloc_tensor %t13 : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %t21 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %t23 : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %t31 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %s1 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %s2 : tensor<2x3x4xf64, #Tensor2>
    bufferization.dealloc_tensor %s3 : tensor<2x3x4xf64, #Tensor3>

    return
  }
}