// RUN: mlir-opt %s \
// RUN:   -sparsification -sparse-tensor-conversion \
// RUN:   -linalg-bufferize -convert-linalg-to-loops \
// RUN:   -convert-vector-to-scf -convert-scf-to-std \
// RUN:   -func-bufferize -tensor-constant-bufferize -tensor-bufferize \
// RUN:   -std-bufferize -finalizing-bufferize \
// RUN:   -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm \
// RUN:   -reconcile-unrealized-casts \
// RUN: | \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext \
// RUN: | \
// RUN: FileCheck %s

#Tensor1 = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (i,j,k)>
}>

#Tensor2 = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (j,k,i)>
}>

#Tensor3 = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

#Tensor4 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (i,j,k)>
}>

#Tensor5 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (j,k,i)>
}>

#Tensor6 = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

//
// Integration test that tests conversions from sparse to dense tensors.
//
module {
  //
  // Utilities for output and releasing memory.
  //
  func @dump(%arg0: tensor<2x3x4xf64>) {
    %c0 = arith.constant 0 : index
    %d0 = arith.constant -1.0 : f64
    %0 = vector.transfer_read %arg0[%c0, %c0, %c0], %d0 : tensor<2x3x4xf64>, vector<2x3x4xf64>
    vector.print %0 : vector<2x3x4xf64>
    return
  }
  func @dumpAndRelease_234(%arg0: tensor<2x3x4xf64>) {
    call @dump(%arg0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<2x3x4xf64>
    memref.dealloc %1 : memref<2x3x4xf64>
    return
  }
  func @dumpAndRelease_p34(%arg0: tensor<?x3x4xf64>) {
    %0 = tensor.cast %arg0 : tensor<?x3x4xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<?x3x4xf64>
    memref.dealloc %1 : memref<?x3x4xf64>
    return
  }
  func @dumpAndRelease_2p4(%arg0: tensor<2x?x4xf64>) {
    %0 = tensor.cast %arg0 : tensor<2x?x4xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<2x?x4xf64>
    memref.dealloc %1 : memref<2x?x4xf64>
    return
  }
  func @dumpAndRelease_23p(%arg0: tensor<2x3x?xf64>) {
    %0 = tensor.cast %arg0 : tensor<2x3x?xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<2x3x?xf64>
    memref.dealloc %1 : memref<2x3x?xf64>
    return
  }
  func @dumpAndRelease_2pp(%arg0: tensor<2x?x?xf64>) {
    %0 = tensor.cast %arg0 : tensor<2x?x?xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<2x?x?xf64>
    memref.dealloc %1 : memref<2x?x?xf64>
    return
  }
  func @dumpAndRelease_p3p(%arg0: tensor<?x3x?xf64>) {
    %0 = tensor.cast %arg0 : tensor<?x3x?xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<?x3x?xf64>
    memref.dealloc %1 : memref<?x3x?xf64>
    return
  }
  func @dumpAndRelease_pp4(%arg0: tensor<?x?x4xf64>) {
    %0 = tensor.cast %arg0 : tensor<?x?x4xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<?x?x4xf64>
    memref.dealloc %1 : memref<?x?x4xf64>
    return
  }
  func @dumpAndRelease_ppp(%arg0: tensor<?x?x?xf64>) {
    %0 = tensor.cast %arg0 : tensor<?x?x?xf64> to tensor<2x3x4xf64>
    call @dump(%0) : (tensor<2x3x4xf64>) -> ()
    %1 = bufferization.to_memref %arg0 : memref<?x?x?xf64>
    memref.dealloc %1 : memref<?x?x?xf64>
    return
  }

  //
  // Main driver.
  //
  func @entry() {
    //
    // Initialize a 3-dim dense tensor.
    //
    %src = arith.constant dense<[
      [ [  1.0,  2.0,  3.0,  4.0 ],
        [  5.0,  6.0,  7.0,  8.0 ],
        [  9.0, 10.0, 11.0, 12.0 ] ],
      [ [ 13.0, 14.0, 15.0, 16.0 ],
        [ 17.0, 18.0, 19.0, 20.0 ],
        [ 21.0, 22.0, 23.0, 24.0 ] ]
    ]> : tensor<2x3x4xf64>

    //
    // Convert dense tensor directly to various sparse tensors.
    //
    %s2341 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor1>
    %s2342 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor2>
    %s2343 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor3>
    %s2344 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor4>
    %s2345 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor5>
    %s2346 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor6>

    %sp344 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<?x3x4xf64, #Tensor4>
    %sp345 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<?x3x4xf64, #Tensor5>
    %sp346 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<?x3x4xf64, #Tensor6>
    %s2p44 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x4xf64, #Tensor4>
    %s2p45 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x4xf64, #Tensor5>
    %s2p46 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x4xf64, #Tensor6>
    %s23p4 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x?xf64, #Tensor4>
    %s23p5 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x?xf64, #Tensor5>
    %s23p6 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x?xf64, #Tensor6>
    %s2pp4 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x?xf64, #Tensor4>
    %s2pp5 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x?xf64, #Tensor5>
    %s2pp6 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x?x?xf64, #Tensor6>

    //
    // Convert sparse tensor back to dense.
    //
    %d2341 = sparse_tensor.convert %s2341 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64>
    %d2342 = sparse_tensor.convert %s2342 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64>
    %d2343 = sparse_tensor.convert %s2343 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64>
    %d2344 = sparse_tensor.convert %s2344 : tensor<2x3x4xf64, #Tensor4> to tensor<2x3x4xf64>
    %d2345 = sparse_tensor.convert %s2345 : tensor<2x3x4xf64, #Tensor5> to tensor<2x3x4xf64>
    %d2346 = sparse_tensor.convert %s2346 : tensor<2x3x4xf64, #Tensor6> to tensor<2x3x4xf64>

    %dp344 = sparse_tensor.convert %sp344 : tensor<?x3x4xf64, #Tensor4> to tensor<?x3x4xf64>
    %dp345 = sparse_tensor.convert %sp345 : tensor<?x3x4xf64, #Tensor5> to tensor<?x3x4xf64>
    %dp346 = sparse_tensor.convert %sp346 : tensor<?x3x4xf64, #Tensor6> to tensor<?x3x4xf64>
    %d2p44 = sparse_tensor.convert %s2p44 : tensor<2x?x4xf64, #Tensor4> to tensor<2x?x4xf64>
    %d2p45 = sparse_tensor.convert %s2p45 : tensor<2x?x4xf64, #Tensor5> to tensor<2x?x4xf64>
    %d2p46 = sparse_tensor.convert %s2p46 : tensor<2x?x4xf64, #Tensor6> to tensor<2x?x4xf64>
    %d23p4 = sparse_tensor.convert %s23p4 : tensor<2x3x?xf64, #Tensor4> to tensor<2x3x?xf64>
    %d23p5 = sparse_tensor.convert %s23p5 : tensor<2x3x?xf64, #Tensor5> to tensor<2x3x?xf64>
    %d23p6 = sparse_tensor.convert %s23p6 : tensor<2x3x?xf64, #Tensor6> to tensor<2x3x?xf64>
    %d2pp4 = sparse_tensor.convert %s2pp4 : tensor<2x?x?xf64, #Tensor4> to tensor<2x?x?xf64>
    %d2pp5 = sparse_tensor.convert %s2pp5 : tensor<2x?x?xf64, #Tensor5> to tensor<2x?x?xf64>
    %d2pp6 = sparse_tensor.convert %s2pp6 : tensor<2x?x?xf64, #Tensor6> to tensor<2x?x?xf64>

    %dp3p4 = sparse_tensor.convert %sp344 : tensor<?x3x4xf64, #Tensor4> to tensor<?x3x?xf64>
    %dp3p5 = sparse_tensor.convert %sp345 : tensor<?x3x4xf64, #Tensor5> to tensor<?x3x?xf64>
    %dp3p6 = sparse_tensor.convert %sp346 : tensor<?x3x4xf64, #Tensor6> to tensor<?x3x?xf64>
    %dpp44 = sparse_tensor.convert %s2p44 : tensor<2x?x4xf64, #Tensor4> to tensor<?x?x4xf64>
    %dpp45 = sparse_tensor.convert %s2p45 : tensor<2x?x4xf64, #Tensor5> to tensor<?x?x4xf64>
    %dpp46 = sparse_tensor.convert %s2p46 : tensor<2x?x4xf64, #Tensor6> to tensor<?x?x4xf64>
    %dppp4 = sparse_tensor.convert %s2pp4 : tensor<2x?x?xf64, #Tensor4> to tensor<?x?x?xf64>
    %dppp5 = sparse_tensor.convert %s2pp5 : tensor<2x?x?xf64, #Tensor5> to tensor<?x?x?xf64>
    %dppp6 = sparse_tensor.convert %s2pp6 : tensor<2x?x?xf64, #Tensor6> to tensor<?x?x?xf64>

    //
    // Check round-trip equality. And release dense tensors.
    //
    // CHECK-COUNT-28: ( ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) ), ( ( 13, 14, 15, 16 ), ( 17, 18, 19, 20 ), ( 21, 22, 23, 24 ) ) )
    call @dump(%src) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2341) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2342) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2343) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2344) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2345) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_234(%d2346) : (tensor<2x3x4xf64>) -> ()
    call @dumpAndRelease_p34(%dp344) : (tensor<?x3x4xf64>) -> ()
    call @dumpAndRelease_p34(%dp345) : (tensor<?x3x4xf64>) -> ()
    call @dumpAndRelease_p34(%dp346) : (tensor<?x3x4xf64>) -> ()
    call @dumpAndRelease_2p4(%d2p44) : (tensor<2x?x4xf64>) -> ()
    call @dumpAndRelease_2p4(%d2p45) : (tensor<2x?x4xf64>) -> ()
    call @dumpAndRelease_2p4(%d2p46) : (tensor<2x?x4xf64>) -> ()
    call @dumpAndRelease_23p(%d23p4) : (tensor<2x3x?xf64>) -> ()
    call @dumpAndRelease_23p(%d23p5) : (tensor<2x3x?xf64>) -> ()
    call @dumpAndRelease_23p(%d23p6) : (tensor<2x3x?xf64>) -> ()
    call @dumpAndRelease_2pp(%d2pp4) : (tensor<2x?x?xf64>) -> ()
    call @dumpAndRelease_2pp(%d2pp5) : (tensor<2x?x?xf64>) -> ()
    call @dumpAndRelease_2pp(%d2pp6) : (tensor<2x?x?xf64>) -> ()
    call @dumpAndRelease_p3p(%dp3p4) : (tensor<?x3x?xf64>) -> ()
    call @dumpAndRelease_p3p(%dp3p5) : (tensor<?x3x?xf64>) -> ()
    call @dumpAndRelease_p3p(%dp3p6) : (tensor<?x3x?xf64>) -> ()
    call @dumpAndRelease_pp4(%dpp44) : (tensor<?x?x4xf64>) -> ()
    call @dumpAndRelease_pp4(%dpp45) : (tensor<?x?x4xf64>) -> ()
    call @dumpAndRelease_pp4(%dpp46) : (tensor<?x?x4xf64>) -> ()
    call @dumpAndRelease_ppp(%dppp4) : (tensor<?x?x?xf64>) -> ()
    call @dumpAndRelease_ppp(%dppp5) : (tensor<?x?x?xf64>) -> ()
    call @dumpAndRelease_ppp(%dppp6) : (tensor<?x?x?xf64>) -> ()

    //
    // Release sparse tensors.
    //
    sparse_tensor.release %s2341 : tensor<2x3x4xf64, #Tensor1>
    sparse_tensor.release %s2342 : tensor<2x3x4xf64, #Tensor2>
    sparse_tensor.release %s2343 : tensor<2x3x4xf64, #Tensor3>
    sparse_tensor.release %s2344 : tensor<2x3x4xf64, #Tensor4>
    sparse_tensor.release %s2345 : tensor<2x3x4xf64, #Tensor5>
    sparse_tensor.release %s2346 : tensor<2x3x4xf64, #Tensor6>
    sparse_tensor.release %sp344 : tensor<?x3x4xf64, #Tensor4>
    sparse_tensor.release %sp345 : tensor<?x3x4xf64, #Tensor5>
    sparse_tensor.release %sp346 : tensor<?x3x4xf64, #Tensor6>
    sparse_tensor.release %s2p44 : tensor<2x?x4xf64, #Tensor4>
    sparse_tensor.release %s2p45 : tensor<2x?x4xf64, #Tensor5>
    sparse_tensor.release %s2p46 : tensor<2x?x4xf64, #Tensor6>
    sparse_tensor.release %s23p4 : tensor<2x3x?xf64, #Tensor4>
    sparse_tensor.release %s23p5 : tensor<2x3x?xf64, #Tensor5>
    sparse_tensor.release %s23p6 : tensor<2x3x?xf64, #Tensor6>
    sparse_tensor.release %s2pp4 : tensor<2x?x?xf64, #Tensor4>
    sparse_tensor.release %s2pp5 : tensor<2x?x?xf64, #Tensor5>
    sparse_tensor.release %s2pp6 : tensor<2x?x?xf64, #Tensor6>

    return
  }
}