// RUN: mlir-opt %s \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
// RUN: mlir-opt %s \
// RUN:   --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

!Filename = type !llvm.ptr<i8>

#SparseTensor = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "compressed", "compressed",
                   "compressed", "compressed", "compressed", "compressed" ],
  // Note that any dimOrdering permutation should give the same results
  // since, even though it impacts the sparse storage scheme layout,
  // it should not change the semantics.
  dimOrdering = affine_map<(i,j,k,l,m,n,o,p) -> (p,o,j,k,i,l,m,n)>
}>

#trait_flatten = {
  indexing_maps = [
    affine_map<(i,j,k,l,m,n,o,p) -> (i,j,k,l,m,n,o,p)>, // A
    affine_map<(i,j,k,l,m,n,o,p) -> (i,j)>              // X (out)
  ],
  iterator_types = [ "parallel", "parallel", "reduction", "reduction",
                     "reduction", "reduction", "reduction", "reduction" ],
  doc = "X(i,j) += A(i,j,k,l,m,n,o,p)"
}

//
// Integration test that lowers a kernel annotated as sparse to
// actual sparse code, initializes a matching sparse storage scheme
// from file, and runs the resulting code with the JIT compiler.
//
module {
  //
  // A kernel that flattens a rank 8 tensor into a dense matrix.
  //
  func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
                       %argx: tensor<7x3xf64> {linalg.inplaceable = true})
                       -> tensor<7x3xf64> {
    %0 = linalg.generic #trait_flatten
      ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>)
      outs(%argx: tensor<7x3xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.addf %x, %a : f64
        linalg.yield %0 : f64
    } -> tensor<7x3xf64>
    return %0 : tensor<7x3xf64>
  }

  func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver that reads tensor from file and calls the sparse kernel.
  //
  func @entry() {
    %d0 = arith.constant 0.0 : f64
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c3 = arith.constant 3 : index
    %c7 = arith.constant 7 : index

    // Setup matrix memory that is initialized to zero.
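    // (This buffer is turned back into a tensor with bufferization.to_tensor below;
    // the linalg.inplaceable attribute on the kernel argument allows bufferization
    // to accumulate into it in place instead of allocating a fresh output buffer.)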
    %xdata = memref.alloc() : memref<7x3xf64>
    scf.for %i = %c0 to %c7 step %c1 {
      scf.for %j = %c0 to %c3 step %c1 {
        memref.store %d0, %xdata[%i, %j] : memref<7x3xf64>
      }
    }
    %x = bufferization.to_tensor %xdata : memref<7x3xf64>

    // Read the sparse tensor from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %a = sparse_tensor.new %fileName : !Filename to tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>

    // Call the kernel.
    %0 = call @kernel_flatten(%a, %x)
       : (tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>, tensor<7x3xf64>) -> tensor<7x3xf64>

    // Print the result for verification.
    //
    // CHECK: ( 6.25, 0, 0 )
    // CHECK: ( 4.224, 6.21, 0 )
    // CHECK: ( 0, 0, 15.455 )
    // CHECK: ( 0, 0, 0 )
    // CHECK: ( 0, 0, 0 )
    // CHECK: ( 0, 0, 0 )
    // CHECK: ( 7, 0, 0 )
    //
    %r = bufferization.to_memref %0 : memref<7x3xf64>
    scf.for %i = %c0 to %c7 step %c1 {
      %v = vector.transfer_read %r[%i, %c0], %d0: memref<7x3xf64>, vector<3xf64>
      vector.print %v : vector<3xf64>
    }

    // Release the resources.
    memref.dealloc %xdata : memref<7x3xf64>
    sparse_tensor.release %a : tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>

    return
  }
}