// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

// A 1-D tensor type whose single dimension is stored in compressed
// (sparse) form, i.e. only nonzero entries are materialized.
#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

module {

  //
  // Kernel: dot product of two sparse 1024-element f32 vectors,
  // accumulated into (and returned through) the 0-D tensor %x.
  //
  func.func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
                        %b: tensor<1024xf32, #SparseVector>,
                        %x: tensor<f32>) -> tensor<f32> {
    %dot = linalg.dot ins(%a, %b: tensor<1024xf32, #SparseVector>,
                                  tensor<1024xf32, #SparseVector>)
                      outs(%x: tensor<f32>) -> tensor<f32>
    return %dot : tensor<f32>
  }

  //
  // Driver: builds two sparse vectors, runs the kernel, and prints
  // the resulting scalar for FileCheck verification.
  //
  func.func @entry() {
    // Two sparse constants; the vectors overlap at indices 22 and 1022,
    // so the expected dot product is 3*6 + 5*7 = 53.
    %d1 = arith.constant sparse<
       [ [0], [1], [22], [23], [1022] ], [1.0, 2.0, 3.0, 4.0, 5.0]
    > : tensor<1024xf32>
    %d2 = arith.constant sparse<
       [ [22], [1022], [1023] ], [6.0, 7.0, 8.0]
    > : tensor<1024xf32>
    // Convert the dense constants into the compressed storage scheme.
    %s1 = sparse_tensor.convert %d1 : tensor<1024xf32> to tensor<1024xf32, #SparseVector>
    %s2 = sparse_tensor.convert %d2 : tensor<1024xf32> to tensor<1024xf32, #SparseVector>

    // Call the kernel and verify the output.
    //
    // CHECK: 53
    //
    %t = bufferization.alloc_tensor() : tensor<f32>
    %z = arith.constant 0.0 : f32
    %x = tensor.insert %z into %t[] : tensor<f32>
    %0 = call @sparse_dot(%s1, %s2, %x) : (tensor<1024xf32, #SparseVector>,
                                           tensor<1024xf32, #SparseVector>,
                                           tensor<f32>) -> tensor<f32>
    %1 = tensor.extract %0[] : tensor<f32>
    vector.print %1 : f32

    // Release the resources.
    bufferization.dealloc_tensor %s1 : tensor<1024xf32, #SparseVector>
    bufferization.dealloc_tensor %s2 : tensor<1024xf32, #SparseVector>

    return
  }
}