// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

// Element-wise trait shared by both kernels below: one sparse output
// computed point-wise from two sparse inputs over a single parallel dim.
#trait_op = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>,  // b (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP b(i)"
}

module {
  // Sparse complex addition: x(i) = a(i) + b(i).
  // The sparse compiler lowers this to a union of the two sparse iteration
  // spaces, since addition must visit entries present in either operand.
  func.func @cadd(%arga: tensor<?xcomplex<f64>, #SparseVector>,
                  %argb: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op
       ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                         tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %b: complex<f64>, %x: complex<f64>):
          %1 = complex.add %a, %b : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

  // Sparse complex multiplication: x(i) = a(i) * b(i).
  // Multiplication only produces nonzeros where both operands are nonzero,
  // so the sparse compiler generates an intersection of the iteration spaces.
  func.func @cmul(%arga: tensor<?xcomplex<f64>, #SparseVector>,
                  %argb: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xcomplex<f64>, #SparseVector>
    %xv = bufferization.alloc_tensor(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op
       ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                         tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %b: complex<f64>, %x: complex<f64>):
          %1 = complex.mul %a, %b : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }

  // Prints the first %d stored values of a sparse vector, one component per
  // line (real, then imaginary). Only the stored-values array is inspected;
  // the caller must pass the expected number of nonzeros in %d.
  func.func @dump(%arg0: tensor<?xcomplex<f64>, #SparseVector>, %d: index) {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %mem = sparse_tensor.values %arg0 : tensor<?xcomplex<f64>, #SparseVector> to memref<?xcomplex<f64>>
    scf.for %i = %c0 to %d step %c1 {
      %v = memref.load %mem[%i] : memref<?xcomplex<f64>>
      %real = complex.re %v : complex<f64>
      %imag = complex.im %v : complex<f64>
      vector.print %real : f64
      vector.print %imag : f64
    }
    return
  }

  // Driver method to call and verify complex kernels.
  func.func @entry() {
    // Setup sparse vectors.
    %v1 = arith.constant sparse<
       [ [0], [28], [31] ],
         [ (511.13, 2.0), (3.0, 4.0), (5.0, 6.0) ] > : tensor<32xcomplex<f64>>
    %v2 = arith.constant sparse<
       [ [1], [28], [31] ],
         [ (1.0, 0.0), (2.0, 0.0), (3.0, 0.0) ] > : tensor<32xcomplex<f64>>
    %sv1 = sparse_tensor.convert %v1 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>
    %sv2 = sparse_tensor.convert %v2 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>

    // Call sparse vector kernels.
    %0 = call @cadd(%sv1, %sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>,
          tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %1 = call @cmul(%sv1, %sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>,
          tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>

    //
    // Verify the results: cadd has 4 nonzeros (union of indices 0,1,28,31),
    // cmul has 2 nonzeros (intersection at indices 28,31).
    //
    // CHECK: 511.13
    // CHECK-NEXT: 2
    // CHECK-NEXT: 1
    // CHECK-NEXT: 0
    // CHECK-NEXT: 5
    // CHECK-NEXT: 4
    // CHECK-NEXT: 8
    // CHECK-NEXT: 6
    // CHECK-NEXT: 6
    // CHECK-NEXT: 8
    // CHECK-NEXT: 15
    // CHECK-NEXT: 18
    //
    %d1 = arith.constant 4 : index
    %d2 = arith.constant 2 : index
    call @dump(%0, %d1) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()
    call @dump(%1, %d2) : (tensor<?xcomplex<f64>, #SparseVector>, index) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %sv2 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %0 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %1 : tensor<?xcomplex<f64>, #SparseVector>
    return
  }
}