1# RUN: SUPPORT_LIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s
2
3import ctypes
4import numpy as np
5import os
6import sys
7
8from mlir import ir
9from mlir import runtime as rt
10from mlir.dialects import sparse_tensor as st
11from mlir.dialects import builtin
12from mlir.dialects.linalg.opdsl import lang as dsl
13
# Make the sibling `tools` directory importable so the test helpers
# (numpy<->sparse-tensor conversion, sparse compiler wrapper) resolve
# regardless of the directory the test is launched from.
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import np_to_sparse_tensor as test_tools
from tools import sparse_compiler
18
19# TODO: Use linalg_structured_op to generate the kernel after making it to
20# handle sparse tensor outputs.
21_KERNEL_STR = """
22#DCSR = #sparse_tensor.encoding<{
23  dimLevelType = [ "compressed", "compressed" ]
24}>
25
26#trait_add_elt = {
27  indexing_maps = [
28    affine_map<(i,j) -> (i,j)>,  // A
29    affine_map<(i,j) -> (i,j)>,  // B
30    affine_map<(i,j) -> (i,j)>   // X (out)
31  ],
32  iterator_types = ["parallel", "parallel"],
33  doc = "X(i,j) = A(i,j) + B(i,j)"
34}
35
36func.func @sparse_add_elt(
37    %arga: tensor<3x4xf64, #DCSR>, %argb: tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR> {
38  %argx = bufferization.alloc_tensor() : tensor<3x4xf64, #DCSR>
39  %0 = linalg.generic #trait_add_elt
40    ins(%arga, %argb: tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>)
41    outs(%argx: tensor<3x4xf64, #DCSR>) {
42      ^bb(%a: f64, %b: f64, %x: f64):
43        %1 = arith.addf %a, %b : f64
44        linalg.yield %1 : f64
45  } -> tensor<3x4xf64, #DCSR>
46  return %0 : tensor<3x4xf64, #DCSR>
47}
48
49func.func @main(%ad: tensor<3x4xf64>, %bd: tensor<3x4xf64>) -> tensor<3x4xf64, #DCSR>
50  attributes { llvm.emit_c_interface } {
51  %a = sparse_tensor.convert %ad : tensor<3x4xf64> to tensor<3x4xf64, #DCSR>
52  %b = sparse_tensor.convert %bd : tensor<3x4xf64> to tensor<3x4xf64, #DCSR>
53  %0 = call @sparse_add_elt(%a, %b) : (tensor<3x4xf64, #DCSR>, tensor<3x4xf64, #DCSR>) -> tensor<3x4xf64, #DCSR>
54  return %0 : tensor<3x4xf64, #DCSR>
55}
56"""
57
58
def _run_test(support_lib, kernel):
  """Compiles `kernel`, JIT-runs its `main`, and checks the sparse result.

  Args:
    support_lib: Path to the sparse-tensor runtime support library that the
      JIT-compiled code links against.
    kernel: MLIR source text providing a `main` entry point with the
      C interface enabled.

  Prints 'PASSED' when the result matches the expected element-wise sum;
  otherwise exits with status message 'FAILURE'.
  """
  compiler = sparse_compiler.SparseCompiler(
      options='', opt_level=2, shared_libs=[support_lib])
  module = ir.Module.parse(kernel)
  engine = compiler.compile_and_jit(module)

  # Set up numpy inputs and buffer for output.
  a = np.array(
      [[1.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 6.6, 0.0]],
      np.float64)
  b = np.array(
      [[1.1, 0.0, 0.0, 2.8], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
      np.float64)

  # The C interface expects a pointer-to-pointer to each memref descriptor.
  mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
  mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))

  # The sparse tensor output is a pointer to pointer of char (an opaque
  # handle owned by the runtime support library).
  out = ctypes.c_char(0)
  mem_out = ctypes.pointer(ctypes.pointer(out))

  # Invoke the kernel.
  engine.invoke('main', mem_a, mem_b, mem_out)

  # Retrieve the result in COO form and check the full structure, not just
  # the values: rank/shape must match the 3x4 output type and exactly the
  # three nonzeros of a + b must be present.
  rank, nse, shape, values, indices = test_tools.sparse_tensor_to_coo_tensor(
      support_lib, mem_out[0], np.float64)

  # CHECK: PASSED
  if (rank == 2 and nse == 3 and list(shape) == [3, 4] and
      np.allclose(values, [2.2, 2.8, 6.6]) and
      np.allclose(indices, [[0, 0], [0, 3], [2, 2]])):
    print('PASSED')
  else:
    # sys.exit is preferred over the site-injected quit(), which is absent
    # when Python runs with -S; both raise SystemExit with the message.
    sys.exit('FAILURE')
94
95
def test_elementwise_add():
  """Runs the element-wise sparse-addition kernel test end to end."""
  # The lit RUN line exports SUPPORT_LIB; resolve and sanity-check it first.
  support_lib = os.getenv('SUPPORT_LIB')
  assert support_lib is not None, 'SUPPORT_LIB is undefined'
  assert os.path.exists(support_lib), f'{support_lib} does not exist'
  # Compile and run inside a fresh MLIR context with an unknown location.
  with ir.Context(), ir.Location.unknown():
    _run_test(support_lib, _KERNEL_STR)
103
104
# Executed directly by the lit RUN line; FileCheck matches the printed output.
test_elementwise_add()