// RUN: mlir-opt %s \
// RUN:   --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm \
// RUN:   --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
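// Note on the flags below (as understood for this snapshot of the
// sparsification pass): vectorization-strategy=2 enables vectorization of
// inner loops even when they iterate over sparse storage (strategy 1 limits
// this to dense loops, 0 disables it), and vl=2 requests two vector lanes.
//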
// RUN: mlir-opt %s \
// RUN:   --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
// RUN:   --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm \
// RUN:   --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

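// Doubly compressed sparse row: with "compressed" storage on both dimensions,
// only the rows that contain nonzeros are stored, and within each stored row
// only the nonzero columns.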
#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>

// An example of a quantized sparse matmul. Because the zero point of the
// sparse input is zero, the subtraction on b(k,j) folds away (zeros stay
// zero, so sparsity is preserved), and the sparse compiler generates very
// efficient code for the
//      x(i,j) += (ext(a(i,k)) - 2) * ext(b(k,j))
// operation.
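//
// For reference only (not part of the test input): a sketch of the generic
// form that --linalg-generalize-named-ops rewrites linalg.quantized_matmul
// into; names and exact attribute syntax are illustrative:
//
//   linalg.generic  // matmul maps, iterator_types = [parallel, parallel, reduction]
//     ins(%a, %b, %a_zp, %b_zp : tensor<5x3xi8>, tensor<3x6xi8, #DCSR>, i32, i32)
//     outs(%x : tensor<5x6xi32>) {
//   ^bb0(%va: i8, %vb: i8, %za: i32, %zb: i32, %acc: i32):
//     %ea = arith.extsi %va : i8 to i32
//     %sa = arith.subi %ea, %za : i32   // ext(a(i,k)) - 2
//     %eb = arith.extsi %vb : i8 to i32
//     %sb = arith.subi %eb, %zb : i32   // ext(b(k,j)) - 0
//     %p  = arith.muli %sa, %sb : i32
//     %s  = arith.addi %acc, %p : i32
//     linalg.yield %s : i32
//   } -> tensor<5x6xi32>
//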
module {

  func @quantized_matmul(%input1: tensor<5x3xi8>,
                         %input2: tensor<3x6xi8, #DCSR>,
                         %output: tensor<5x6xi32>) -> tensor<5x6xi32> {
    %c0 = arith.constant 0 : i32
    %c2 = arith.constant 2 : i32
    %0 = linalg.quantized_matmul
      ins(%input1, %input2, %c2, %c0 : tensor<5x3xi8>, tensor<3x6xi8, #DCSR>, i32, i32)
      outs(%output : tensor<5x6xi32>) -> tensor<5x6xi32>
    return %0: tensor<5x6xi32>
  }

  func @entry() {
    %c0 = arith.constant 0 : index
    %i0 = arith.constant 0 : i32

    %input1 = arith.constant dense<[
      [  -128,   3,  127 ],
      [     0,   0,    0 ],
      [    11,   1,    0 ],
      [     0,   5,   -1 ],
      [    13,   0,    3 ]
    ]> : tensor<5x3xi8>

    %input2 = arith.constant dense<[
      [  127,   0, -128,    0,   0,   3 ],
      [    0,   0,    0,    0,   0,   0 ],
      [    0,   0,    0,  100,  10,   0 ]
    ]> : tensor<3x6xi8>

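    // Convert the dense constant to sparse DCSR storage for the kernel.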
    %sparse_input2 = sparse_tensor.convert %input2 : tensor<3x6xi8> to tensor<3x6xi8, #DCSR>

    // Call the kernel.
    %output = arith.constant dense<0> : tensor<5x6xi32>
    %0 = call @quantized_matmul(%input1, %sparse_input2, %output)
       : (tensor<5x3xi8>,
          tensor<3x6xi8, #DCSR>,
          tensor<5x6xi32>) -> tensor<5x6xi32>

    //
    // Verify the output.
    //
    // CHECK:    ( ( -16510, 0, 16640, 12500, 1250, -390 ),
    // CHECK-SAME: ( -254, 0, 256, -200, -20, -6 ),
    // CHECK-SAME: ( 1143, 0, -1152, -200, -20, 27 ),
    // CHECK-SAME: ( -254, 0, 256, -300, -30, -6 ),
    // CHECK-SAME: ( 1397, 0, -1408, 100, 10, 33 ) )
    //
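    // As a spot check against the definition above, the first entry is
    //   x(0,0) = (-128 - 2) * 127 = -16510.
    //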
    %m = bufferization.to_memref %0 : memref<5x6xi32>
    %v = vector.transfer_read %m[%c0, %c0], %i0
      : memref<5x6xi32>, vector<5x6xi32>
    vector.print %v : vector<5x6xi32>

    // Release the resources.
    sparse_tensor.release %sparse_input2 : tensor<3x6xi8, #DCSR>
    memref.dealloc %m : memref<5x6xi32>

    return
  }
}