// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#DCSR  = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#DCSC  = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>
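// Two doubly compressed encodings: #DCSR keeps both dimensions compressed in
// the default (row-major) dimension order, while #DCSC permutes the dimensions
// via dimOrdering so that values are stored column-wise.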

//
// Integration test for conversions between sparse tensors, where the shape
// of the enveloping tensor may change between static and dynamic sizes
// (the actual underlying sizes obviously never change).
//
module {

  //
  // Helper method to print the values array. The transfer deliberately
  // reads more elements than are stored, so the size of the buffer is
  // verified as well.
  //
  func.func @dump(%arg0: memref<?xf64>) {
    %c = arith.constant 0 : index
    %d = arith.constant -1.0 : f64
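    // Read a full 8-lane vector; lanes past the end of the buffer are
    // filled with the padding value -1.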
    %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
    vector.print %0 : vector<8xf64>
    return
  }

  func.func @entry() {
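    // A 32x64 source tensor with 7 nonzero entries; the cast merely erases
    // the static dimension sizes so that %t2 has a fully dynamic shape.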
    %t1 = arith.constant sparse<
      [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
        [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
    %t2 = tensor.cast %t1 : tensor<32x64xf64> to tensor<?x?xf64>

    // Four dense to sparse conversions.
    %1 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSR>
    %2 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSC>
    %3 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSR>
    %4 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSC>
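    // Whether converted from the static or the dynamic source, the sparse
    // tensors should hold the same contents (verified by the CHECKs below).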

    // Two cross conversions.
    %5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
    %6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>

    //
    // All proper row-/column-wise?
    //
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    //
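    // sparse_tensor.values gives access to the underlying values array of
    // each sparse tensor's storage.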
    %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m3 = sparse_tensor.values %3 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %m4 = sparse_tensor.values %4 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m5 = sparse_tensor.values %5 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m6 = sparse_tensor.values %6 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    call @dump(%m1) : (memref<?xf64>) -> ()
    call @dump(%m2) : (memref<?xf64>) -> ()
    call @dump(%m3) : (memref<?xf64>) -> ()
    call @dump(%m4) : (memref<?xf64>) -> ()
    call @dump(%m5) : (memref<?xf64>) -> ()
    call @dump(%m6) : (memref<?xf64>) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %1 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %2 : tensor<?x?xf64, #DCSC>
    bufferization.dealloc_tensor %3 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %4 : tensor<?x?xf64, #DCSC>
    bufferization.dealloc_tensor %5 : tensor<?x?xf64, #DCSC>
    bufferization.dealloc_tensor %6 : tensor<?x?xf64, #DCSR>

    return
  }
}