// RUN: mlir-opt %s \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --linalg-bufferize --convert-linalg-to-loops \
// RUN:   --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize --lower-affine \
// RUN:   --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
// RUN:   --convert-std-to-llvm --reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#DCSR  = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ]
}>

#DCSC  = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>
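
// Both encodings compress both dimensions (doubly compressed). #DCSR keeps
// the natural row-major dimension order, while #DCSC uses dimOrdering to
// permute the dimensions, storing the same tensor column-major. Hence the
// values arrays printed below appear row-wise for #DCSR and column-wise
// for #DCSC.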

//
// Integration test for conversions between sparse tensors, where the
// dynamic sizes of the shape of the enveloping tensor may change (the
// actual underlying dimension sizes, of course, never change).
//
module {

  //
  // Helper function to print the values array of a sparse tensor. The
  // transfer read deliberately reads more elements than are stored in
  // order to verify the size of the underlying buffer as well.
  //
  func @dump(%arg0: memref<?xf64>) {
    %c = arith.constant 0 : index
    %d = arith.constant -1.0 : f64
    %0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
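    // Out-of-bounds lanes are filled with the -1 padding value, so any
    // trailing -1 entries in the printed vector reveal the actual size
    // of the values buffer.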
    vector.print %0 : vector<8xf64>
    return
  }

  func @entry() {
    %t1 = arith.constant sparse<
      [ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
        [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
    %t2 = tensor.cast %t1 : tensor<32x64xf64> to tensor<?x?xf64>
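    // The cast erases the static 32x64 sizes, so %t2 has a fully dynamic
    // shape. The conversions from %t1 and %t2 below should nevertheless
    // yield identical sparse storage.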

    // Four dense to sparse conversions.
    %1 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSR>
    %2 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSC>
    %3 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSR>
    %4 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSC>

    // Two cross conversions.
    %5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
    %6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>
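    // After the cross conversions, %5 holds the column-wise #DCSC layout and
    // %6 the row-wise #DCSR layout, as verified by the last two CHECK lines.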

    //
    // All proper row-/column-wise?
    //
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
    //
    %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m3 = sparse_tensor.values %3 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %m4 = sparse_tensor.values %4 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m5 = sparse_tensor.values %5 : tensor<?x?xf64, #DCSC> to memref<?xf64>
    %m6 = sparse_tensor.values %6 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    call @dump(%m1) : (memref<?xf64>) -> ()
    call @dump(%m2) : (memref<?xf64>) -> ()
    call @dump(%m3) : (memref<?xf64>) -> ()
    call @dump(%m4) : (memref<?xf64>) -> ()
    call @dump(%m5) : (memref<?xf64>) -> ()
    call @dump(%m6) : (memref<?xf64>) -> ()

    // Release the resources.
    sparse_tensor.release %1 : tensor<?x?xf64, #DCSR>
    sparse_tensor.release %2 : tensor<?x?xf64, #DCSC>
    sparse_tensor.release %3 : tensor<?x?xf64, #DCSR>
    sparse_tensor.release %4 : tensor<?x?xf64, #DCSC>
    sparse_tensor.release %5 : tensor<?x?xf64, #DCSC>
    sparse_tensor.release %6 : tensor<?x?xf64, #DCSR>

    return
  }
}