// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s

#DenseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["dense", "dense"]
}>

#SparseMatrix = #sparse_tensor.encoding<{
  dimLevelType = ["compressed", "compressed"]
}>
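
// The same index kernel is lowered once with the all-dense encoding and once
// with the doubly compressed encoding to compare the generated loop nests.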

#trait = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) * i * j"
}

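// With the all-dense encoding, the sparsifier emits plain nested scf.for loops
// that compute linearized positions into the values buffers of the input and
// output tensors.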
// CHECK-LABEL:   func @dense_index(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_5:.*]] = bufferization.alloc_tensor(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_7:.*]] = tensor.dim %[[VAL_5]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_8:.*]] = tensor.dim %[[VAL_5]], %[[VAL_2]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_5]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_1]] to %[[VAL_7]] step %[[VAL_2]] {
// CHECK:             scf.for %[[VAL_11:.*]] = %[[VAL_1]] to %[[VAL_8]] step %[[VAL_2]] {
// CHECK:               %[[VAL_12:.*]] = arith.muli %[[VAL_8]], %[[VAL_10]] : index
// CHECK:               %[[VAL_13:.*]] = arith.addi %[[VAL_12]], %[[VAL_11]] : index
// CHECK:               %[[VAL_14:.*]] = arith.muli %[[VAL_8]], %[[VAL_10]] : index
// CHECK:               %[[VAL_15:.*]] = arith.addi %[[VAL_14]], %[[VAL_11]] : index
// CHECK:               %[[VAL_16:.*]] = arith.index_cast %[[VAL_11]] : index to i64
// CHECK:               %[[VAL_17:.*]] = arith.index_cast %[[VAL_10]] : index to i64
// CHECK:               %[[VAL_18:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xi64>
// CHECK:               %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_18]] : i64
// CHECK:               %[[VAL_20:.*]] = arith.muli %[[VAL_16]], %[[VAL_19]] : i64
// CHECK:               memref.store %[[VAL_20]], %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref<?xi64>
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_21:.*]] = sparse_tensor.load %[[VAL_5]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:           return %[[VAL_21]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:         }
func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
                      -> tensor<?x?xi64, #DenseMatrix> {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %0 = tensor.dim %arga, %c0 : tensor<?x?xi64, #DenseMatrix>
  %1 = tensor.dim %arga, %c1 : tensor<?x?xi64, #DenseMatrix>
  %init = bufferization.alloc_tensor(%0, %1) : tensor<?x?xi64, #DenseMatrix>
  %r = linalg.generic #trait
      ins(%arga: tensor<?x?xi64, #DenseMatrix>)
     outs(%init: tensor<?x?xi64, #DenseMatrix>) {
      ^bb(%a: i64, %x: i64):
        %i = linalg.index 0 : index
        %j = linalg.index 1 : index
        %ii = arith.index_cast %i : index to i64
        %jj = arith.index_cast %j : index to i64
        %m1 = arith.muli %ii, %a : i64
        %m2 = arith.muli %jj, %m1 : i64
        linalg.yield %m2 : i64
  } -> tensor<?x?xi64, #DenseMatrix>
  return %r : tensor<?x?xi64, #DenseMatrix>
}

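// With the doubly compressed encoding, the loops iterate over the pointers and
// indices arrays of the input, and each result is inserted into the output
// through sparse_tensor.lex_insert.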
// CHECK-LABEL:   func @sparse_index(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 2 : index
// CHECK-DAG:       %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.alloc_tensor(%[[VAL_4]], %[[VAL_5]]) : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:           %[[VAL_12:.*]] = memref.alloca(%[[VAL_3]]) : memref<?xindex>
// CHECK:           %[[BUF:.*]] = memref.alloca() : memref<i64>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_2]] {
// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK:             memref.store %[[VAL_16]], %[[VAL_12]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK:             %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK:             %[[VAL_18:.*]] = arith.addi %[[VAL_15]], %[[VAL_2]] : index
// CHECK:             %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK:             scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_2]] {
// CHECK:               %[[VAL_21:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xindex>
// CHECK:               memref.store %[[VAL_21]], %[[VAL_12]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:               %[[VAL_22:.*]] = arith.index_cast %[[VAL_21]] : index to i64
// CHECK:               %[[VAL_23:.*]] = arith.index_cast %[[VAL_16]] : index to i64
// CHECK:               %[[VAL_24:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_20]]] : memref<?xi64>
// CHECK:               %[[VAL_25:.*]] = arith.muli %[[VAL_23]], %[[VAL_24]] : i64
// CHECK:               %[[VAL_26:.*]] = arith.muli %[[VAL_22]], %[[VAL_25]] : i64
// CHECK:               memref.store %[[VAL_26]], %[[BUF]][] : memref<i64>
// CHECK:               sparse_tensor.lex_insert %[[VAL_6]], %[[VAL_12]], %[[BUF]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_27:.*]] = sparse_tensor.load %[[VAL_6]] hasInserts : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:           return %[[VAL_27]] : tensor<?x?xi64, #sparse_tensor.encoding
// CHECK:         }
func.func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
                       -> tensor<?x?xi64, #SparseMatrix> {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %0 = tensor.dim %arga, %c0 : tensor<?x?xi64, #SparseMatrix>
  %1 = tensor.dim %arga, %c1 : tensor<?x?xi64, #SparseMatrix>
  %init = bufferization.alloc_tensor(%0, %1) : tensor<?x?xi64, #SparseMatrix>
  %r = linalg.generic #trait
      ins(%arga: tensor<?x?xi64, #SparseMatrix>)
     outs(%init: tensor<?x?xi64, #SparseMatrix>) {
      ^bb(%a: i64, %x: i64):
        %i = linalg.index 0 : index
        %j = linalg.index 1 : index
        %ii = arith.index_cast %i : index to i64
        %jj = arith.index_cast %j : index to i64
        %m1 = arith.muli %ii, %a : i64
        %m2 = arith.muli %jj, %m1 : i64
        linalg.yield %m2 : i64
  } -> tensor<?x?xi64, #SparseMatrix>
  return %r : tensor<?x?xi64, #SparseMatrix>
}