// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s --check-prefix=CHECK-HIR
//
// RUN: mlir-opt %s -sparsification --sparse-tensor-conversion --canonicalize | \
// RUN: FileCheck %s --check-prefix=CHECK-MIR

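// Storage scheme for the input: all-dense levels with a permuted
// (i,j,k) -> (k,i,j) dimension ordering.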
#X = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "dense", "dense" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

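// Trait for a full reduction of the permuted 3-D input A into the 0-D output X.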
#trait = {
  indexing_maps = [
    affine_map<(i,j,k) -> (k,i,j)>,  // A (in)
    affine_map<(i,j,k) -> ()>        // X (out)
  ],
  iterator_types = ["reduction", "reduction", "reduction"]
}

// CHECK-HIR-LABEL:   func @sparse_dynamic_dims(
// CHECK-HIR-SAME:      %[[VAL_0:.*]]: tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-HIR-SAME:      %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
// CHECK-HIR-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-HIR-DAG:       %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-HIR-DAG:       %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-HIR-DAG:       %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR-DAG:       %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR-DAG:       %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR-DAG:       %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-HIR:           %[[VAL_11:.*]] = tensor.extract %[[VAL_1]][] : tensor<f32>
// CHECK-HIR:           %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_3]] to %[[VAL_5]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-HIR:             %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_3]] to %[[VAL_6]] step %[[VAL_2]] iter_args(%[[VAL_17:.*]] = %[[VAL_14]]) -> (f32) {
// CHECK-HIR:               %[[VAL_18:.*]] = arith.muli %[[VAL_6]], %[[VAL_13]] : index
// CHECK-HIR:               %[[VAL_19:.*]] = arith.addi %[[VAL_18]], %[[VAL_16]] : index
// CHECK-HIR:               %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_2]] iter_args(%[[VAL_22:.*]] = %[[VAL_17]]) -> (f32) {
// CHECK-HIR:                 %[[VAL_23:.*]] = arith.muli %[[VAL_7]], %[[VAL_19]] : index
// CHECK-HIR:                 %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_21]] : index
// CHECK-HIR:                 %[[VAL_25:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK-HIR:                 %[[VAL_26:.*]] = arith.addf %[[VAL_22]], %[[VAL_25]] : f32
// CHECK-HIR:                 scf.yield %[[VAL_26]] : f32
// CHECK-HIR:               }
// CHECK-HIR:               scf.yield %[[VAL_20]] : f32
// CHECK-HIR:             }
// CHECK-HIR:             scf.yield %[[VAL_15]] : f32
// CHECK-HIR:           }
// CHECK-HIR:           memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-HIR:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-HIR:           return %[[VAL_30]] : tensor<f32>
// CHECK-HIR:         }
//
// CHECK-MIR-LABEL:   func @sparse_dynamic_dims(
// CHECK-MIR-SAME:      %[[VAL_0:.*]]: !llvm.ptr<i8>,
// CHECK-MIR-SAME:      %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
// CHECK-MIR-DAG:       %[[VAL_2:.*]] = arith.constant 2 : index
// CHECK-MIR-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-MIR-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-MIR-DAG:       %[[VAL_5:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_4]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR-DAG:       %[[VAL_6:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_3]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR-DAG:       %[[VAL_7:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_2]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR-DAG:       %[[VAL_8:.*]] = call @sparseValuesF32(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK-MIR-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-MIR:           %[[VAL_11:.*]] = tensor.extract %[[VAL_1]][] : tensor<f32>
// CHECK-MIR:           %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_5]] step %[[VAL_3]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-MIR:             %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_4]] to %[[VAL_6]] step %[[VAL_3]] iter_args(%[[VAL_17:.*]] = %[[VAL_14]]) -> (f32) {
// CHECK-MIR:               %[[VAL_18:.*]] = arith.muli %[[VAL_6]], %[[VAL_13]] : index
// CHECK-MIR:               %[[VAL_19:.*]] = arith.addi %[[VAL_18]], %[[VAL_16]] : index
// CHECK-MIR:               %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_4]] to %[[VAL_7]] step %[[VAL_3]] iter_args(%[[VAL_22:.*]] = %[[VAL_17]]) -> (f32) {
// CHECK-MIR:                 %[[VAL_23:.*]] = arith.muli %[[VAL_7]], %[[VAL_19]] : index
// CHECK-MIR:                 %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_21]] : index
// CHECK-MIR:                 %[[VAL_25:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK-MIR:                 %[[VAL_26:.*]] = arith.addf %[[VAL_22]], %[[VAL_25]] : f32
// CHECK-MIR:                 scf.yield %[[VAL_26]] : f32
// CHECK-MIR:               }
// CHECK-MIR:               scf.yield %[[VAL_20]] : f32
// CHECK-MIR:             }
// CHECK-MIR:             scf.yield %[[VAL_15]] : f32
// CHECK-MIR:           }
// CHECK-MIR:           memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-MIR:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-MIR:           return %[[VAL_30]] : tensor<f32>
// CHECK-MIR:         }
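// Sums all elements of a dynamically sized 3-D tensor into a scalar. The
// checks above verify that the loop bounds are obtained with tensor.dim at
// the HIR level and with sparseDimSize runtime calls after conversion (MIR).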
func.func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
                               %argx: tensor<f32>) -> tensor<f32> {
  %0 = linalg.generic #trait
    ins(%arga: tensor<?x?x?xf32, #X>)
    outs(%argx: tensor<f32>) {
      ^bb(%a: f32, %x: f32):
        %0 = arith.addf %x, %a : f32
        linalg.yield %0 : f32
  } -> tensor<f32>
  return %0 : tensor<f32>
}