// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s --check-prefix=CHECK-HIR
//
// RUN: mlir-opt %s -sparsification --sparse-tensor-conversion --canonicalize | \
// RUN: FileCheck %s --check-prefix=CHECK-MIR

#X = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "dense", "dense" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

#trait = {
  indexing_maps = [
    affine_map<(i,j,k) -> (k,i,j)>, // A (in)
    affine_map<(i,j,k) -> ()>       // X (out)
  ],
  iterator_types = ["reduction", "reduction", "reduction"]
}

// CHECK-HIR-LABEL:   func @sparse_dynamic_dims(
// CHECK-HIR-SAME:      %[[VAL_0:.*]]: tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-HIR-SAME:      %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
// CHECK-HIR-DAG:       %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-HIR-DAG:       %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-HIR-DAG:       %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-HIR:           %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR:           %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR:           %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK-HIR:           %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-HIR:           %[[VAL_10:.*]] = memref.alloc() : memref<f32>
// CHECK-HIR:           memref.copy %[[VAL_9]], %[[VAL_10]] : memref<f32> to memref<f32>
// CHECK-HIR:           %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref<f32>
// CHECK-HIR:           %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_3]] to %[[VAL_5]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-HIR:             %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_3]] to %[[VAL_6]] step %[[VAL_2]] iter_args(%[[VAL_17:.*]] = %[[VAL_14]]) -> (f32) {
// CHECK-HIR:               %[[VAL_18:.*]] = arith.muli %[[VAL_6]], %[[VAL_13]] : index
// CHECK-HIR:               %[[VAL_19:.*]] = arith.addi %[[VAL_18]], %[[VAL_16]] : index
// CHECK-HIR:               %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_2]] iter_args(%[[VAL_22:.*]] = %[[VAL_17]]) -> (f32) {
// CHECK-HIR:                 %[[VAL_23:.*]] = arith.muli %[[VAL_7]], %[[VAL_19]] : index
// CHECK-HIR:                 %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_21]] : index
// CHECK-HIR:                 %[[VAL_25:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK-HIR:                 %[[VAL_26:.*]] = arith.addf %[[VAL_22]], %[[VAL_25]] : f32
// CHECK-HIR:                 scf.yield %[[VAL_26]] : f32
// CHECK-HIR:               }
// CHECK-HIR:               scf.yield %[[VAL_20]] : f32
// CHECK-HIR:             }
// CHECK-HIR:             scf.yield %[[VAL_15]] : f32
// CHECK-HIR:           }
// CHECK-HIR:           memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-HIR:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-HIR:           return %[[VAL_30]] : tensor<f32>
// CHECK-HIR:         }
//
// CHECK-MIR-LABEL:   func @sparse_dynamic_dims(
// CHECK-MIR-SAME:      %[[VAL_0:.*]]: !llvm.ptr<i8>,
// CHECK-MIR-SAME:      %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
// CHECK-MIR-DAG:       %[[VAL_2:.*]] = arith.constant 2 : index
// CHECK-MIR-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-MIR-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-MIR:           %[[VAL_5:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_4]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR:           %[[VAL_6:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_3]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR:           %[[VAL_7:.*]] = call @sparseDimSize(%[[VAL_0]], %[[VAL_2]]) : (!llvm.ptr<i8>, index) -> index
// CHECK-MIR:           %[[VAL_8:.*]] = call @sparseValuesF32(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK-MIR:           %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-MIR:           %[[VAL_10:.*]] = memref.alloc() : memref<f32>
// CHECK-MIR:           memref.copy %[[VAL_9]], %[[VAL_10]] : memref<f32> to memref<f32>
// CHECK-MIR:           %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref<f32>
// CHECK-MIR:           %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_5]] step %[[VAL_3]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-MIR:             %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_4]] to %[[VAL_6]] step %[[VAL_3]] iter_args(%[[VAL_17:.*]] = %[[VAL_14]]) -> (f32) {
// CHECK-MIR:               %[[VAL_18:.*]] = arith.muli %[[VAL_6]], %[[VAL_13]] : index
// CHECK-MIR:               %[[VAL_19:.*]] = arith.addi %[[VAL_18]], %[[VAL_16]] : index
// CHECK-MIR:               %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_4]] to %[[VAL_7]] step %[[VAL_3]] iter_args(%[[VAL_22:.*]] = %[[VAL_17]]) -> (f32) {
// CHECK-MIR:                 %[[VAL_23:.*]] = arith.muli %[[VAL_7]], %[[VAL_19]] : index
// CHECK-MIR:                 %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_21]] : index
// CHECK-MIR:                 %[[VAL_25:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK-MIR:                 %[[VAL_26:.*]] = arith.addf %[[VAL_22]], %[[VAL_25]] : f32
// CHECK-MIR:                 scf.yield %[[VAL_26]] : f32
// CHECK-MIR:               }
// CHECK-MIR:               scf.yield %[[VAL_20]] : f32
// CHECK-MIR:             }
// CHECK-MIR:             scf.yield %[[VAL_15]] : f32
// CHECK-MIR:           }
// CHECK-MIR:           memref.store %[[VAL_12]], %[[VAL_10]][] : memref<f32>
// CHECK-MIR:           %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<f32>
// CHECK-MIR:           return %[[VAL_30]] : tensor<f32>
// CHECK-MIR:         }

// A kernel that sum-reduces every entry of a 3-d tensor with dynamic
// dimension sizes (stored with a permuted, all-dense dimension ordering)
// into a scalar.
func @sparse_dynamic_dims(%arga: tensor<?x?x?xf32, #X>,
                          %argx: tensor<f32>) -> tensor<f32> {
  %0 = linalg.generic #trait
    ins(%arga: tensor<?x?x?xf32, #X>)
    outs(%argx: tensor<f32>) {
      ^bb(%a: f32, %x: f32):
        %0 = arith.addf %x, %a : f32
        linalg.yield %0 : f32
  } -> tensor<f32>
  return %0 : tensor<f32>
}