// RUN: mlir-opt %s -tensor-copy-insertion="allow-return-allocs" | FileCheck %s
// RUN: mlir-opt %s -tensor-copy-insertion="bufferize-function-boundaries allow-return-allocs" | FileCheck %s --check-prefix=CHECK-FUNC

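// These tests exercise tensor copy insertion on sparse tensors. The pass
// annotates each allocating op result with a `bufferization.escape` attribute,
// recording whether the underlying buffer may escape the function. The first
// RUN line checks the default configuration (the CHECK prefix); the second
// enables function boundary bufferization (the CHECK-FUNC prefix), under
// which returned allocations are marked as escaping.
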
#DCSR = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>
}>

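// `bufferization.alloc_tensor` allocates a fresh sparse tensor that is then
// returned. The allocation escapes only when function boundaries are
// bufferized, so it is annotated escape = [true] under the CHECK-FUNC run
// and escape = [false] otherwise.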
// CHECK-LABEL: func @bufferization_alloc_tensor
// CHECK-FUNC-LABEL: func @bufferization_alloc_tensor
func.func @bufferization_alloc_tensor() -> tensor<20x40xf32, #DCSR> {
  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]}
  // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  %1 = sparse_tensor.load %0 : tensor<20x40xf32, #DCSR>
  return %1 : tensor<20x40xf32, #DCSR>
}

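// `sparse_tensor.new` reads a sparse tensor from a file. Like an allocation,
// its result carries an escape annotation, and the returned tensor is marked
// as escaping only when function boundaries are bufferized.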
!Filename = !llvm.ptr<i8>
// CHECK-LABEL: func @sparse_tensor_new
// CHECK-FUNC-LABEL: func @sparse_tensor_new
func.func @sparse_tensor_new(%file: !Filename) -> tensor<20x40xf32, #DCSR> {
  // CHECK: sparse_tensor.new {{.*}} {bufferization.escape = [false]}
  // CHECK-FUNC: sparse_tensor.new {{.*}} {bufferization.escape = [true]}
  %0 = sparse_tensor.new %file : !Filename to tensor<20x40xf32, #DCSR>
  return %0 : tensor<20x40xf32, #DCSR>
}

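// `sparse_tensor.convert` materializes a dense tensor from a sparse one. The
// intermediate sparse allocation is only consumed by the load and never
// escapes under either configuration; only the converted dense result is
// returned and may escape.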
// CHECK-LABEL: func @sparse_tensor_convert
// CHECK-FUNC-LABEL: func @sparse_tensor_convert
func.func @sparse_tensor_convert() -> tensor<20x40xf32> {
  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
  // CHECK-FUNC: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  // CHECK: %[[loaded:.*]] = sparse_tensor.load %[[alloc]]
  // CHECK-FUNC: %[[loaded:.*]] = sparse_tensor.load %[[alloc]]
  %1 = sparse_tensor.load %0 : tensor<20x40xf32, #DCSR>
  // CHECK: sparse_tensor.convert %[[loaded]] {bufferization.escape = [false]}
  // CHECK-FUNC: sparse_tensor.convert %[[loaded]] {bufferization.escape = [true]}
  %2 = sparse_tensor.convert %1 : tensor<20x40xf32, #DCSR> to tensor<20x40xf32>
  return %2 : tensor<20x40xf32>
}

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

#trait = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // A (in)
    affine_map<(i) -> (i)>   // X (out)
  ],
  iterator_types = ["parallel"]
}

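// The output operand %argb is also returned from the function, so the
// linalg.generic cannot update it in place. Copy insertion materializes a
// copy of %argb with `bufferization.alloc_tensor() copy(...)` and redirects
// the outs operand to that copy.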
// CHECK-LABEL: func @update_notinplace(
//  CHECK-SAME:    %[[argb:.*]]: tensor<10xf32>
// CHECK-FUNC-LABEL: func @update_notinplace(
//  CHECK-FUNC-SAME:    %[[argb:.*]]: tensor<10xf32>
func.func @update_notinplace(%argb: tensor<10xf32>, %arga: tensor<10xf32, #SV>)
  -> (tensor<10xf32>, tensor<10xf32>)
{
  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[argb]]) {bufferization.escape = [false]} : tensor<10xf32>
  // CHECK: linalg.generic {{.*}} outs(%[[alloc]]
  // CHECK-FUNC: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[argb]]) {bufferization.escape = [true]} : tensor<10xf32>
  // CHECK-FUNC: linalg.generic {{.*}} outs(%[[alloc]]
  %0 = linalg.generic #trait
  ins(%arga: tensor<10xf32, #SV>)
  outs(%argb: tensor<10xf32>) {
    ^bb(%a: f32, %x : f32):
      %up = arith.addf %a, %x : f32
      linalg.yield %up : f32
  } -> tensor<10xf32>
  return %0, %argb : tensor<10xf32>, tensor<10xf32>
}