// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs allow-unknown-ops" -buffer-deallocation -canonicalize -split-input-file | FileCheck %s

// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null

// One scf.if branch yields a fresh allocation, the other yields the function
// argument %t. The CHECK lines verify that buffer deallocation clones %m in
// the "else" branch (so the yielded buffer is always owned) and inserts a
// single memref.dealloc after the to_tensor of the scf.if result.

// CHECK-LABEL: func @buffer_not_deallocated(
//  CHECK-SAME:     %[[t:.*]]: tensor<?xf32>
func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32> {
  // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]]
  // CHECK: %[[r:.*]] = scf.if %{{.*}} {
  %r = scf.if %c -> tensor<?xf32> {
    // CHECK: %[[some_op:.*]] = "test.some_op"
    // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
    // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
    // CHECK-NOT: dealloc
    // CHECK: scf.yield %[[casted]]
    %sz = "test.some_op"() : () -> (index)
    %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
    scf.yield %0 : tensor<?xf32>
  } else {
  // CHECK: } else {
    // CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
    // CHECK: scf.yield %[[cloned]]
    scf.yield %t : tensor<?xf32>
  }
  // CHECK: }
  // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: memref.dealloc %[[r]]
  // CHECK: return %[[r_tensor]]
  return %r : tensor<?xf32>
}

// -----

// The "then" branch yields the read-only argument %arg0 directly, while the
// "else" branch writes %val into a new alloc_tensor. The CHECK lines verify
// that the read-only buffer is cloned before being yielded (so both yielded
// buffers are owned) and that the scf.if result is deallocated exactly once.

// CHECK-LABEL: func @write_to_alloc_tensor_or_readonly_tensor(
//  CHECK-SAME:     %[[arg0:.*]]: tensor<i32>
func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
                                                    %cond: i1, %val: i32)
  -> tensor<i32>
{
  // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
  // CHECK: %[[r:.*]] = scf.if {{.*}} {
  // CHECK:   %[[clone:.*]] = bufferization.clone %[[arg0_m]]
  // CHECK:   scf.yield %[[clone]]
  // CHECK: } else {
  // CHECK:   %[[alloc:.*]] = memref.alloc
  // CHECK:   memref.store %{{.*}}, %[[alloc]]
  // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]]
  // CHECK:   scf.yield %[[casted]]
  // CHECK: }
  // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: memref.dealloc %[[r]]
  // CHECK: return %[[r_t]]
  %3 = scf.if %cond -> (tensor<i32>) {
    scf.yield %arg0 : tensor<i32>
  } else {
    %7 = bufferization.alloc_tensor() : tensor<i32>
    %8 = tensor.insert %val into %7[] : tensor<i32>
    scf.yield %8 : tensor<i32>
  }
  return %3 : tensor<i32>
}