// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops" -split-input-file | FileCheck %s

// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null

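// A read-only use of a tensor function argument needs no allocation: the
// argument is converted with bufferization.to_memref and read directly.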
// CHECK-LABEL: func @use_tensor_func_arg(
//  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
func.func @use_tensor_func_arg(%A : tensor<?xf32>) -> (vector<4xf32>) {
  %c0 = arith.constant 0 : index
  %f0 = arith.constant 0.0 : f32

  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
  // CHECK: %[[res:.*]] = vector.transfer_read %[[A_memref]]
  %0 = vector.transfer_read %A[%c0], %f0 : tensor<?xf32>, vector<4xf32>

  // CHECK: return %[[res]]
  return %0 : vector<4xf32>
}

// -----

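// Writing to a function argument tensor cannot happen in place: a new buffer
// is allocated, the argument is copied into it, and the write targets the copy.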
// CHECK-LABEL: func @return_tensor(
//  CHECK-SAME:     %[[A:.*]]: tensor<?xf32>
func.func @return_tensor(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) {
  %c0 = arith.constant 0 : index

  // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
  // CHECK: %[[dim:.*]] = tensor.dim %[[A]]
  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]])
  // CHECK: memref.copy %[[A_memref]], %[[alloc]]
  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
  // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
  %0 = vector.transfer_write %v, %A[%c0] : vector<4xf32>, tensor<?xf32>

  // CHECK: memref.dealloc %[[alloc]]
  // CHECK: return %[[res_tensor]]
  return %0 : tensor<?xf32>
}

// -----

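// bufferization.alloc_tensor lowers to memref.alloc; the vector transfer ops
// then operate directly on the allocated buffer.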
// CHECK-LABEL: func @func_without_tensor_args
func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
  // CHECK: %[[alloc:.*]] = memref.alloc()
  %0 = bufferization.alloc_tensor() : tensor<10xf32>

  %c0 = arith.constant 0 : index
  // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
  %1 = vector.transfer_write %v, %0[%c0] : vector<10xf32>, tensor<10xf32>

  %cst = arith.constant 0.0 : f32
  // CHECK: vector.transfer_read %[[alloc]]
  %r = vector.transfer_read %1[%c0], %cst : tensor<10xf32>, vector<11xf32>

  vector.print %r : vector<11xf32>
  return
}

// -----

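// Bodiless function declarations and functions without tensor ops require no
// rewriting.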
// CHECK-LABEL: func private @private_func
func.func private @private_func(tensor<?xf32>) -> ()

// CHECK-LABEL: func @empty_func()
func.func @empty_func() -> () {
  return
}

// -----

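// The result of the unknown op is read again after being written to by
// tensor.insert, so the insert cannot happen in place: the buffer is copied
// and the store targets the copy.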
// CHECK-LABEL: func @read_after_write_conflict(
func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index)
    -> (f32, f32) {
  // CHECK-DAG: %[[alloc:.*]] = memref.alloc
  // CHECK-DAG: %[[dummy:.*]] = "test.dummy_op"
  // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_memref %[[dummy]]
  %t = "test.dummy_op"() : () -> (tensor<10xf32>)

  // CHECK: memref.copy %[[dummy_m]], %[[alloc]]
  // CHECK: memref.store %{{.*}}, %[[alloc]]
  %write = tensor.insert %cst into %t[%idx2] : tensor<10xf32>

  // CHECK: %[[read:.*]] = "test.some_use"(%[[dummy]])
  %read = "test.some_use"(%t) : (tensor<10xf32>) -> (f32)
  // CHECK: %[[read2:.*]] = memref.load %[[alloc]]
  %read2 = tensor.extract %write[%idx] : tensor<10xf32>

  // CHECK: memref.dealloc %[[alloc]]
  // CHECK: return %[[read]], %[[read2]]
  return %read, %read2 : f32, f32
}

// -----

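// The allocation is converted back to a tensor with bufferization.to_tensor
// and deallocated before the return.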
// CHECK-LABEL: func @copy_deallocated(
func.func @copy_deallocated() -> tensor<10xf32> {
  // CHECK: %[[alloc:.*]] = memref.alloc()
  %0 = bufferization.alloc_tensor() : tensor<10xf32>
  // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
  // CHECK: memref.dealloc %[[alloc]]
  // CHECK: return %[[alloc_tensor]]
  return %0 : tensor<10xf32>
}

// -----

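// arith.select between a function argument and a newly allocated tensor; the
// bufferized operands have different memref types.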
// CHECK-LABEL: func @select_different_tensors(
//  CHECK-SAME:     %[[t:.*]]: tensor<?xf32>
func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> tensor<?xf32> {
  // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32, #{{.*}}>
  // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
  %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>

  // A cast must be inserted because %t and %0 have different memref types.
  // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<?xf32> to memref<?xf32, #{{.*}}>
  // CHECK: arith.select %{{.*}}, %[[casted]], %[[m]]
  %1 = arith.select %c, %0, %t : tensor<?xf32>
  return %1 : tensor<?xf32>
}

// -----

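// bufferization.alloc_tensor with a copy operand bufferizes to an allocation
// followed by a copy of the source buffer.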
// CHECK-LABEL: func @alloc_tensor_with_copy(
//  CHECK-SAME:     %[[t:.*]]: tensor<5xf32>)
// TODO: Add a test case with a dynamic dim size. This is not possible at the
// moment because it would create a tensor op during bufferization, which is
// currently forbidden.
func.func @alloc_tensor_with_copy(%t: tensor<5xf32>) -> tensor<5xf32> {
  // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]]
  // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
  // CHECK: memref.copy %[[m]], %[[alloc]]
  %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
  // CHECK: memref.dealloc %[[alloc]]
  // CHECK: return %[[r]]
  return %0 : tensor<5xf32>
}

// -----

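// The memory_space attribute is carried over to the allocated memref.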
// CHECK-LABEL: func @alloc_tensor_with_memory_space()
func.func @alloc_tensor_with_memory_space() -> tensor<5xf32> {
  // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
  %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
  // CHECK: memref.dealloc %[[alloc]]
  // CHECK: return %[[r]]
  return %0 : tensor<5xf32>
}