/llvm-project-15.0.7/mlir/test/Dialect/Linalg/
  one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
    10: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    11: …%arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    12: …%arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    37: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    38: …%arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    39: …%arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    64: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    65: …%arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    66: …%arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    91: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    [all …]
|
/llvm-project-15.0.7/mlir/test/Dialect/Bufferization/Transforms/
  tensor-copy-insertion.mlir
    12: …// CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[t]]) {bufferization.escape = [false]…
    13: …// CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor…
    14: …// CHECK-NO-DEALLOC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : …
    27: // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
    28: // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
    29: // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
    30: %0 = bufferization.alloc_tensor() : tensor<5xf32>
    41: // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
    43: …// CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} :…
    45: // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
    [all …]
|
  finalizing-bufferize.mlir
    7: %0 = bufferization.to_tensor %arg0 : memref<f32>
    8: %1 = bufferization.to_memref %0 : memref<f32>
    17: %1 = bufferization.to_memref %0 : memref<f32>
    24: %0 = bufferization.to_tensor %arg0 : memref<f32>
    42: %0 = bufferization.to_tensor %m : memref<?xf32, #map1>
    43: %1 = bufferization.to_memref %0 : memref<?xf32>
    60: %1 = bufferization.to_memref %0 : memref<?xf32>
    77: %1 = bufferization.to_memref %0 : memref<?xf32>
    87: %0 = bufferization.to_tensor %m : memref<?xf32>
    98: %0 = bufferization.to_tensor %m : memref<*xf32>
    [all …]
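
  The to_tensor/to_memref pairs matched above are exactly what -finalizing-bufferize is meant to clean up: a tensor view that is immediately converted back to a buffer folds away. A minimal sketch (the function name @identity and the f32 element type are illustrative, not taken from the test):

    func.func @identity(%arg0: memref<f32>) -> memref<f32> {
      // Materialize a tensor view of the incoming buffer ...
      %0 = bufferization.to_tensor %arg0 : memref<f32>
      // ... and immediately turn it back into a memref; the pair cancels,
      // so the finalized IR simply returns %arg0.
      %1 = bufferization.to_memref %0 : memref<f32>
      return %1 : memref<f32>
    }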
|
  one-shot-bufferize.mlir
    14: // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
    29: // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
    34: // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
    47: %0 = bufferization.alloc_tensor() : tensor<10xf32>
    78: // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_memref %[[dummy]]
    100: %0 = bufferization.alloc_tensor() : tensor<10xf32>
    114: %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
    131: // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]]
    134: %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
    135: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
    [all …]
|
  tensor-copy-insertion-memory-space.mlir
    9: …// CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[dummy]]) {bufferization.escape = [fa…
    20: …// CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} :…
    21: %t = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32>
    22: …// CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} :…
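
  The memory_space attribute exercised above pins the buffer that eventually backs an alloc_tensor to a particular memory space. A minimal sketch mirroring line 21 of the test; the wrapping function is illustrative:

    func.func @alloc_in_space_1() -> tensor<10xf32> {
      // Ask bufferization to place the backing allocation in memory space 1.
      %t = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32>
      return %t : tensor<10xf32>
    }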
|
  one-shot-module-bufferize-analysis.mlir
    676: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    677: …%arg1: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    678: …%arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    714: …%arg0: tensor<518x518xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    716: …%arg2: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, buffer…
    769: …%arg0: tensor<62x126xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferi…
    770: // CHECK-SAME: bufferization.access = "none"
    771: …%arg1: tensor<126x90xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferi…
    772: // CHECK-SAME: bufferization.access = "none"
    773: …%arg2: tensor<62x90xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, bufferiz…
    [all …]
|
  one-shot-bufferize-partial.mlir
    3: // Test bufferization using memref types that have no layout map.
    62: // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
    88: // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
    105: // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
    123: // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
    145: // CHECK: %[[filled_tensor:.*]] = bufferization.to_tensor %[[m1]]
    146: %t1 = bufferization.alloc_tensor() : tensor<10xf32>
    153: // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
    172: // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
    209: // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]]
    [all …]
|
  transform-ops.mlir
    10: transform.bufferization.one_shot_bufferize %0
    20: // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
    25: // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
    42: transform.bufferization.one_shot_bufferize %0
    68: // expected-error @+1 {{bufferization failed}}
    69: transform.bufferization.one_shot_bufferize %0 {target_is_module = false}
    88: transform.bufferization.one_shot_bufferize %arg1
    98: // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]]
    103: // CHECK: %[[res_tensor:.*]] = bufferization.to_tensor %[[alloc]]
|
  one-shot-bufferize-allow-return-allocs.mlir
    11: // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]]
    20: %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
    24: // CHECK: %[[cloned:.*]] = bufferization.clone %[[m]]
    29: // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
    43: // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
    45: // CHECK: %[[clone:.*]] = bufferization.clone %[[arg0_m]]
    53: // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
    59: %7 = bufferization.alloc_tensor() : tensor<i32>
|
/llvm-project-15.0.7/mlir/test/Dialect/Bufferization/
  ops.mlir
    6: %clone = bufferization.clone %buf : memref<*xf32> to memref<*xf32>
    13: %0 = bufferization.to_memref %arg0
    15: %1 = bufferization.to_memref %arg1
    22: %tensor = bufferization.to_tensor %buf : memref<2xf32>
    31: %0 = bufferization.alloc_tensor(%sz) : tensor<?x5xf32>
    33: %1 = bufferization.alloc_tensor() copy(%t) : tensor<?x5xf32>
    34: // CHECK: bufferization.alloc_tensor() : tensor<5x6xf32>
    35: %2 = bufferization.alloc_tensor() : tensor<5x6xf32>
    37: %3 = bufferization.alloc_tensor(%sz, %sz) : tensor<?x?xf32>
    47: // CHECK: bufferization.dealloc_tensor {{.*}} : tensor<4xi32>
    [all …]
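
  Taken together, these matches cover the dialect's core ops: clone, to_tensor/to_memref, alloc_tensor (plain, dynamically sized, and copy forms), and dealloc_tensor. A small consolidated sketch; shapes and value names are illustrative rather than copied from ops.mlir:

    func.func @bufferization_ops(%buf: memref<2xf32>, %sz: index, %t: tensor<?x5xf32>) {
      // Copy an existing buffer.
      %clone = bufferization.clone %buf : memref<2xf32> to memref<2xf32>
      // View an existing buffer as a tensor.
      %view = bufferization.to_tensor %buf : memref<2xf32>
      // Tensor allocations: dynamic size, copy of an existing tensor, static shape.
      %0 = bufferization.alloc_tensor(%sz) : tensor<?x5xf32>
      %1 = bufferization.alloc_tensor() copy(%t) : tensor<?x5xf32>
      %2 = bufferization.alloc_tensor() : tensor<5x6xf32>
      // Release a tensor allocation explicitly.
      bufferization.dealloc_tensor %2 : tensor<5x6xf32>
      return
    }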
|
  invalid.mlir
    6: %0 = bufferization.alloc_tensor(%arg0) : tensor<4x?x?x5xf32>
    15: %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
    23: %0 = bufferization.alloc_tensor(%sz) copy(%t) : tensor<?xf32>
    30: // expected-error @+1{{'bufferization.escape' is expected to be a bool array attribute}}
    31: %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
    39: %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = [true, false]} : tensor<?xf32>
    46: // expected-error @+1{{'bufferization.escape' only valid for allocation results}}
    54: // expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
    65: %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
    77: %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
    [all …]
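
  The bufferization.escape diagnostics above expect a bool array with one entry per result of an allocation op. A hedged sketch of the accepted form next to the rejected one from line 31; the wrapping function is illustrative:

    func.func @escape_attr(%sz: index) {
      // Valid: one boolean per allocation result.
      %ok = bufferization.alloc_tensor(%sz) {bufferization.escape = [false]} : tensor<?xf32>
      // Invalid (shown only as a comment): a non-array value such as
      //   bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
      // trips the "expected to be a bool array attribute" verifier error above.
      return
    }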
|
  canonicalize.mlir
    8: %0 = bufferization.to_memref %arg0 : memref<?xf32>
    9: %1 = bufferization.to_tensor %0 : memref<?xf32>
    20: %0 = bufferization.to_tensor %arg0 : memref<?xf32>
    21: %1 = bufferization.to_memref %0 : memref<?xf32>
    43: %1 = bufferization.to_memref %0 : memref<?xf32, 7>
    57: // CHECK-NOT: bufferization.to_tensor
    58: // CHECK-NOT: bufferization.to_memref
    88: // CHECK-NOT: bufferization.to_tensor
    89: // CHECK-NOT: bufferization.to_memref
    108: %0 = bufferization.to_tensor %arg0 : memref<?xf32>
    [all …]
|
/llvm-project-15.0.7/mlir/test/Dialect/SCF/
  one-shot-bufferize-tensor-copy-insertion.mlir
    10: …// CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [fals…
    11: …// CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [fals…
    31: …// CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [fals…
    32: …// CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [fals…
    38: …// CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[iter2]]) {bufferization.escape…
    54: …// CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [fals…
    55: …// CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [fals…
    85: …// CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [fals…
    86: …// CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [fals…
    93: …// CHECK-DAG: %[[yield0:.*]] = bufferization.alloc_tensor() copy(%[[w1]]) {bufferization.escape = …
    [all …]
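
  These checks revolve around scf.for loops whose tensor iter_args may be written in place, which is what makes tensor copy insertion wrap the operands in bufferization.alloc_tensor() copy(...). A minimal input sketch under assumed names (@iter_arg_loop, %A, and the loop bounds are illustrative; the test's functions are more involved):

    func.func @iter_arg_loop(%A: tensor<?xf32>, %lb: index, %ub: index,
                             %step: index) -> tensor<?xf32> {
      // %A flows into the loop as an iter_arg. If the body writes to it,
      // copy insertion materializes something like
      //   %A_copy = bufferization.alloc_tensor() copy(%A) {bufferization.escape = [false]} : tensor<?xf32>
      // and feeds %A_copy into the loop instead.
      %r = scf.for %iv = %lb to %ub step %step iter_args(%t = %A) -> (tensor<?xf32>) {
        scf.yield %t : tensor<?xf32>
      }
      return %r : tensor<?xf32>
    }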
|
  one-shot-bufferize.mlir
    18: %A : tensor<?xf32> {bufferization.writable = false},
    19: %B : tensor<?xf32> {bufferization.writable = true},
    72: %A : tensor<?xf32> {bufferization.writable = false},
    73: %B : tensor<?xf32> {bufferization.writable = true},
    74: %C : tensor<4xf32> {bufferization.writable = false},
    169: %t1: tensor<?xf32> {bufferization.writable = true},
    195: %A : tensor<4xf32> {bufferization.writable = false},
    196: %B : tensor<4xf32> {bufferization.writable = false})
    220: %t2 = bufferization.alloc_tensor(%i) : tensor<?xf32>
    265: // This alloc is for the bufferization.alloc_tensor.
    [all …]
|
  bufferize.mlir
    7: // CHECK: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : memref<?xf32>
    8: // CHECK: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : memref<?xf32>
    14: // CHECK: %[[RESULT_TENSOR:.*]] = bufferization.to_tensor %[[RESULT_MEMREF:.*]] : memref<…
    30: // CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
    34: // CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[VAL_9:.*]] : memref<f32>
    63: // CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
    65: // CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32>
    67: // CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : memref<f…
    70: // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT:.*]] : memref<f32>
    83: // CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : memref<f32>
    [all …]
|
/llvm-project-15.0.7/mlir/test/Dialect/SparseTensor/
  one_shot_bufferize_tensor_copy_insertion.mlir
    12: // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]}
    13: // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]}
    14: %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
    23: // CHECK: sparse_tensor.new {{.*}} {bufferization.escape = [false]}
    24: // CHECK-FUNC: sparse_tensor.new {{.*}} {bufferization.escape = [true]}
    32: // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
    33: // CHECK-FUNC: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
    34: %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
    38: // CHECK: sparse_tensor.convert %[[loaded]] {bufferization.escape = [false]}
    61: …// CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[argb]]) {bufferization.escape = [fa…
    [all …]
|
/llvm-project-15.0.7/mlir/lib/Dialect/Bufferization/Transforms/
  Bufferize.cpp
    26: using namespace mlir::bufferization;
    36: return builder.create<bufferization::ToTensorOp>(loc, type, inputs[0]); in materializeToTensor()
    80: void mlir::bufferization::populateBufferizeMaterializationLegality( in populateBufferizeMaterializationLegality()
    82: target.addLegalOp<bufferization::ToTensorOp, bufferization::ToMemrefOp>(); in populateBufferizeMaterializationLegality()
    89: : public OpConversionPattern<bufferization::ToTensorOp> {
    93: matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor, in matchAndRewrite()
    105: : public OpConversionPattern<bufferization::ToMemrefOp> {
    109: matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor, in matchAndRewrite()
    288: mlir::bufferization::createFinalizingBufferizePass() { in createFinalizingBufferizePass()
    388: LogicalResult bufferization::bufferizeOp(Operation *op, in bufferizeOp()
    [all …]
|
/llvm-project-15.0.7/mlir/include/mlir/Dialect/Bufferization/IR/
  BufferizationBase.td
    15: let name = "bufferization";
    16: let cppNamespace = "::mlir::bufferization";
    20: The `bufferization` dialect is intended to collect operations/interfaces
    21: specific to the bufferization passes.
    23: Overview of the bufferization infrastructure and important conceptual
    25: found in [bufferization](/docs/Bufferization/) and [buffer
    36: kWritableAttrName = "bufferization.writable";
    38: /// Attribute name used to mark the bufferization layout for region
    41: kBufferLayoutAttrName = "bufferization.buffer_layout";
    51: kEscapeAttrName = "bufferization.escape";
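
  The attribute names declared here (bufferization.writable, bufferization.buffer_layout, bufferization.escape) are the same ones that show up as discardable attributes in the SCF and Linalg tests above. A small sketch of how the first two are attached to function arguments; the tensor shapes and the affine map are illustrative:

    func.func @attr_usage(
        %A: tensor<?xf32> {bufferization.writable = false},
        %B: tensor<256x256xf32> {bufferization.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>}) {
      return
    }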
|
/llvm-project-15.0.7/mlir/docs/
  Bufferization.md
    29: and with aggressive in-place bufferization.
    432: bufferization.
    439: conversion-based bufferization.
    471: after bufferization.
    516: // Partial bufferization passes.
    524: // Finalizing bufferization pass.
    626: `bufferization.to_tensor` and `bufferization.to_memref` ops, which are inserted
    665: `bufferization.to_tensor` / `bufferization.to_memref` materialization ops
    678: bufferization could only be done as a single finalizing bufferization mega-pass
    685: `bufferization.to_tensor` and `bufferization.to_memref`.
    [all …]
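
  The doc lines about to_tensor/to_memref materialization describe the mixed IR that exists between partial bufferization passes: ops that are already bufferized operate on memrefs, while not-yet-bufferized ops still see tensors through a to_tensor. A hedged sketch of such a boundary (the op mix and names are illustrative, not taken from Bufferization.md):

    func.func @partial_boundary(%m: memref<4xf32>, %idx: index) -> f32 {
      // An already-bufferized producer hands off a memref; a not-yet-bufferized
      // consumer (tensor.extract here) still operates on the tensor view.
      %t = bufferization.to_tensor %m : memref<4xf32>
      %e = tensor.extract %t[%idx] : tensor<4xf32>
      return %e : f32
    }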
|
/llvm-project-15.0.7/mlir/test/Integration/Dialect/SparseTensor/CPU/
  sparse_index.mlir
    43: %init = bufferization.alloc_tensor() : tensor<8xi64, #SparseVector>
    61: %init = bufferization.alloc_tensor() : tensor<8xi64, #SparseVector>
    218: bufferization.dealloc_tensor %sv : tensor<8xi64, #SparseVector>
    219: bufferization.dealloc_tensor %dv : tensor<8xi64, #SparseVector>
    220: bufferization.dealloc_tensor %0 : tensor<8xi64, #SparseVector>
    221: bufferization.dealloc_tensor %1 : tensor<8xi64, #SparseVector>
    222: bufferization.dealloc_tensor %2 : tensor<8xi64, #SparseVector>
    223: bufferization.dealloc_tensor %3 : tensor<8xi64, #SparseVector>
    226: bufferization.dealloc_tensor %4 : tensor<3x4xi64, #SparseMatrix>
    227: bufferization.dealloc_tensor %5 : tensor<3x4xi64, #SparseMatrix>
    [all …]
|
  sparse_matmul.mlir
    33: %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
    45: %C = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
    227: bufferization.dealloc_tensor %a1 : tensor<4x8xf64, #CSR>
    229: bufferization.dealloc_tensor %a3 : tensor<4x8xf64, #CSR>
    231: bufferization.dealloc_tensor %b1 : tensor<8x4xf64, #CSR>
    233: bufferization.dealloc_tensor %b3 : tensor<8x4xf64, #CSR>
    235: bufferization.dealloc_tensor %1 : tensor<4x4xf64, #CSR>
    236: bufferization.dealloc_tensor %2 : tensor<4x4xf64, #DCSR>
    237: bufferization.dealloc_tensor %4 : tensor<4x4xf64, #CSR>
    238: bufferization.dealloc_tensor %5 : tensor<4x4xf64, #DCSR>
    [all …]
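
  Both sparse integration tests allocate their results with bufferization.alloc_tensor and release the underlying sparse storage with bufferization.dealloc_tensor. A minimal sketch of that pairing; the #CSR encoding below is an assumption written in the MLIR 15 dimLevelType syntax, not copied from the tests:

    #CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>

    func.func @sparse_alloc_dealloc() {
      // Allocate an (empty) sparse result tensor ...
      %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
      // ... and free its storage once it is no longer needed.
      bufferization.dealloc_tensor %C : tensor<4x4xf64, #CSR>
      return
    }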
|
/llvm-project-15.0.7/mlir/test/Conversion/BufferizationToMemRef/
  bufferization-to-memref.mlir
    1: // RUN: mlir-opt -verify-diagnostics -convert-bufferization-to-memref -split-input-file %s | FileCh…
    5: %0 = bufferization.clone %arg0 : memref<2xf32> to memref<2xf32>
    19: %1 = bufferization.clone %arg0 : memref<?xf32> to memref<?xf32>
    34: // expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marke…
    35: %1 = bufferization.clone %arg0 : memref<*xf32> to memref<*xf32>
    54: %1 = bufferization.clone %arg0 : memref<?xf32, #map> to memref<?xf32, #map>
    61: // This bufferization.clone cannot be lowered because a buffer with this layout
    67: // expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marke…
    68: %1 = bufferization.clone %arg0 : memref<?xf32, #map2> to memref<?xf32, #map2>
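
  The -convert-bufferization-to-memref pass exercised by this RUN line rewrites bufferization.clone into an explicit allocation plus copy when the buffer's shape and layout permit it. A hedged before/after sketch (the lowered form is approximate; the real output also handles dynamic sizes and layouts):

    // Before the conversion:
    func.func @clone_static(%arg0: memref<2xf32>) -> memref<2xf32> {
      %0 = bufferization.clone %arg0 : memref<2xf32> to memref<2xf32>
      return %0 : memref<2xf32>
    }

    // After the conversion, roughly: the clone becomes an alloc plus a copy.
    func.func @clone_static_lowered(%arg0: memref<2xf32>) -> memref<2xf32> {
      %alloc = memref.alloc() : memref<2xf32>
      memref.copy %arg0, %alloc : memref<2xf32> to memref<2xf32>
      return %alloc : memref<2xf32>
    }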
|
/llvm-project-15.0.7/mlir/lib/Dialect/SparseTensor/Transforms/
  DenseBufferizationPass.cpp
    38: const bufferization::OneShotBufferizationOptions &options) in BufferizeDenseOpsPass()
    45: bufferization::OpFilter opFilter; in runOnOperation()
    59: if (failed(bufferization::bufferizeOp(getOperation(), options, in runOnOperation()
    66: bufferization::OneShotBufferizationOptions options;
    72: const bufferization::OneShotBufferizationOptions &options) { in createDenseBufferizationPass()
|
/llvm-project-15.0.7/mlir/test/Dialect/Tensor/
  bufferize.mlir
    167: // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
    266: // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
    284: // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
    309: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
    360: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
    376: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
    398: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
    417: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
    433: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[collapsed]]
    448: // CHECK: %[[r:.*]] = bufferization.to_tensor %[[collapsed]]
    [all …]
|
/llvm-project-15.0.7/mlir/include/mlir/Dialect/Bufferization/Transforms/
  Passes.td
    88: let constructor = "mlir::bufferization::createBufferDeallocationPass()";
    98: let constructor = "mlir::bufferization::createBufferHoistingPass()";
    137: let summary = "Finalize a partial bufferization";
    139: A bufferize pass that finalizes a partial bufferization by removing
    140: remaining `bufferization.to_tensor` and `bufferization.to_memref` operations.
    144: `bufferization.to_memref` operations.
    153: let summary = "Bufferize the `bufferization` dialect";
    206: compatibility with existing partial bufferization passes: These can
    212: migrate from partial bufferization to One-Shot Bufferize.
    281: "core bufferization passes.">,
    [all …]
|