// RUN: mlir-opt %s -tensor-bufferize -cse | FileCheck %s

// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 20 + s0 + d1)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2, d3)[s0] -> (d0 * 140 + d1 * 20 + d2 * 5 + d3 + s0)>
// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0) -> (d0 + 1)>
// CHECK-DAG: #[[$MAP4:.*]] = affine_map<() -> (1)>
// CHECK-DAG: #[[$MAP5:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)>
// CHECK-DAG: #[[$MAP6:.*]] = affine_map<(d0) -> (d0 * 2)>
// CHECK-DAG: #[[$MAP7:.*]] = affine_map<(d0, d1, d2)[s0] -> (d0 * 8 + s0 + d1 * 4 + d2)>
// CHECK-DAG: #[[$MAP8:.*]] = affine_map<(d0)[s0] -> (d0 * 4 + s0)>
// CHECK-DAG: #[[$MAP9:.*]] = affine_map<()[s0] -> (s0)>
// CHECK-DAG: #[[$MAP10:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>

// CHECK-LABEL: func @dim(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
// CHECK-SAME: %[[INDEX:.*]]: index) -> index {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
// CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref<f32>
// CHECK: return %[[EXTENT]] : index
func.func @dim(%arg0: tensor<f32>, %arg1: index) -> index {
  %0 = tensor.dim %arg0, %arg1 : tensor<f32>
  return %0 : index
}

// CHECK-LABEL: func @rank(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> index {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]]
// CHECK: %[[EXTENT:.*]] = memref.rank %[[MEMREF]] : memref<*xf32>
func.func @rank(%arg0: tensor<*xf32>) -> index {
  %0 = tensor.rank %arg0 : tensor<*xf32>
  return %0 : index
}

// CHECK-LABEL: func @tensor.cast(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xindex>) -> tensor<2xindex> {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]]
// CHECK: %[[CASTED:.*]] = memref.cast %[[MEMREF]] : memref<?xindex> to memref<2xindex>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED]]
// CHECK: return %[[RET]] : tensor<2xindex>
func.func @tensor.cast(%arg0: tensor<?xindex>) -> tensor<2xindex> {
  %0 = tensor.cast %arg0 : tensor<?xindex> to tensor<2xindex>
  return %0 : tensor<2xindex>
}

// CHECK-LABEL: func @tensor.cast_from_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> tensor<2xf32> {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<*xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<*xf32> to memref<2xf32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<2xf32>
// CHECK: return %[[RET]] : tensor<2xf32>
func.func @tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> {
  %0 = tensor.cast %arg0 : tensor<*xf32> to tensor<2xf32>
  return %0 : tensor<2xf32>
}

// CHECK-LABEL: func @tensor.cast_to_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<2xf32>) -> tensor<*xf32> {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<2xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<2xf32> to memref<*xf32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<*xf32>
// CHECK: return %[[RET]] : tensor<*xf32>
func.func @tensor.cast_to_unranked(%arg0: tensor<2xf32>) -> tensor<*xf32> {
  %0 = tensor.cast %arg0 : tensor<2xf32> to tensor<*xf32>
  return %0 : tensor<*xf32>
}

// CHECK-LABEL: func @tensor.extract(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>,
// CHECK-SAME: %[[IDX:.*]]: index) -> f32 {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<?xf32>
// CHECK: %[[RET:.*]] = memref.load %[[MEMREF]][%[[IDX]]] : memref<?xf32>
// CHECK: return %[[RET]] : f32
// CHECK: }
func.func @tensor.extract(%arg0: tensor<?xf32>, %arg1: index) -> f32 {
  %0 = tensor.extract %arg0[%arg1] : tensor<?xf32>
  return %0 : f32
}

// CHECK-LABEL: func @tensor.from_elements_0d(
// CHECK-SAME: %[[ELEM0:.*]]: index) -> tensor<index> {
// CHECK: %[[MEMREF:.*]] = memref.alloc() {{.*}} : memref<index>
// CHECK: store %[[ELEM0]], %[[MEMREF]]
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
// CHECK: return %[[RET]] : tensor<index>
func.func @tensor.from_elements_0d(%arg0: index) -> tensor<index> {
  %0 = tensor.from_elements %arg0 : tensor<index>
  return %0 : tensor<index>
}

// CHECK-LABEL: func @tensor.from_elements_1d(
// CHECK-SAME: %[[ELEM0:.*]]: index,
// CHECK-SAME: %[[ELEM1:.*]]: index) -> tensor<2xindex> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[MEMREF:.*]] = memref.alloc() {{.*}} : memref<2xindex>
// CHECK: store %[[ELEM0]], %[[MEMREF]][%[[C0]]]
// CHECK: store %[[ELEM1]], %[[MEMREF]][%[[C1]]]
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
// CHECK: return %[[RET]] : tensor<2xindex>
func.func @tensor.from_elements_1d(%arg0: index, %arg1: index) -> tensor<2xindex> {
  %0 = tensor.from_elements %arg0, %arg1 : tensor<2xindex>
  return %0 : tensor<2xindex>
}

// CHECK-LABEL: func @tensor.from_elements_2d(
// CHECK-SAME: %[[ELEM0:.*]]: index, %[[ELEM1:.*]]: index)
// CHECK-SAME: -> tensor<3x2xindex> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[MEMREF:.*]] = memref.alloc() {{.*}} : memref<3x2xindex>
// CHECK: store %[[ELEM0]], %[[MEMREF]][%[[C0]], %[[C0]]]
// CHECK: store %[[ELEM1]], %[[MEMREF]][%[[C0]], %[[C1]]]
// CHECK: store %[[ELEM0]], %[[MEMREF]][%[[C1]], %[[C0]]]
// CHECK: store %[[ELEM1]], %[[MEMREF]][%[[C1]], %[[C1]]]
// CHECK: store %[[ELEM0]], %[[MEMREF]][%[[C2]], %[[C0]]]
// CHECK: store %[[ELEM1]], %[[MEMREF]][%[[C2]], %[[C1]]]
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
// CHECK: return %[[RET]] : tensor<3x2xindex>
func.func @tensor.from_elements_2d(%arg0: index, %arg1: index) -> tensor<3x2xindex> {
  %0 = tensor.from_elements %arg0, %arg1, %arg0, %arg1, %arg0, %arg1
      : tensor<3x2xindex>
  return %0 : tensor<3x2xindex>
}

// CHECK-LABEL: func @tensor.from_elements_3d(
// CHECK-SAME: %[[F0:.*]]: f32

// CHECK-DAG: %[[F1:.*]] = arith.constant 1.0{{0+}}e+00
// CHECK-DAG: %[[F2:.*]] = arith.constant 2.0
// CHECK-DAG: %[[F3:.*]] = arith.constant 3.0
// CHECK-DAG: %[[F4:.*]] = arith.constant 4.0
// CHECK-DAG: %[[F5:.*]] = arith.constant 5.0
// CHECK-DAG: %[[F6:.*]] = arith.constant 6.0
// CHECK-DAG: %[[F7:.*]] = arith.constant 7.0
// CHECK-DAG: %[[F8:.*]] = arith.constant 8.0
// CHECK-DAG: %[[F9:.*]] = arith.constant 9.0
// CHECK-DAG: %[[F10:.*]] = arith.constant 1.0{{0+}}e+01
// CHECK-DAG: %[[F11:.*]] = arith.constant 1.1{{0+}}e+01

// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index

// CHECK-DAG: %[[MEMREF:.*]] = memref.alloc() {{.*}} : memref<3x2x2xf32>

// CHECK: store %[[F0]], %[[MEMREF]][%[[C0]], %[[C0]], %[[C0]]]
// CHECK: store %[[F1]], %[[MEMREF]][%[[C0]], %[[C0]], %[[C1]]]
// CHECK: store %[[F2]], %[[MEMREF]][%[[C0]], %[[C1]], %[[C0]]]
// CHECK: store %[[F3]], %[[MEMREF]][%[[C0]], %[[C1]], %[[C1]]]
// CHECK: store %[[F4]], %[[MEMREF]][%[[C1]], %[[C0]], %[[C0]]]
// CHECK: store %[[F5]], %[[MEMREF]][%[[C1]], %[[C0]], %[[C1]]]
// CHECK: store %[[F6]], %[[MEMREF]][%[[C1]], %[[C1]], %[[C0]]]
// CHECK: store %[[F7]], %[[MEMREF]][%[[C1]], %[[C1]], %[[C1]]]
// CHECK: store %[[F8]], %[[MEMREF]][%[[C2]], %[[C0]], %[[C0]]]
// CHECK: store %[[F9]], %[[MEMREF]][%[[C2]], %[[C0]], %[[C1]]]
// CHECK: store %[[F10]], %[[MEMREF]][%[[C2]], %[[C1]], %[[C0]]]
// CHECK: store %[[F11]], %[[MEMREF]][%[[C2]], %[[C1]], %[[C1]]]

// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]]
// CHECK: return %[[RET]] : tensor<3x2x2xf32>
func.func @tensor.from_elements_3d(%f0 : f32) -> tensor<3x2x2xf32> {
  %f1 = arith.constant 1.0 : f32
  %f2 = arith.constant 2.0 : f32
  %f3 = arith.constant 3.0 : f32
  %f4 = arith.constant 4.0 : f32
  %f5 = arith.constant 5.0 : f32
  %f6 = arith.constant 6.0 : f32
  %f7 = arith.constant 7.0 : f32
  %f8 = arith.constant 8.0 : f32
  %f9 = arith.constant 9.0 : f32
  %f10 = arith.constant 10.0 : f32
  %f11 = arith.constant 11.0 : f32
  %0 = tensor.from_elements %f0,%f1,%f2,%f3,%f4,%f5,%f6,%f7,%f8,%f9,%f10,%f11
      : tensor<3x2x2xf32>
  return %0 : tensor<3x2x2xf32>
}

// CHECK-LABEL: func @tensor.generate(
// CHECK-SAME: %[[ARG:.*]]: tensor<*xf32>,
// CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<?xindex> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[CASTED:.*]] = bufferization.to_memref %[[ARG]] : memref<*xf32>
// CHECK-DAG: %[[MEMREF:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref<?xindex>
// CHECK: scf.parallel (%[[I:.*]]) = (%[[C0]]) to (%[[DYNAMIC_EXTENT]]) step (%[[C1]]) {
// CHECK: %[[ELEM:.*]] = memref.dim %[[CASTED]], %[[I]] : memref<*xf32>
// CHECK: store %[[ELEM]], %[[MEMREF]][%[[I]]] : memref<?xindex>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]] : memref<?xindex>
// CHECK: return %[[RET]] : tensor<?xindex>
// CHECK: }
func.func @tensor.generate(%arg: tensor<*xf32>, %dynamic_extent: index) -> tensor<?xindex> {
  %result = tensor.generate %dynamic_extent {
  ^bb0(%i : index):
    %elem = tensor.dim %arg, %i : tensor<*xf32>
    tensor.yield %elem : index
  } : tensor<?xindex>
  return %result : tensor<?xindex>
}

// Additional test that checks the logic for intermixed static and dynamic
// extents.
//
// CHECK-LABEL: func @tensor.generate_static_and_dynamic(
// CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<16x?xindex> {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[MEMREF:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref<16x?xindex>
// CHECK: scf.parallel (%[[I:.*]], %[[J:.*]]) = (%[[C0]], %[[C0]]) to (%[[C16]], %[[DYNAMIC_EXTENT]]) step (%[[C1]], %[[C1]]) {
// CHECK: %[[VAL_7:.*]] = arith.addi %[[I]], %[[J]] : index
// CHECK: store %[[VAL_7]], %[[MEMREF]][%[[I]], %[[J]]] : memref<16x?xindex>
// CHECK: scf.yield
// CHECK: }
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[MEMREF]] : memref<16x?xindex>
// CHECK: return %[[RET]] : tensor<16x?xindex>
// CHECK: }
func.func @tensor.generate_static_and_dynamic(%arg0: index) -> tensor<16x?xindex> {
  %result = tensor.generate %arg0 {
  ^bb0(%i: index, %j: index):
    %sum = arith.addi %i, %j : index
    tensor.yield %sum : index
  } : tensor<16x?xindex>
  return %result : tensor<16x?xindex>
}

// CHECK-LABEL: func @tensor.generate_unknown_ops_in_body
func.func @tensor.generate_unknown_ops_in_body(%arg0: index) -> tensor<?xindex> {
  // CHECK-NOT: tensor.generate
  %tensor = tensor.generate %arg0 {
  ^bb0(%iv: index):
    // CHECK: test.source
    %0 = "test.source"() : () -> index
    tensor.yield %0 : index
  } : tensor<?xindex>
  return %tensor : tensor<?xindex>
}

// CHECK-LABEL: func @tensor.extract_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?x?xf32>, %[[idx1:.*]]: index, %[[idx2:.*]]: index
func.func @tensor.extract_slice(
    %t1: tensor<?x?xf32>, %idx1: index, %idx2: index) -> tensor<?x10xf32> {
  // CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : memref<?x?xf32>
  // CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref<?x?xf32> to memref<?x10xf32, #[[$MAP0]]>
  %0 = tensor.extract_slice %t1[5, %idx2][%idx1, 10][1, 1]
      : tensor<?x?xf32> to tensor<?x10xf32>
  // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: return %[[r_tensor]]
  return %0 : tensor<?x10xf32>
}

// CHECK-LABEL: func @tensor.extract_slice_rank_reducing(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10x?xf32>, %[[idx1:.*]]: index,
// CHECK-SAME: %[[idx2:.*]]: index
func.func @tensor.extract_slice_rank_reducing(
    %t1: tensor<?x10x?xf32>, %idx1: index, %idx2: index) -> tensor<?x15xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10x?xf32>
  // CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref<?x10x?xf32> to memref<?x15xf32, #[[$MAP0]]>
  %0 = tensor.extract_slice %t1[5, %idx1, 10][%idx2, 1, 15][1, 1, 1]
      : tensor<?x10x?xf32> to tensor<?x15xf32>
  // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: return %[[r_tensor]]
  return %0 : tensor<?x15xf32>
}

// CHECK-LABEL: func @tensor.insert_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?x?xf32>, %[[t2:.*]]: tensor<?x10xf32>,
// CHECK-SAME: %[[idx1:.*]]: index, %[[idx2:.*]]: index
func.func @tensor.insert_slice(%t1: tensor<?x?xf32>, %t2: tensor<?x10xf32>,
                               %idx1: index, %idx2: index) -> tensor<?x?xf32> {
  // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
  // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index
  // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x?xf32>
  // CHECK-DAG: %[[m2:.*]] = bufferization.to_memref %[[t2]] : memref<?x10xf32>
  // CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]]
  // CHECK-DAG: %[[dim1:.*]] = memref.dim %[[m1]], %[[c1]]
  // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim0]], %[[dim1]])
  // CHECK: memref.copy %[[m1]], %[[alloc]]
  // CHECK: %[[subview:.*]] = memref.subview %[[alloc]][%[[idx1]], 5] [%[[idx2]], 10] [1, 1]
  // CHECK: memref.copy %[[m2]], %[[subview]]
  %0 = tensor.insert_slice %t2 into %t1[%idx1, 5][%idx2, 10][1, 1]
      : tensor<?x10xf32> into tensor<?x?xf32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
  // CHECK: return %[[r]]
  return %0 : tensor<?x?xf32>
}

// CHECK-LABEL: func @tensor.insert(
// CHECK-SAME: %[[t1:.*]]: tensor<5xf32>, %[[idx1:.*]]: index,
// CHECK-SAME: %[[f:.*]]: f32
func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5xf32> {
  // CHECK-DAG: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
  // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<5xf32>
  // CHECK: memref.copy %[[m1]], %[[alloc]]
  // CHECK: memref.store %[[f]], %[[alloc]][%[[idx1]]]
  %0 = tensor.insert %f into %t1[%idx1] : tensor<5xf32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
  // CHECK: return %[[r]]
  return %0 : tensor<5xf32>
}

// CHECK-LABEL: func @tensor.expand_shape(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>
func.func @tensor.expand_shape(%t1: tensor<?x10xf32>) -> tensor<2x?x10xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10xf32>
  // CHECK: %[[expanded:.*]] = memref.expand_shape %[[m1]] [
  // CHECK-SAME: [0, 1], [2]] : memref<?x10xf32> into memref<2x?x10xf32>
  %0 = tensor.expand_shape %t1 [[0, 1], [2]]
      : tensor<?x10xf32> into tensor<2x?x10xf32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
  // CHECK: return %[[r]]
  return %0 : tensor<2x?x10xf32>
}

// CHECK-LABEL: func @tensor.expand_shape_of_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?x20xf32>
func.func @tensor.expand_shape_of_slice(
    %t1: tensor<?x20xf32>, %o1: index, %s1: index) -> tensor<?x7x2x5xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x20xf32>
  // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref<?x20xf32> to memref<?x10xf32, #[[$MAP1]]>
  %0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] :
      tensor<?x20xf32> to tensor<?x10xf32>
  // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [
  // CHECK-SAME: [0, 1], [2, 3]] : memref<?x10xf32, #[[$MAP1]]> into memref<?x7x2x5xf32, #[[$MAP2]]>
  %1 = tensor.expand_shape %0 [[0, 1], [2, 3]] :
      tensor<?x10xf32> into tensor<?x7x2x5xf32>
  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
  // CHECK: return %[[r]]
  return %1 : tensor<?x7x2x5xf32>
}

// CHECK-LABEL: func @tensor.expand_shape_of_scalar_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?xf32>
func.func @tensor.expand_shape_of_scalar_slice(
    %t1: tensor<?xf32>, %o1: index, %s1: index) -> tensor<1xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?xf32>
  // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref<?xf32> to memref<f32, #[[$MAP9]]>
  %0 = tensor.extract_slice %t1[%o1][1][1] : tensor<?xf32> to tensor<f32>
  // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] : memref<f32, #[[$MAP9]]> into memref<1xf32, #[[$MAP10]]>
  %1 = tensor.expand_shape %0 [] : tensor<f32> into tensor<1xf32>
  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[expanded]]
  // CHECK: return %[[r]]
  return %1 : tensor<1xf32>
}

// CHECK-LABEL: func @tensor.collapse_shape(
// CHECK-SAME: %[[t1:.*]]: tensor<2x?x?xf32>
func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor<?x?xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<2x?x?xf32>
  // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [
  // CHECK-SAME: [0, 1], [2]] : memref<2x?x?xf32> into memref<?x?xf32>
  %0 = tensor.collapse_shape %t1 [[0, 1], [2]]
      : tensor<2x?x?xf32> into tensor<?x?xf32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[collapsed]]
  // CHECK: return %[[r]]
  return %0 : tensor<?x?xf32>
}

// CHECK-LABEL: func @tensor.collapse_shape_to_scalar(
// CHECK-SAME: %[[t1:.*]]: tensor<1x1x1xf32>
func.func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor<f32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<1x1x1xf32>
  // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [] : memref<1x1x1xf32> into memref<f32>
  %0 = tensor.collapse_shape %t1 []
      : tensor<1x1x1xf32> into tensor<f32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[collapsed]]
  // CHECK: return %[[r]]
  return %0 : tensor<f32>
}

// CHECK-LABEL: func @tensor.collapse_shape_of_slice(
func.func @tensor.collapse_shape_of_slice(%arg0: tensor<2xi32>) -> tensor<i32> {
  // CHECK: memref.subview %{{.*}}[1] [1] [1] : memref<2xi32> to memref<1xi32, #[[$MAP3]]>
  %0 = tensor.extract_slice %arg0[1] [1] [1] : tensor<2xi32> to tensor<1xi32>
  // CHECK: memref.collapse_shape %{{.*}} [] : memref<1xi32, #[[$MAP3]]> into memref<i32, #[[$MAP4]]>
  %1 = tensor.collapse_shape %0 [] : tensor<1xi32> into tensor<i32>
  return %1 : tensor<i32>
}

// CHECK-LABEL: func @tensor.collapse_shape_of_slice2(
func.func @tensor.collapse_shape_of_slice2(
    %arg0: tensor<?x?x?x?xi64>, %o1: index, %o2: index, %o3: index, %o4: index)
    -> tensor<87x63648xi64> {
  // CHECK: %[[subview:.*]] = memref.subview %{{.*}} : memref<?x?x?x?xi64> to memref<87x78x68x12xi64, #{{.*}}>
  %0 = tensor.extract_slice %arg0[%o1, %o2, %o3, %o4] [87, 78, 68, 12] [1, 1, 1, 1] : tensor<?x?x?x?xi64> to tensor<87x78x68x12xi64>

  // This memref may not be collapsible, so the buffer must be copied to get rid
  // of the layout map.
  // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<87x78x68x12xi64>
  // CHECK: memref.copy %[[subview]], %[[alloc]]
  // CHECK: memref.collapse_shape %[[alloc]] [
  // CHECK-SAME: [0], [1, 2, 3]] : memref<87x78x68x12xi64> into memref<87x63648xi64>
  %1 = tensor.collapse_shape %0 [[0], [1, 2, 3]] : tensor<87x78x68x12xi64> into tensor<87x63648xi64>
  return %1 : tensor<87x63648xi64>
}

// CHECK-LABEL: func @tensor.collapse_shape_of_slice3(
// CHECK-SAME: %[[t1:.*]]: tensor<1x2xf32>
func.func @tensor.collapse_shape_of_slice3(%t1: tensor<1x2xf32>) -> tensor<1xf32> {
  // CHECK: memref.subview {{.*}} : memref<1x2xf32> to memref<1x1xf32, #[[$MAP5]]>
  %0 = tensor.extract_slice %t1[0, 0][1, 1][1, 1] : tensor<1x2xf32> to tensor<1x1xf32>
  // CHECK: memref.collapse_shape %{{.*}} [
  // CHECK-SAME: [0, 1]] : memref<1x1xf32, #[[$MAP5]]> into memref<1xf32, #[[$MAP6]]>
  %1 = tensor.collapse_shape %0 [[0, 1]] : tensor<1x1xf32> into tensor<1xf32>
  return %1 : tensor<1xf32>
}

// CHECK-LABEL: func @tensor.collapse_shape_of_slice4(
// CHECK-SAME: %[[t1:.*]]: tensor<?x2x4xf32>,
// CHECK-SAME: %[[OFFSET:.*]]: index) -> tensor<8xf32> {
func.func @tensor.collapse_shape_of_slice4(%arg0: tensor<?x2x4xf32>, %offset: index, %size: index) -> tensor<8xf32> {
  // CHECK: memref.subview %{{.*}} : memref<?x2x4xf32> to memref<4x2x1xf32, #[[$MAP7]]>
  %0 = tensor.extract_slice %arg0[0, 0, %offset] [4, 2, 1] [1, 1, 1] : tensor<?x2x4xf32> to tensor<4x2x1xf32>
  // CHECK: memref.collapse_shape %{{.*}} [
  // CHECK-SAME: [0, 1, 2]] : memref<4x2x1xf32, #[[$MAP7]]> into memref<8xf32, #[[$MAP8]]>
  %ret = tensor.collapse_shape %0 [[0, 1, 2]] : tensor<4x2x1xf32> into tensor<8xf32>
  return %ret: tensor<8xf32>
}

// CHECK-LABEL: func @tensor.reshape(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>
func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
  // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10xf32>

  // CHECK: %[[two:.*]] = arith.constant 2 : i64
  %two = arith.constant 2 : i64
  // CHECK: %[[five:.*]] = arith.constant 5 : i64
  %five = arith.constant 5 : i64

  // CHECK: %[[alloc:.*]] = memref.alloc() {alignment = 128 : i64} : memref<3xi64>
  // CHECK: %[[zero_idx:.*]] = arith.constant 0 : index
  // CHECK: %[[one_idx:.*]] = arith.constant 1 : index
  // CHECK: %[[two_idx:.*]] = arith.constant 2 : index
  // CHECK: memref.store %[[two]], %[[alloc]][%[[zero_idx]]] : memref<3xi64>
  // CHECK: memref.store %[[two]], %[[alloc]][%[[one_idx]]] : memref<3xi64>
  // CHECK: memref.store %[[five]], %[[alloc]][%[[two_idx]]] : memref<3xi64>
  %shape = tensor.from_elements %two, %two, %five : tensor<3xi64>

  // CHECK: %[[reshaped:.*]] = memref.reshape %[[m1]](%[[alloc]]) : (memref<?x10xf32>, memref<3xi64>) -> memref<2x2x5xf32>
  %reshaped = tensor.reshape %t1(%shape) : (tensor<?x10xf32>, tensor<3xi64>) -> tensor<2x2x5xf32>

  // CHECK: %[[r:.*]] = bufferization.to_tensor %[[reshaped]]
  // CHECK: return %[[r]]
  return %reshaped : tensor<2x2x5xf32>
}