1 //===- Vectorization.cpp - Implementation of linalg Vectorization ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the linalg dialect Vectorization transformations. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "mlir/Analysis/SliceAnalysis.h" 14 #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h" 15 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" 16 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h" 17 #include "mlir/Dialect/Linalg/IR/Linalg.h" 18 #include "mlir/Dialect/Linalg/Transforms/Transforms.h" 19 #include "mlir/Dialect/Linalg/Utils/Utils.h" 20 #include "mlir/Dialect/Tensor/IR/Tensor.h" 21 #include "mlir/Dialect/Utils/StructuredOpsUtils.h" 22 #include "mlir/Dialect/Vector/VectorOps.h" 23 #include "mlir/Dialect/Vector/VectorTransforms.h" 24 #include "mlir/IR/AffineExpr.h" 25 #include "mlir/IR/Matchers.h" 26 #include "mlir/IR/PatternMatch.h" 27 #include "mlir/Pass/Pass.h" 28 #include "mlir/Support/LLVM.h" 29 #include "mlir/Transforms/RegionUtils.h" 30 #include "llvm/ADT/ScopeExit.h" 31 #include "llvm/ADT/Sequence.h" 32 #include "llvm/ADT/SmallVector.h" 33 #include "llvm/ADT/TypeSwitch.h" 34 #include "llvm/Support/Debug.h" 35 #include "llvm/Support/raw_ostream.h" 36 #include <type_traits> 37 38 using namespace mlir; 39 using namespace mlir::linalg; 40 41 #define DEBUG_TYPE "linalg-vectorization" 42 43 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ") 44 #define LDBG(X) LLVM_DEBUG(DBGS() << X) 45 46 /// Try to vectorize `convOp` as a convolution. 47 static FailureOr<Operation *> vectorizeConvolution(OpBuilder &b, 48 LinalgOp convOp); 49 50 /// Return the unique instance of OpType in `block` if it is indeed unique. 51 /// Return null if none or more than 1 instances exist. 52 template <typename OpType> 53 static OpType getSingleOpOfType(Block &block) { 54 OpType res; 55 block.walk([&](OpType op) { 56 if (res) { 57 res = nullptr; 58 return WalkResult::interrupt(); 59 } 60 res = op; 61 return WalkResult::advance(); 62 }); 63 return res; 64 } 65 66 /// Given an indexing `map` coming from a LinalgOp indexing, restricted to a 67 /// projectedPermutation, compress the unused dimensions to serve as a 68 /// permutation_map for a vector transfer operation. 69 /// For example, given a linalg op such as: 70 /// 71 /// ``` 72 /// %0 = linalg.generic { 73 /// indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>, 74 /// indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)> 75 /// } 76 /// ins(%0 : tensor<2x3x4xf32>) 77 /// outs(%1 : tensor<5x6xf32>) 78 /// ``` 79 /// 80 /// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine 81 /// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second 82 /// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`. 83 static AffineMap reindexIndexingMap(AffineMap map) { 84 assert(map.isProjectedPermutation(/*allowZeroInResults=*/true) && 85 "expected projected permutation"); 86 auto res = compressUnusedDims(map); 87 assert(res.getNumDims() == res.getNumResults() && 88 "expected reindexed map with same number of dims and results"); 89 return res; 90 } 91 92 /// Helper data structure to represent the result of vectorization. 
93 /// In certain specific cases, like terminators, we do not want to propagate/ 94 enum VectorizationStatus { 95 /// Op failed to vectorize. 96 Failure = 0, 97 /// Op vectorized and custom function took care of replacement logic 98 NoReplace, 99 /// Op vectorized into a new Op whose results will replace original Op's 100 /// results. 101 NewOp 102 // TODO: support values if Op vectorized to Many-Ops whose results we need to 103 // aggregate for replacement. 104 }; 105 struct VectorizationResult { 106 /// Return status from vectorizing the current op. 107 enum VectorizationStatus status = VectorizationStatus::Failure; 108 /// New vectorized operation to replace the current op. 109 /// Replacement behavior is specified by `status`. 110 Operation *newOp; 111 }; 112 113 llvm::Optional<vector::CombiningKind> 114 mlir::linalg::getCombinerOpKind(Operation *combinerOp) { 115 using ::mlir::vector::CombiningKind; 116 117 if (!combinerOp) 118 return llvm::None; 119 return llvm::TypeSwitch<Operation *, llvm::Optional<CombiningKind>>( 120 combinerOp) 121 .Case<arith::AddIOp, arith::AddFOp>( 122 [&](auto op) { return CombiningKind::ADD; }) 123 .Case<arith::AndIOp>([&](auto op) { return CombiningKind::AND; }) 124 .Case<arith::MaxSIOp>([&](auto op) { return CombiningKind::MAXSI; }) 125 .Case<arith::MaxFOp>([&](auto op) { return CombiningKind::MAXF; }) 126 .Case<arith::MinSIOp>([&](auto op) { return CombiningKind::MINSI; }) 127 .Case<arith::MinFOp>([&](auto op) { return CombiningKind::MINF; }) 128 .Case<arith::MulIOp, arith::MulFOp>( 129 [&](auto op) { return CombiningKind::MUL; }) 130 .Case<arith::OrIOp>([&](auto op) { return CombiningKind::OR; }) 131 .Case<arith::XOrIOp>([&](auto op) { return CombiningKind::XOR; }) 132 .Default([&](auto op) { return llvm::None; }); 133 } 134 135 /// Check whether `outputOperand` is a reduction with a single combiner 136 /// operation. Return the combiner operation of the reduction. Return 137 /// nullptr otherwise. Multiple reduction operations would impose an 138 /// ordering between reduction dimensions and is currently unsupported in 139 /// Linalg. This limitation is motivated by the fact that e.g. min(max(X)) != 140 /// max(min(X)) 141 // TODO: use in LinalgOp verification, there is a circular dependency atm. 142 static Operation *matchLinalgReduction(OpOperand *outputOperand) { 143 auto linalgOp = cast<LinalgOp>(outputOperand->getOwner()); 144 unsigned outputPos = 145 outputOperand->getOperandNumber() - linalgOp.getNumInputs(); 146 // Only single combiner operations are supported for now. 147 SmallVector<Operation *, 4> combinerOps; 148 if (!matchReduction(linalgOp.getRegionOutputArgs(), outputPos, combinerOps) || 149 combinerOps.size() != 1) 150 return nullptr; 151 152 // Return the combiner operation. 153 return combinerOps[0]; 154 } 155 156 /// Broadcast `value` to a vector of `shape` if possible. Return value 157 /// otherwise. 158 static Value broadcastIfNeeded(OpBuilder &b, Value value, 159 ArrayRef<int64_t> shape) { 160 // If no shape to broadcast to, just return `value`. 161 if (shape.empty()) 162 return value; 163 VectorType targetVectorType = 164 VectorType::get(shape, getElementTypeOrSelf(value)); 165 if (vector::isBroadcastableTo(value.getType(), targetVectorType) != 166 vector::BroadcastableToResult::Success) 167 return value; 168 Location loc = b.getInsertionPoint()->getLoc(); 169 return b.createOrFold<vector::BroadcastOp>(loc, targetVectorType, value); 170 } 171 172 /// Create MultiDimReductionOp to compute the reduction for `reductionOp`. 
This 173 /// assumes that `reductionOp` has two operands and one of them is the reduction 174 /// initial value. 175 static Value buildMultiDimReduce(OpBuilder &b, Operation *reduceOp, 176 Value valueToReduce, 177 const SmallVector<bool> &reductionMask) { 178 auto maybeKind = getCombinerOpKind(reduceOp); 179 assert(maybeKind && "Failed precondition: could not get reduction kind"); 180 return b.create<vector::MultiDimReductionOp>( 181 reduceOp->getLoc(), valueToReduce, reductionMask, *maybeKind); 182 } 183 184 static SmallVector<bool> getReductionMask(LinalgOp linalgOp) { 185 unsigned idx = 0; 186 SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false); 187 for (auto attr : linalgOp.iterator_types()) { 188 if (isReductionIterator(attr)) 189 reductionMask[idx] = true; 190 ++idx; 191 } 192 return reductionMask; 193 } 194 195 /// Build a vector.transfer_write of `value` into `outputOperand` at indices set 196 /// to all `0`; where `outputOperand` is an output operand of the LinalgOp 197 /// currently being vectorized. If `dest` has null rank, build an memref.store. 198 /// Return the produced value or null if no value is produced. 199 static Value buildVectorWrite(OpBuilder &b, Value value, 200 OpOperand *outputOperand) { 201 Operation *write; 202 Location loc = value.getLoc(); 203 auto linalgOp = cast<LinalgOp>(outputOperand->getOwner()); 204 ArrayRef<int64_t> shape = linalgOp.getShape(outputOperand); 205 auto vectorType = VectorType::get( 206 shape, getElementTypeOrSelf(outputOperand->get().getType())); 207 if (vectorType.getRank() > 0) { 208 // 0-d case is still special: do not invert the reindexing map. 209 AffineMap map = 210 reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand)); 211 SmallVector<int64_t> transposeShape = 212 applyPermutationMap(inversePermutation(map), vectorType.getShape()); 213 assert(!transposeShape.empty() && "unexpected empty transpose shape"); 214 vectorType = VectorType::get(transposeShape, vectorType.getElementType()); 215 SmallVector<Value> indices(linalgOp.getRank(outputOperand), 216 b.create<arith::ConstantIndexOp>(loc, 0)); 217 value = broadcastIfNeeded(b, value, vectorType.getShape()); 218 write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(), 219 indices, map); 220 } else { 221 if (!value.getType().isa<VectorType>()) 222 value = b.create<vector::BroadcastOp>(loc, vectorType, value); 223 assert(value.getType() == vectorType && "incorrect type"); 224 write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(), 225 ValueRange{}); 226 } 227 LDBG("vectorized op: " << *write); 228 if (!write->getResults().empty()) 229 return write->getResult(0); 230 return Value(); 231 } 232 233 // Custom vectorization function type. Produce a vector form of Operation* 234 // assuming all its vectorized operands are already in the BlockAndValueMapping. 235 // Return nullptr if the Operation cannot be vectorized. 236 using CustomVectorizationHook = std::function<VectorizationResult( 237 Operation *, const BlockAndValueMapping &)>; 238 239 /// Helper function to vectorize the terminator of a `linalgOp`. New result 240 /// vector values are appended to `newResults`. Return 241 /// VectorizationStatus::NoReplace to signal the vectorization algorithm that it 242 /// should not try to map produced operations and instead return the results 243 /// using the `newResults` vector making them available to the 244 /// vectorization algorithm for RAUW. This function is meant to be used as a 245 /// CustomVectorizationHook. 
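/// For example (illustrative), a `linalg.yield %v : f32` in a generic op whose matching output operand is a tensor<4x8xf32> is rewritten into a vector.transfer_write of the vectorized (broadcast if needed) `%v` into that output operand; the resulting tensor value is appended to `newResults`.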
246 static VectorizationResult 247 vectorizeLinalgYield(OpBuilder &b, Operation *op, 248 const BlockAndValueMapping &bvm, LinalgOp linalgOp, 249 SmallVectorImpl<Value> &newResults) { 250 auto yieldOp = dyn_cast<linalg::YieldOp>(op); 251 if (!yieldOp) 252 return VectorizationResult{VectorizationStatus::Failure, nullptr}; 253 for (const auto &outputs : llvm::enumerate(yieldOp.values())) { 254 // TODO: Scan for an opportunity for reuse. 255 // TODO: use a map. 256 Value vectorValue = bvm.lookup(outputs.value()); 257 Value newResult = buildVectorWrite( 258 b, vectorValue, linalgOp.getOutputOperand(outputs.index())); 259 if (newResult) 260 newResults.push_back(newResult); 261 } 262 return VectorizationResult{VectorizationStatus::NoReplace, nullptr}; 263 } 264 265 /// Helper function to vectorize the index operations of a `linalgOp`. Return 266 /// VectorizationStatus::NewOp to signal the vectorization algorithm that it 267 /// should map the produced operations. This function is meant to be used as a 268 /// CustomVectorizationHook. 269 static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op, 270 LinalgOp linalgOp) { 271 IndexOp indexOp = dyn_cast<linalg::IndexOp>(op); 272 if (!indexOp) 273 return VectorizationResult{VectorizationStatus::Failure, nullptr}; 274 auto loc = indexOp.getLoc(); 275 // Compute the static loop sizes of the index op. 276 auto targetShape = linalgOp.computeStaticLoopSizes(); 277 // Compute a one-dimensional index vector for the index op dimension. 278 SmallVector<int64_t> constantSeq = 279 llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.dim()])); 280 auto constantOp = 281 b.create<arith::ConstantOp>(loc, b.getIndexVectorAttr(constantSeq)); 282 // Return the one-dimensional index vector if it lives in the trailing 283 // dimension of the iteration space since the vectorization algorithm in this 284 // case can handle the broadcast. 285 if (indexOp.dim() == targetShape.size() - 1) 286 return VectorizationResult{VectorizationStatus::NewOp, constantOp}; 287 // Otherwise permute the targetShape to move the index dimension last, 288 // broadcast the one-dimensional index vector to the permuted shape, and 289 // finally transpose the broadcasted index vector to undo the permutation. 290 std::swap(targetShape[indexOp.dim()], targetShape.back()); 291 auto broadCastOp = b.create<vector::BroadcastOp>( 292 loc, VectorType::get(targetShape, b.getIndexType()), constantOp); 293 SmallVector<int64_t> transposition = 294 llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops())); 295 std::swap(transposition.back(), transposition[indexOp.dim()]); 296 auto transposeOp = 297 b.create<vector::TransposeOp>(loc, broadCastOp, transposition); 298 return VectorizationResult{VectorizationStatus::NewOp, transposeOp}; 299 } 300 301 /// Create a new vectorized version of `op` with the given operands and types. 302 static Operation *createVectorizedOp(OpBuilder &b, Operation *op, 303 ValueRange newOperands, 304 ArrayRef<Type> types) { 305 OperationState state(op->getLoc(), op->getName()); 306 state.addAttributes(op->getAttrs()); 307 state.addOperands(newOperands); 308 state.addTypes(types); 309 return b.createOperation(state); 310 } 311 312 /// Emit reduction operations if the shape of the value to reduce is different 313 /// from the result shape. 
314 static Operation *reduceIfNeeded(OpBuilder &b, LinalgOp linalgOp, Operation *op, 315 Value reduceValue, Value initialValue, 316 const BlockAndValueMapping &bvm) { 317 Value reduceVec = bvm.lookup(reduceValue); 318 Value outputVec = bvm.lookup(initialValue); 319 auto reduceType = reduceVec.getType().dyn_cast<VectorType>(); 320 auto outputType = outputVec.getType().dyn_cast<VectorType>(); 321 // Reduce only if needed as the value may already have been reduced for 322 // contraction vectorization. 323 if (!reduceType || 324 (outputType && reduceType.getShape() == outputType.getShape())) 325 return nullptr; 326 SmallVector<bool> reductionMask = getReductionMask(linalgOp); 327 Value reduce = buildMultiDimReduce(b, op, reduceVec, reductionMask); 328 return createVectorizedOp(b, op, {reduce, outputVec}, reduce.getType()); 329 } 330 331 /// Generic vectorization for a single operation `op`, given already vectorized 332 /// operands carried by `bvm`. Vectorization occurs as follows: 333 /// 1. Try to apply any of the `customVectorizationHooks` and return its 334 /// result on success. 335 /// 2. Clone any constant in the current scope without vectorization: each 336 /// consumer of the constant will later determine the shape to which the 337 /// constant needs to be broadcast. 338 /// 3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose 339 /// of the `customVectorizationHooks` to cover such cases. 340 /// 4. Clone `op` in vector form to a vector of shape prescribed by the first 341 /// operand of maximal rank. Other operands have smaller rank and are 342 /// broadcast accordingly. It is assumed this broadcast is always legal, 343 /// otherwise, it means one of the `customVectorizationHooks` is incorrect. 344 /// 345 /// This function assumes all operands of `op` have been vectorized and are in 346 /// the `bvm` mapping. As a consequence, this function is meant to be called on 347 /// a topologically-sorted list of ops. 348 /// This function does not update `bvm` but returns a VectorizationStatus that 349 /// instructs the caller what `bvm` update needs to occur. 350 static VectorizationResult 351 vectorizeOneOp(OpBuilder &b, LinalgOp linalgOp, Operation *op, 352 const BlockAndValueMapping &bvm, 353 ArrayRef<CustomVectorizationHook> customVectorizationHooks) { 354 LDBG("vectorize op " << *op); 355 356 // 1. Try to apply any CustomVectorizationHook. 357 if (!customVectorizationHooks.empty()) { 358 for (auto &customFunc : customVectorizationHooks) { 359 VectorizationResult result = customFunc(op, bvm); 360 if (result.status == VectorizationStatus::Failure) 361 continue; 362 return result; 363 } 364 } 365 366 // 2. Constant ops don't get vectorized but rather broadcasted at their users. 367 // Clone so that the constant is not confined to the linalgOp block. 368 if (isa<arith::ConstantOp, ConstantOp>(op)) 369 return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)}; 370 371 // 3. Only ElementwiseMappable are allowed in the generic vectorization. 372 if (!OpTrait::hasElementwiseMappableTraits(op)) 373 return VectorizationResult{VectorizationStatus::Failure, nullptr}; 374 375 // 4. Check if the operation is a reduction. 
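  // E.g. (illustrative), for a region of the form
  //   %0 = arith.addf %in, %out : f32
  //   linalg.yield %0 : f32
  // the output block argument %out identifies the reduction carrier: the addf
  // is rewritten into a vector.multi_reduction over the reduction dimensions,
  // followed by an addf with the vectorized output.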
376 SmallVector<std::pair<Value, Value>> reductionOperands; 377 for (Value operand : op->getOperands()) { 378 auto arg = operand.dyn_cast<BlockArgument>(); 379 if (!arg || arg.getArgNumber() < linalgOp.getNumInputs()) 380 continue; 381 SmallVector<Operation *> reductionOps; 382 Value reduceValue = matchReduction( 383 linalgOp.getRegionOutputArgs(), 384 arg.getArgNumber() - linalgOp.getNumInputs(), reductionOps); 385 if (!reduceValue) 386 continue; 387 reductionOperands.push_back(std::make_pair(reduceValue, operand)); 388 } 389 if (!reductionOperands.empty()) { 390 assert(reductionOperands.size() == 1); 391 Operation *reduceOp = 392 reduceIfNeeded(b, linalgOp, op, reductionOperands[0].first, 393 reductionOperands[0].second, bvm); 394 if (reduceOp) 395 return VectorizationResult{VectorizationStatus::NewOp, reduceOp}; 396 } 397 398 // 5. Generic vectorization path for ElementwiseMappable ops. 399 // a. first get the first max ranked shape. 400 SmallVector<int64_t, 4> firstMaxRankedShape; 401 for (Value operand : op->getOperands()) { 402 auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>(); 403 if (vt && firstMaxRankedShape.size() < vt.getShape().size()) 404 firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end()); 405 } 406 // b. broadcast each op if needed. 407 auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) { 408 return firstMaxRankedShape.empty() 409 ? bvm.lookup(v) 410 : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape); 411 }); 412 // c. for elementwise, the result is the vector with the firstMaxRankedShape 413 auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) { 414 return firstMaxRankedShape.empty() 415 ? t 416 : VectorType::get(firstMaxRankedShape, t); 417 }); 418 419 // Build and return the new op. 420 return VectorizationResult{ 421 VectorizationStatus::NewOp, 422 createVectorizedOp(b, op, llvm::to_vector<4>(vectorizedOperands), 423 llvm::to_vector<4>(returnTypes))}; 424 } 425 426 /// Detect whether `r` has only ConstantOp, ElementwiseMappable and YieldOp. 427 static bool hasOnlyScalarElementwiseOp(Region &r) { 428 if (!llvm::hasSingleElement(r)) 429 return false; 430 for (Operation &op : r.front()) { 431 if (!(isa<arith::ConstantOp, ConstantOp, linalg::YieldOp, linalg::IndexOp>( 432 op) || 433 OpTrait::hasElementwiseMappableTraits(&op)) || 434 llvm::any_of(op.getResultTypes(), 435 [](Type type) { return !type.isIntOrIndexOrFloat(); })) 436 return false; 437 } 438 return true; 439 } 440 441 // Return true if the op is an element-wise linalg op. 442 static bool isElementwise(Operation *op) { 443 auto linalgOp = dyn_cast<linalg::LinalgOp>(op); 444 if (!linalgOp) 445 return false; 446 if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops()) 447 return false; 448 // TODO: relax the restrictions on indexing map. 449 for (OpOperand *opOperand : linalgOp.getOutputOperands()) { 450 if (!linalgOp.getTiedIndexingMap(opOperand).isIdentity()) 451 return false; 452 } 453 return hasOnlyScalarElementwiseOp(linalgOp->getRegion(0)); 454 } 455 456 /// Generic vectorization function that rewrites the body of a `linalgOp` into 457 /// vector form. Generic vectorization proceeds as follows: 458 /// 1. Verify the `linalgOp` has one non-empty region. 459 /// 2. Values defined above the region are mapped to themselves and will be 460 /// broadcasted on a per-need basis by their consumers. 461 /// 3. Each region argument is vectorized into a vector.transfer_read (or 0-d 462 /// load). 
463 /// TODO: Reuse opportunities for RAR dependencies. 464 /// 4a. Register CustomVectorizationHook for YieldOp to capture the results. 465 /// 4b. Register CustomVectorizationHook for IndexOp to access the iteration 466 /// indices. 467 /// 5. Iteratively call vectorizeOneOp on the region operations. 468 /// 469 /// When `broadcastToMaximalCommonShape` is set to true, eager broadcasting is 470 /// performed to the maximal common vector size implied by the `linalgOp` 471 /// iteration space. This eager broadcasting is introduced in the 472 /// permutation_map of the vector.transfer_read operations. The eager 473 /// broadcasting makes it trivial to determine where broadcasts, transposes and 474 /// reductions should occur, without any bookkeeping. The tradeoff is that, in 475 /// the absence of good canonicalizations, the amount of work increases. 476 /// This is not deemed a problem as we expect canonicalizations and foldings to 477 /// aggressively clean up the useless work. 478 static LogicalResult 479 vectorizeAsLinalgGeneric(OpBuilder &b, LinalgOp linalgOp, 480 SmallVectorImpl<Value> &newResults) { 481 Block *block = linalgOp.getBlock(); 482 483 // 2. Values defined above the region can only be broadcast for now. Make them 484 // map to themselves. 485 BlockAndValueMapping bvm; 486 SetVector<Value> valuesSet; 487 mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet); 488 bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef()); 489 490 if (linalgOp.getNumOutputs() == 0) 491 return failure(); 492 493 // TODO: the common vector shape is equal to the static loop sizes only when 494 // all indexing maps are projected permutations. For convs and stencils the 495 // logic will need to evolve. 496 SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes(); 497 498 // 3. Turn all BBArgs into vector.transfer_read / load. 499 Location loc = linalgOp.getLoc(); 500 Value zero = b.create<arith::ConstantIndexOp>(loc, 0); 501 for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) { 502 BlockArgument bbarg = block->getArgument(opOperand->getOperandNumber()); 503 if (linalgOp.isScalar(opOperand)) { 504 bvm.map(bbarg, opOperand->get()); 505 continue; 506 } 507 VectorType readType; 508 AffineMap map; 509 // TODO: can we keep this simplification? 510 // if (linalgOp.getShape(opOperand).empty()) { 511 // readType = VectorType::get({}, bbarg.getType()); 512 // } else { 513 if (opOperand->getOperandNumber() < linalgOp.getNumInputs()) { 514 map = inverseAndBroadcastProjectedPermuation( 515 linalgOp.getTiedIndexingMap(opOperand)); 516 readType = VectorType::get(commonVectorShape, 517 getElementTypeOrSelf(opOperand->get())); 518 } else { 519 map = inversePermutation( 520 reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand))); 521 readType = VectorType::get(map.compose(linalgOp.getShape(opOperand)), 522 getElementTypeOrSelf(opOperand->get())); 523 } 524 // } 525 526 auto shape = linalgOp.getShape(opOperand); 527 SmallVector<Value> indices(shape.size(), zero); 528 Value readValue = b.create<vector::TransferReadOp>( 529 loc, readType, opOperand->get(), indices, map); 530 // Not all ops support 0-d vectors, extract the scalar for now. 531 // TODO: remove this. 
532 if (readValue.getType().cast<VectorType>().getRank() == 0) 533 readValue = b.create<vector::ExtractElementOp>(loc, readValue); 534 535 LDBG("new vectorized bbarg(" << bbarg.getArgNumber() << "): " << readValue); 536 bvm.map(bbarg, readValue); 537 bvm.map(opOperand->get(), readValue); 538 } 539 540 SmallVector<CustomVectorizationHook> hooks; 541 // 4a. Register CustomVectorizationHook for yieldOp. 542 CustomVectorizationHook vectorizeYield = 543 [&](Operation *op, 544 const BlockAndValueMapping &bvm) -> VectorizationResult { 545 return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults); 546 }; 547 hooks.push_back(vectorizeYield); 548 549 // 4b. Register CustomVectorizationHook for indexOp. 550 CustomVectorizationHook vectorizeIndex = 551 [&](Operation *op, 552 const BlockAndValueMapping &bvm) -> VectorizationResult { 553 return vectorizeLinalgIndex(b, op, linalgOp); 554 }; 555 hooks.push_back(vectorizeIndex); 556 557 // 5. Iteratively call `vectorizeOneOp` to each op in the slice. 558 for (Operation &op : block->getOperations()) { 559 VectorizationResult result = vectorizeOneOp(b, linalgOp, &op, bvm, hooks); 560 if (result.status == VectorizationStatus::Failure) { 561 LDBG("failed to vectorize: " << op); 562 return failure(); 563 } 564 if (result.status == VectorizationStatus::NewOp) { 565 LDBG("new vector op: " << *result.newOp;); 566 bvm.map(op.getResults(), result.newOp->getResults()); 567 } 568 } 569 570 return success(); 571 } 572 573 /// Helper function to vectorize a `linalgOp` with contraction semantics in a 574 /// generic fashion. 575 /// This helper is needed atm because the truly generic implementation requires 576 /// good vector.multi_reduce folding patterns that are currently NYI. 577 // TODO: drop reliance on a specific pattern. 578 static bool allIndexingsAreProjectedPermutation(LinalgOp op) { 579 return llvm::all_of(op.getIndexingMaps(), [](AffineMap m) { 580 return m.isProjectedPermutation(/*allowZeroInResults=*/true); 581 }); 582 } 583 584 // TODO: probably need some extra checks for reduction followed by consumer 585 // ops that may not commute (e.g. linear reduction + non-linear instructions). 586 static LogicalResult reductionPreconditions(LinalgOp op) { 587 if (llvm::none_of(op.iterator_types(), isReductionIterator)) { 588 LDBG("reduction precondition failed: no reduction iterator"); 589 return failure(); 590 } 591 for (OpOperand *opOperand : op.getOutputOperands()) { 592 Operation *reduceOp = matchLinalgReduction(opOperand); 593 if (!reduceOp || !getCombinerOpKind(reduceOp)) { 594 LDBG("reduction precondition failed: reduction detection failed"); 595 return failure(); 596 } 597 } 598 return success(); 599 } 600 601 static LogicalResult vectorizeStaticLinalgOpPrecondition(linalg::LinalgOp op) { 602 if (isElementwise(op)) 603 return success(); 604 // TODO: isaConvolutionOpInterface that can also infer from generic features. 605 // But we will still need stride/dilation attributes that will be annoying to 606 // reverse-engineer... 607 if (isa<ConvolutionOpInterface>(op.getOperation())) 608 return success(); 609 // TODO: the common vector shape is equal to the static loop sizes only when 610 // all indexing maps are projected permutations. For convs and stencils the 611 // logic will need to evolve. 
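  // Note (illustrative): a projected permutation keeps a subset of the
  // iteration dimensions and permutes them, e.g.
  // affine_map<(d0, d1, d2) -> (d2, d0)>; maps with composed expressions such
  // as affine_map<(d0, d1) -> (d0 + d1)> are not projected permutations and
  // therefore fail this precondition.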
612 if (!allIndexingsAreProjectedPermutation(op)) { 613 LDBG("precondition failed: not projected permutations"); 614 return failure(); 615 } 616 if (failed(reductionPreconditions(op))) { 617 LDBG("precondition failed: reduction preconditions"); 618 return failure(); 619 } 620 return success(); 621 } 622 623 static LogicalResult vectorizeLinalgOpPrecondition(LinalgOp linalgOp) { 624 // All types must be static shape to go to vector. 625 if (linalgOp.hasDynamicShape()) { 626 LDBG("precondition failed: dynamic shape"); 627 return failure(); 628 } 629 return vectorizeStaticLinalgOpPrecondition(linalgOp); 630 } 631 632 LogicalResult mlir::linalg::vectorize(RewriterBase &rewriter, 633 LinalgOp linalgOp) { 634 if (failed(vectorizeLinalgOpPrecondition(linalgOp))) 635 return failure(); 636 637 SmallVector<Value> results; 638 // TODO: isaConvolutionOpInterface that can also infer from generic 639 // features. Will require stride/dilation attributes inference. 640 FailureOr<Operation *> convOr = vectorizeConvolution(rewriter, linalgOp); 641 if (succeeded(convOr)) { 642 llvm::append_range(results, (*convOr)->getResults()); 643 } else { 644 if (failed(vectorizeLinalgOpPrecondition(linalgOp))) 645 return failure(); 646 LDBG("Vectorize generic by broadcasting to a common shape: " << linalgOp); 647 if (failed(vectorizeAsLinalgGeneric(rewriter, linalgOp, results))) 648 return failure(); 649 } 650 651 if (!results.empty()) 652 rewriter.replaceOp(linalgOp, results); 653 else 654 rewriter.eraseOp(linalgOp); 655 656 return success(); 657 } 658 659 //----------------------------------------------------------------------------// 660 // Misc. vectorization patterns. 661 //----------------------------------------------------------------------------// 662 663 /// Helper function that retrieves the value of an IntegerAttr. 664 static int64_t getIntFromAttr(Attribute attr) { 665 return attr.cast<IntegerAttr>().getInt(); 666 } 667 668 /// Given an ArrayRef of OpFoldResults, return a vector of Values. 669 /// IntegerAttrs are converted to ConstantIndexOps. Other attribute types are 670 /// not supported. 671 static SmallVector<Value> ofrToIndexValues(OpBuilder &builder, Location loc, 672 ArrayRef<OpFoldResult> ofrs) { 673 SmallVector<Value> result; 674 llvm::for_each(ofrs, [&](auto o) { 675 if (auto val = o.template dyn_cast<Value>()) { 676 result.push_back(val); 677 } else { 678 result.push_back(builder.create<arith::ConstantIndexOp>( 679 loc, getIntFromAttr(o.template get<Attribute>()))); 680 } 681 }); 682 return result; 683 } 684 685 /// Rewrite a PadTensorOp into a sequence of InitTensorOp, FillOp and 686 /// InsertSliceOp. For now, only constant padding values are supported. 687 /// If there is enough static type information, TransferReadOps and 688 /// TransferWriteOps may be generated instead of InsertSliceOps. 689 struct GenericPadTensorOpVectorizationPattern 690 : public GeneralizePadTensorOpPattern { 691 GenericPadTensorOpVectorizationPattern(MLIRContext *context, 692 PatternBenefit benefit = 1) 693 : GeneralizePadTensorOpPattern(context, tryVectorizeCopy, benefit) {} 694 /// Vectorize the copying of a PadTensorOp's source. This is possible if 695 /// each dimension size is statically known in the source type or the result 696 /// type (or both). 
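/// For example (illustrative), padding a tensor<?x5xf32> source into a
/// tensor<10x5xf32> result is rewritten into a transfer_read of
/// vector<10x5xf32> (the dynamic dimension is read out-of-bounds and filled
/// with the padding value) followed by a transfer_write into `dest` at the
/// low-pad offsets.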
697 static LogicalResult tryVectorizeCopy(PatternRewriter &rewriter, 698 PadTensorOp padOp, Value dest) { 699 auto sourceType = padOp.getSourceType(); 700 auto resultType = padOp.getResultType(); 701 702 // Copy cannot be vectorized if pad value is non-constant and source shape 703 // is dynamic. In case of a dynamic source shape, padding must be appended 704 // by TransferReadOp, but TransferReadOp supports only constant padding. 705 auto padValue = padOp.getConstantPaddingValue(); 706 if (!padValue) { 707 if (!sourceType.hasStaticShape()) 708 return failure(); 709 // Create dummy padding value. 710 auto elemType = sourceType.getElementType(); 711 padValue = rewriter.create<arith::ConstantOp>( 712 padOp.getLoc(), elemType, rewriter.getZeroAttr(elemType)); 713 } 714 715 SmallVector<int64_t> vecShape; 716 SmallVector<bool> readInBounds; 717 SmallVector<bool> writeInBounds; 718 for (unsigned i = 0; i < sourceType.getRank(); ++i) { 719 if (!sourceType.isDynamicDim(i)) { 720 vecShape.push_back(sourceType.getDimSize(i)); 721 // Source shape is statically known: Neither read nor write is 722 // out-of-bounds. 723 readInBounds.push_back(true); 724 writeInBounds.push_back(true); 725 } else if (!resultType.isDynamicDim(i)) { 726 // Source shape is not statically known, but result shape is. 727 // Vectorize with size of result shape. This may be larger than the 728 // source size. 729 vecShape.push_back(resultType.getDimSize(i)); 730 // Read may be out-of-bounds because the result size could be larger 731 // than the source size. 732 readInBounds.push_back(false); 733 // Write is out-of-bounds if low padding > 0. 734 writeInBounds.push_back( 735 getConstantIntValue(padOp.getMixedLowPad()[i]) == 736 static_cast<int64_t>(0)); 737 } else { 738 // Neither source nor result dim of padOp is static. Cannot vectorize 739 // the copy. 740 return failure(); 741 } 742 } 743 auto vecType = VectorType::get(vecShape, sourceType.getElementType()); 744 745 // Generate TransferReadOp. 746 SmallVector<Value> readIndices( 747 vecType.getRank(), 748 rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0)); 749 auto read = rewriter.create<vector::TransferReadOp>( 750 padOp.getLoc(), vecType, padOp.source(), readIndices, padValue, 751 ArrayRef<bool>{readInBounds}); 752 753 // If `dest` is the result of a FillOp and the TransferWriteOp would overwrite 754 // the entire tensor, write directly to the FillOp's operand. 755 if (llvm::equal(vecShape, resultType.getShape()) && 756 llvm::all_of(writeInBounds, [](bool b) { return b; })) 757 if (auto fill = dest.getDefiningOp<FillOp>()) 758 dest = fill.output(); 759 760 // Generate TransferWriteOp. 761 auto writeIndices = 762 ofrToIndexValues(rewriter, padOp.getLoc(), padOp.getMixedLowPad()); 763 rewriter.replaceOpWithNewOp<vector::TransferWriteOp>( 764 padOp, read, dest, writeIndices, ArrayRef<bool>{writeInBounds}); 765 766 return success(); 767 } 768 }; 769 770 /// Base pattern for rewriting PadTensorOps whose result is consumed by a 771 /// given operation type OpTy. 772 template <typename OpTy> 773 struct VectorizePadTensorOpUserPattern : public OpRewritePattern<PadTensorOp> { 774 using OpRewritePattern<PadTensorOp>::OpRewritePattern; 775 776 LogicalResult matchAndRewrite(PadTensorOp padOp, 777 PatternRewriter &rewriter) const final { 778 bool changed = false; 779 // Insert users in a vector, because some users may be replaced/removed. 
780 for (auto *user : llvm::to_vector<4>(padOp->getUsers())) 781 if (auto op = dyn_cast<OpTy>(user)) 782 changed |= rewriteUser(rewriter, padOp, op).succeeded(); 783 return success(changed); 784 } 785 786 protected: 787 virtual LogicalResult rewriteUser(PatternRewriter &rewriter, 788 PadTensorOp padOp, OpTy op) const = 0; 789 }; 790 791 /// Rewrite use of PadTensorOp result in TransferReadOp. E.g.: 792 /// ``` 793 /// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32> 794 /// %r = vector.transfer_read %0[%c0, %c0], %cst 795 /// {in_bounds = [true, true]} : tensor<17x5xf32>, vector<17x5xf32> 796 /// ``` 797 /// is rewritten to: 798 /// ``` 799 /// %r = vector.transfer_read %src[%c0, %c0], %padding 800 /// {in_bounds = [true, true]} 801 /// : tensor<?x?xf32>, vector<17x5xf32> 802 /// ``` 803 /// Note: By restricting this pattern to in-bounds TransferReadOps, we can be 804 /// sure that the original padding value %cst was never used. 805 /// 806 /// This rewrite is possible if: 807 /// - `xferOp` has no out-of-bounds dims or mask. 808 /// - Low padding is static 0. 809 /// - Single, scalar padding value. 810 struct PadTensorOpVectorizationWithTransferReadPattern 811 : public VectorizePadTensorOpUserPattern<vector::TransferReadOp> { 812 using VectorizePadTensorOpUserPattern< 813 vector::TransferReadOp>::VectorizePadTensorOpUserPattern; 814 815 LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp, 816 vector::TransferReadOp xferOp) const override { 817 // Low padding must be static 0. 818 if (!padOp.hasZeroLowPad()) 819 return failure(); 820 // Pad value must be a constant. 821 auto padValue = padOp.getConstantPaddingValue(); 822 if (!padValue) 823 return failure(); 824 // Padding value of existing `xferOp` is unused. 825 if (xferOp.hasOutOfBoundsDim() || xferOp.mask()) 826 return failure(); 827 828 rewriter.updateRootInPlace(xferOp, [&]() { 829 SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false); 830 xferOp->setAttr(xferOp.getInBoundsAttrName(), 831 rewriter.getBoolArrayAttr(inBounds)); 832 xferOp.sourceMutable().assign(padOp.source()); 833 xferOp.paddingMutable().assign(padValue); 834 }); 835 836 return success(); 837 } 838 }; 839 840 /// Rewrite use of PadTensorOp result in TransferWriteOp. 841 /// This pattern rewrites TransferWriteOps that write to a padded tensor 842 /// value, where the same amount of padding is immediately removed again after 843 /// the write. In such cases, the TransferWriteOp can write to the non-padded 844 /// tensor value and apply out-of-bounds masking. E.g.: 845 /// ``` 846 /// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1] 847 /// : tensor<...> to tensor<?x?xf32> 848 /// %1 = linalg.pad_tensor %0 ... : tensor<?x?xf32> to tensor<17x5xf32> 849 /// %2 = vector.transfer_write %vec, %1[...] 850 /// : vector<17x5xf32>, tensor<17x5xf32> 851 /// %r = tensor.extract_slice %2[0, 0] [%s0, %s1] [1, 1] 852 /// : tensor<17x5xf32> to tensor<?x?xf32> 853 /// ``` 854 /// is rewritten to: 855 /// ``` 856 /// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1] 857 /// : tensor<...> to tensor<?x?xf32> 858 /// %r = vector.transfer_write %vec, %0[...] : vector<17x5xf32>, 859 /// tensor<?x?xf32> 860 /// ``` 861 /// Note: It is important that the ExtractSliceOp %r resizes the result of the 862 /// TransferWriteOp to the same size as the input of the TensorPadOp (or an 863 /// even smaller size). Otherwise, %r's new (dynamic) dimensions would differ 864 /// from %r's old dimensions. 
865 /// 866 /// This rewrite is possible if: 867 /// - Low padding is static 0. 868 /// - `xferOp` has exactly one use, which is an ExtractSliceOp. This 869 /// ExtractSliceOp trims the same amount of padding that was added 870 /// beforehand. 871 /// - Single, scalar padding value. 872 struct PadTensorOpVectorizationWithTransferWritePattern 873 : public VectorizePadTensorOpUserPattern<vector::TransferWriteOp> { 874 using VectorizePadTensorOpUserPattern< 875 vector::TransferWriteOp>::VectorizePadTensorOpUserPattern; 876 877 LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp, 878 vector::TransferWriteOp xferOp) const override { 879 // TODO: support 0-d corner case. 880 if (xferOp.getTransferRank() == 0) 881 return failure(); 882 883 // Low padding must be static 0. 884 if (!padOp.hasZeroLowPad()) 885 return failure(); 886 // Pad value must be a constant. 887 auto padValue = padOp.getConstantPaddingValue(); 888 if (!padValue) 889 return failure(); 890 // TransferWriteOp result must be directly consumed by an ExtractSliceOp. 891 if (!xferOp->hasOneUse()) 892 return failure(); 893 auto trimPadding = dyn_cast<tensor::ExtractSliceOp>(*xferOp->user_begin()); 894 if (!trimPadding) 895 return failure(); 896 // Only static zero offsets supported when trimming padding. 897 if (!trimPadding.hasZeroOffset()) 898 return failure(); 899 // trimPadding must remove the amount of padding that was added earlier. 900 if (!hasSameTensorSize(padOp.source(), trimPadding)) 901 return failure(); 902 903 // Insert the new TransferWriteOp at the position of the old TransferWriteOp. 904 rewriter.setInsertionPoint(xferOp); 905 906 SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false); 907 auto newXferOp = rewriter.replaceOpWithNewOp<vector::TransferWriteOp>( 908 xferOp, padOp.source().getType(), xferOp.vector(), padOp.source(), 909 xferOp.indices(), xferOp.permutation_mapAttr(), xferOp.mask(), 910 rewriter.getBoolArrayAttr(inBounds)); 911 rewriter.replaceOp(trimPadding, newXferOp->getResult(0)); 912 913 return success(); 914 } 915 916 /// Check if `beforePadding` and `afterTrimming` have the same tensor size, 917 /// i.e., same dimensions. 918 /// 919 /// Dimensions may be static, dynamic or a mix of both. In case of dynamic 920 /// dimensions, this function tries to infer the (static) tensor size by 921 /// looking at the defining op and utilizing op-specific knowledge. 922 /// 923 /// This is a conservative analysis. In case equal tensor sizes cannot be 924 /// proven statically, this analysis returns `false` even though the tensor 925 /// sizes may turn out to be equal at runtime. 926 bool hasSameTensorSize(Value beforePadding, 927 tensor::ExtractSliceOp afterTrimming) const { 928 // If the input to PadTensorOp is a CastOp, try with both CastOp 929 // result and CastOp operand. 930 if (auto castOp = beforePadding.getDefiningOp<tensor::CastOp>()) 931 if (hasSameTensorSize(castOp.source(), afterTrimming)) 932 return true; 933 934 auto t1 = beforePadding.getType().dyn_cast<RankedTensorType>(); 935 auto t2 = afterTrimming.getType().dyn_cast<RankedTensorType>(); 936 // Only RankedTensorType supported. 937 if (!t1 || !t2) 938 return false; 939 // Rank of both values must be the same. 940 if (t1.getRank() != t2.getRank()) 941 return false; 942 943 // All static dimensions must be the same. Mixed cases (e.g., dimension 944 // static in `t1` but dynamic in `t2`) are not supported. 
945 for (unsigned i = 0; i < t1.getRank(); ++i) { 946 if (t1.isDynamicDim(i) != t2.isDynamicDim(i)) 947 return false; 948 if (!t1.isDynamicDim(i) && t1.getDimSize(i) != t2.getDimSize(i)) 949 return false; 950 } 951 952 // Nothing more to check if all dimensions are static. 953 if (t1.getNumDynamicDims() == 0) 954 return true; 955 956 // All dynamic sizes must be the same. The only supported case at the 957 // moment is when `beforePadding` is an ExtractSliceOp (or a cast 958 // thereof). 959 960 // Apart from CastOp, only ExtractSliceOp is supported. 961 auto beforeSlice = beforePadding.getDefiningOp<tensor::ExtractSliceOp>(); 962 if (!beforeSlice) 963 return false; 964 965 assert(static_cast<size_t>(t1.getRank()) == 966 beforeSlice.getMixedSizes().size()); 967 assert(static_cast<size_t>(t2.getRank()) == 968 afterTrimming.getMixedSizes().size()); 969 970 for (unsigned i = 0; i < t1.getRank(); ++i) { 971 // Skip static dimensions. 972 if (!t1.isDynamicDim(i)) 973 continue; 974 auto size1 = beforeSlice.getMixedSizes()[i]; 975 auto size2 = afterTrimming.getMixedSizes()[i]; 976 977 // Case 1: Same value or same constant int. 978 if (isEqualConstantIntOrValue(size1, size2)) 979 continue; 980 981 // Other cases: Take a deeper look at defining ops of values. 982 auto v1 = size1.dyn_cast<Value>(); 983 auto v2 = size2.dyn_cast<Value>(); 984 if (!v1 || !v2) 985 return false; 986 987 // Case 2: Both values are identical AffineMinOps. (Should not happen if 988 // CSE is run.) 989 auto minOp1 = v1.getDefiningOp<AffineMinOp>(); 990 auto minOp2 = v2.getDefiningOp<AffineMinOp>(); 991 if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() && 992 minOp1.operands() == minOp2.operands()) 993 continue; 994 995 // Add additional cases as needed. 996 } 997 998 // All tests passed. 999 return true; 1000 } 1001 }; 1002 1003 /// Rewrite use of PadTensorOp result in InsertSliceOp. E.g.: 1004 /// ``` 1005 /// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32> 1006 /// %r = tensor.insert_slice %0 1007 /// into %dest[%a, %b, 0, 0] [1, 1, 17, 5] [1, 1, 1, 1] 1008 /// : tensor<17x5xf32> into tensor<?x?x17x5xf32> 1009 /// ``` 1010 /// is rewritten to: 1011 /// ``` 1012 /// %0 = vector.transfer_read %src[%c0, %c0], %padding 1013 /// : tensor<?x?xf32>, vector<17x5xf32> 1014 /// %r = vector.transfer_write %0, %dest[%a, %b, %c0, %c0] 1015 /// {in_bounds = [true, true]} : vector<17x5xf32>, tensor<?x?x17x5xf32> 1016 /// ``` 1017 /// 1018 /// This rewrite is possible if: 1019 /// - Low padding is static 0. 1020 /// - `padOp` result shape is static. 1021 /// - The entire padded tensor is inserted. 1022 /// (Implies that sizes of `insertOp` are all static.) 1023 /// - Only unit strides in `insertOp`. 1024 /// - Single, scalar padding value. 1025 /// - `padOp` result not used as destination. 1026 struct PadTensorOpVectorizationWithInsertSlicePattern 1027 : public VectorizePadTensorOpUserPattern<tensor::InsertSliceOp> { 1028 using VectorizePadTensorOpUserPattern< 1029 tensor::InsertSliceOp>::VectorizePadTensorOpUserPattern; 1030 1031 LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp, 1032 tensor::InsertSliceOp insertOp) const override { 1033 // Low padding must be static 0. 1034 if (!padOp.hasZeroLowPad()) 1035 return failure(); 1036 // Only unit stride supported. 1037 if (!insertOp.hasUnitStride()) 1038 return failure(); 1039 // Pad value must be a constant. 
1040 auto padValue = padOp.getConstantPaddingValue(); 1041 if (!padValue) 1042 return failure(); 1043 // Dynamic shapes not supported. 1044 if (!padOp.result().getType().cast<ShapedType>().hasStaticShape()) 1045 return failure(); 1046 // Pad result not used as destination. 1047 if (insertOp.dest() == padOp.result()) 1048 return failure(); 1049 1050 auto vecType = VectorType::get(padOp.getType().getShape(), 1051 padOp.getType().getElementType()); 1052 unsigned vecRank = vecType.getRank(); 1053 unsigned tensorRank = insertOp.getType().getRank(); 1054 1055 // Check if sizes match: Insert the entire tensor into most minor dims. 1056 // (No permutations allowed.) 1057 SmallVector<int64_t> expectedSizes(tensorRank - vecRank, 1); 1058 expectedSizes.append(vecType.getShape().begin(), vecType.getShape().end()); 1059 if (!llvm::all_of( 1060 llvm::zip(insertOp.getMixedSizes(), expectedSizes), [](auto it) { 1061 return getConstantIntValue(std::get<0>(it)) == std::get<1>(it); 1062 })) 1063 return failure(); 1064 1065 // Insert the TransferReadOp and TransferWriteOp at the position of the 1066 // InsertSliceOp. 1067 rewriter.setInsertionPoint(insertOp); 1068 1069 // Generate TransferReadOp: Read entire source tensor and add high 1070 // padding. 1071 SmallVector<Value> readIndices( 1072 vecRank, rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0)); 1073 auto read = rewriter.create<vector::TransferReadOp>( 1074 padOp.getLoc(), vecType, padOp.source(), readIndices, padValue); 1075 1076 // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at 1077 // specified offsets. Write is fully in-bounds because a InsertSliceOp's 1078 // source must fit into the destination at the specified offsets. 1079 auto writeIndices = 1080 ofrToIndexValues(rewriter, padOp.getLoc(), insertOp.getMixedOffsets()); 1081 SmallVector<bool> inBounds(vecRank, true); 1082 rewriter.replaceOpWithNewOp<vector::TransferWriteOp>( 1083 insertOp, read, insertOp.dest(), writeIndices, 1084 ArrayRef<bool>{inBounds}); 1085 1086 return success(); 1087 } 1088 }; 1089 1090 void mlir::linalg::populatePadTensorOpVectorizationPatterns( 1091 RewritePatternSet &patterns, PatternBenefit baseBenefit) { 1092 patterns.add<GenericPadTensorOpVectorizationPattern>(patterns.getContext(), 1093 baseBenefit); 1094 // Try these specialized patterns first before resorting to the generic one. 1095 patterns.add<PadTensorOpVectorizationWithTransferReadPattern, 1096 PadTensorOpVectorizationWithTransferWritePattern, 1097 PadTensorOpVectorizationWithInsertSlicePattern>( 1098 patterns.getContext(), baseBenefit.getBenefit() + 1); 1099 } 1100 1101 //----------------------------------------------------------------------------// 1102 // Forwarding patterns 1103 //----------------------------------------------------------------------------// 1104 1105 /// Check whether there is any interleaved use of any `values` between 1106 /// `firstOp` and `secondOp`. Conservatively return `true` if any op or value 1107 /// is in a different block. 
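/// For example (illustrative), if `firstOp` is a linalg.fill of a buffer and
/// `secondOp` is a linalg.copy into a subview of that buffer, any other user
/// of the buffer or subview sitting between the two ops counts as an
/// interleaved use and makes this function return `true`.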
1108 static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp, 1109 ValueRange values) { 1110 if (firstOp->getBlock() != secondOp->getBlock() || 1111 !firstOp->isBeforeInBlock(secondOp)) { 1112 LDBG("interleavedUses precondition failed, firstOp: " 1113 << *firstOp << ", second op: " << *secondOp); 1114 return true; 1115 } 1116 for (auto v : values) { 1117 for (auto &u : v.getUses()) { 1118 Operation *owner = u.getOwner(); 1119 if (owner == firstOp || owner == secondOp) 1120 continue; 1121 // TODO: this is too conservative, use dominance info in the future. 1122 if (owner->getBlock() == firstOp->getBlock() && 1123 (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner))) 1124 continue; 1125 LDBG(" found interleaved op " << *owner << ", firstOp: " << *firstOp 1126 << ", second op: " << *secondOp); 1127 return true; 1128 } 1129 } 1130 return false; 1131 } 1132 1133 /// Return the unique subview use of `v` if it is indeed unique, null 1134 /// otherwise. 1135 static memref::SubViewOp getSubViewUseIfUnique(Value v) { 1136 memref::SubViewOp subViewOp; 1137 for (auto &u : v.getUses()) { 1138 if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) { 1139 if (subViewOp) 1140 return memref::SubViewOp(); 1141 subViewOp = newSubViewOp; 1142 } 1143 } 1144 return subViewOp; 1145 } 1146 1147 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate, 1148 /// when available. 1149 LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite( 1150 vector::TransferReadOp xferOp, PatternRewriter &rewriter) const { 1151 1152 // TODO: support mask. 1153 if (xferOp.mask()) 1154 return failure(); 1155 1156 // Transfer into `view`. 1157 Value viewOrAlloc = xferOp.source(); 1158 if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() && 1159 !viewOrAlloc.getDefiningOp<memref::AllocOp>()) 1160 return failure(); 1161 1162 LDBG(viewOrAlloc); 1163 1164 // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`. 1165 memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc); 1166 if (!subViewOp) 1167 return failure(); 1168 Value subView = subViewOp.getResult(); 1169 LDBG("with subView " << subView); 1170 1171 // Find the copy into `subView` without interleaved uses. 1172 CopyOp copyOp; 1173 for (auto &u : subView.getUses()) { 1174 if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) { 1175 assert(newCopyOp.output().getType().isa<MemRefType>()); 1176 if (newCopyOp.output() != subView) 1177 continue; 1178 LDBG("copy candidate " << *newCopyOp); 1179 if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView})) 1180 continue; 1181 copyOp = newCopyOp; 1182 break; 1183 } 1184 } 1185 if (!copyOp) 1186 return failure(); 1187 LDBG("with copy " << *copyOp); 1188 1189 // Find the fill into `viewOrAlloc` without interleaved uses before the 1190 // copy. 1191 FillOp maybeFillOp; 1192 for (auto &u : viewOrAlloc.getUses()) { 1193 if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) { 1194 assert(newFillOp.output().getType().isa<MemRefType>()); 1195 if (newFillOp.output() != viewOrAlloc) 1196 continue; 1197 LDBG("fill candidate " << *newFillOp); 1198 if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView})) 1199 continue; 1200 maybeFillOp = newFillOp; 1201 break; 1202 } 1203 } 1204 // Ensure padding matches. 1205 if (maybeFillOp && xferOp.padding() != maybeFillOp.value()) 1206 return failure(); 1207 if (maybeFillOp) 1208 LDBG("with maybeFillOp " << *maybeFillOp); 1209 1210 // `in` is the subview that linalg.copy reads. Replace it. 
1211 Value in = copyOp.input(); 1212 1213 // linalg.copy + linalg.fill can be used to create a padded local buffer. 1214 // The `masked` attribute is only valid on this padded buffer. 1215 // When forwarding to vector.transfer_read, the attribute must be reset 1216 // conservatively. 1217 Value res = rewriter.create<vector::TransferReadOp>( 1218 xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(), 1219 xferOp.permutation_mapAttr(), xferOp.padding(), xferOp.mask(), 1220 // in_bounds is explicitly reset 1221 /*inBoundsAttr=*/ArrayAttr()); 1222 1223 if (maybeFillOp) 1224 rewriter.eraseOp(maybeFillOp); 1225 rewriter.eraseOp(copyOp); 1226 rewriter.replaceOp(xferOp, res); 1227 1228 return success(); 1229 } 1230 1231 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate, 1232 /// when available. 1233 LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite( 1234 vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const { 1235 // TODO: support mask. 1236 if (xferOp.mask()) 1237 return failure(); 1238 1239 // Transfer into `viewOrAlloc`. 1240 Value viewOrAlloc = xferOp.source(); 1241 if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() && 1242 !viewOrAlloc.getDefiningOp<memref::AllocOp>()) 1243 return failure(); 1244 1245 // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`. 1246 memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc); 1247 if (!subViewOp) 1248 return failure(); 1249 Value subView = subViewOp.getResult(); 1250 1251 // Find the copy from `subView` without interleaved uses. 1252 CopyOp copyOp; 1253 for (auto &u : subViewOp.getResult().getUses()) { 1254 if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) { 1255 if (newCopyOp.getInputOperand(0)->get() != subView) 1256 continue; 1257 if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView})) 1258 continue; 1259 copyOp = newCopyOp; 1260 break; 1261 } 1262 } 1263 if (!copyOp) 1264 return failure(); 1265 1266 // `out` is the subview copied into that we replace. 1267 assert(copyOp.output().getType().isa<MemRefType>()); 1268 Value out = copyOp.output(); 1269 1270 // Forward vector.transfer into copy. 1271 // linalg.copy + linalg.fill can be used to create a padded local buffer. 1272 // The `masked` attribute is only valid on this padded buffer. 1273 // When forwarding to vector.transfer_write, the attribute must be reset 1274 // conservatively. 1275 rewriter.create<vector::TransferWriteOp>( 1276 xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(), 1277 xferOp.permutation_mapAttr(), xferOp.mask(), 1278 // in_bounds is explicitly reset 1279 /*inBoundsAttr=*/ArrayAttr()); 1280 1281 rewriter.eraseOp(copyOp); 1282 rewriter.eraseOp(xferOp); 1283 1284 return success(); 1285 } 1286 1287 //===----------------------------------------------------------------------===// 1288 // Convolution vectorization patterns 1289 //===----------------------------------------------------------------------===// 1290 1291 template <int N> 1292 static void bindShapeDims(ShapedType shapedType) {} 1293 1294 template <int N, typename IntTy, typename... IntTy2> 1295 static void bindShapeDims(ShapedType shapedType, IntTy &val, IntTy2 &...vals) { 1296 val = shapedType.getShape()[N]; 1297 bindShapeDims<N + 1, IntTy2 &...>(shapedType, vals...); 1298 } 1299 1300 /// Bind a pack of int& to the leading dimensions of shapedType.getShape(). 1301 template <typename... 
IntTy> 1302 static void bindShapeDims(ShapedType shapedType, IntTy &...vals) { 1303 bindShapeDims<0>(shapedType, vals...); 1304 } 1305 1306 namespace { 1307 /// Generate a vector implementation for either: 1308 /// ``` 1309 /// Op def: ( n, w, c, kw, f ) 1310 /// Iters: ({Par(), Par(), Par(), Red(), Red()}) 1311 /// Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}} 1312 /// ``` 1313 /// kw is unrolled, w is unrolled iff dilationW > 1. 1314 /// 1315 /// or 1316 /// 1317 /// ``` 1318 /// Op def: ( n, w, c, kw ) 1319 /// Iters: ({Par(), Par(), Par(), Red()}) 1320 /// Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}} 1321 /// ``` 1322 /// kw is unrolled, w is unrolled iff dilationW > 1. 1323 struct Conv1DNwcGenerator : public StructuredGenerator<LinalgOp> { 1324 Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW, 1325 int dilationW) 1326 : StructuredGenerator<LinalgOp>(builder, linalgOp), valid(false), 1327 strideW(strideW), dilationW(dilationW) { 1328 // Determine whether `linalgOp` can be generated with this generator 1329 if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1) 1330 return; 1331 lhsShaped = linalgOp.inputs()[0]; 1332 rhsShaped = linalgOp.inputs()[1]; 1333 resShaped = linalgOp.outputs()[0]; 1334 lhsShapedType = lhsShaped.getType().dyn_cast<ShapedType>(); 1335 rhsShapedType = rhsShaped.getType().dyn_cast<ShapedType>(); 1336 resShapedType = resShaped.getType().dyn_cast<ShapedType>(); 1337 if (!lhsShapedType || !rhsShapedType || !resShapedType) 1338 return; 1339 if (lhsShapedType.getRank() != 3 || 1340 (rhsShapedType.getRank() != 2 && rhsShapedType.getRank() != 3) || 1341 resShapedType.getRank() != 3) 1342 return; 1343 1344 // Check for reduction `add` preceded by `mul`. 1345 Operation *reduceOp = matchLinalgReduction(linalgOp.getOutputOperand(0)); 1346 if (!reduceOp) 1347 return; 1348 llvm::Optional<vector::CombiningKind> maybeKind; 1349 maybeKind = getCombinerOpKind(reduceOp); 1350 if (!maybeKind || *maybeKind != vector::CombiningKind::ADD) 1351 return; 1352 maybeKind = getCombinerOpKind(&(linalgOp->getRegion(0).front().front())); 1353 if (!maybeKind || *maybeKind != vector::CombiningKind::MUL) 1354 return; 1355 1356 // The op is now known to be valid. 1357 valid = true; 1358 } 1359 1360 /// Generate a vector implementation for: 1361 /// ``` 1362 /// Op def: ( n, w, c, kw, f ) 1363 /// Iters: ({Par(), Par(), Par(), Red(), Red()}) 1364 /// Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}} 1365 /// ``` 1366 /// kw is always unrolled. 1367 /// TODO: w (resp. kw) is unrolled when the strideW ( resp. dilationW) is 1368 /// > 1. 1369 FailureOr<Operation *> conv() { 1370 if (!valid) 1371 return failure(); 1372 1373 int64_t nSize, wSize, cSize, kwSize, fSize; 1374 // kernel{kw, c, f} 1375 bindShapeDims(rhsShapedType, kwSize, cSize, fSize); 1376 // out{n, w, f} 1377 bindShapeDims(resShapedType, nSize, wSize); 1378 1379 vector::TransferWriteOp write; 1380 Value zero = builder.create<arith::ConstantIndexOp>(loc, 0); 1381 1382 // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1. 1383 // When strideW == 1, we can batch the contiguous loads and avoid 1384 // unrolling 1385 int64_t wSizeStep = strideW == 1 ? wSize : 1; 1386 1387 Type lhsEltType = lhsShapedType.getElementType(); 1388 Type rhsEltType = rhsShapedType.getElementType(); 1389 Type resEltType = resShapedType.getElementType(); 1390 VectorType lhsType = VectorType::get( 1391 {nSize, 1392 // iw = ow * sw + kw * dw - 1 1393 // (i.e. 
    VectorType lhsType = VectorType::get(
        {nSize,
         // iw = ow * sw + kw * dw - 1
         //   (i.e. 16 convolved with 3 (@stride 1 dilation 1) -> 14)
         // Perform the proper inclusive -> exclusive -> inclusive conversion.
         ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
         cSize},
        lhsEltType);
    VectorType rhsType = VectorType::get({kwSize, cSize, fSize}, rhsEltType);
    VectorType resType = VectorType::get({nSize, wSize, fSize}, resEltType);

    // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0, 0].
    Value lhs = builder.create<vector::TransferReadOp>(
        loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
    // Read rhs slice of size {kw, c, f} @ [0, 0, 0].
    Value rhs = builder.create<vector::TransferReadOp>(
        loc, rhsType, rhsShaped, ValueRange{zero, zero, zero});
    // Read res slice of size {n, w, f} @ [0, 0, 0].
    Value res = builder.create<vector::TransferReadOp>(
        loc, resType, resShaped, ValueRange{zero, zero, zero});

    //===------------------------------------------------------------------===//
    // Begin vector-only rewrite part
    //===------------------------------------------------------------------===//
    // Unroll along kw and read slices of lhs and rhs.
    SmallVector<Value> lhsVals, rhsVals, resVals;
    // Extract lhs slice of size {n, wSizeStep, c} @ [0, sw * w + dw * kw, 0].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
            loc, lhs,
            /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
            /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
            /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
      }
    }
    // Extract rhs slice of size {c, f} @ [kw].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      rhsVals.push_back(builder.create<vector::ExtractOp>(
          loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
    }
    // Extract res slice: {n, wSizeStep, f} @ [0, w, 0].
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
          loc, res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, fSize},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
    }

    auto linearIndex = [&](int64_t kw, int64_t w) {
      return kw * (wSize / wSizeStep) + w;
    };

    // Compute contraction: O{n, w, f} += I{n, sw * w + dw * kw, c} * F{c, f}
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        resVals[w] = conv1dSliceAsContraction(
            builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
      }
    }

    // Write back res slice: {n, wSizeStep, f} @ [0, w, 0].
    // This does not depend on kw.
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      res = builder.create<vector::InsertStridedSliceOp>(
          loc, resVals[w], res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1});
    }
    //===------------------------------------------------------------------===//
    // End vector-only rewrite part
    //===------------------------------------------------------------------===//

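    // Note (worked example, values assumed): with kwSize = 3 and strideW == 1
    // (so wSizeStep == wSize), the loops above emit 3 vector.contract ops that
    // all accumulate into the single slice resVals[0]; with strideW > 1 and
    // wSize = 4, they emit 3 * 4 = 12 contractions, one per (kw, w) pair.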
    // Write back res slice of size {n, w, f} @ [0, 0, 0].
    return builder
        .create<vector::TransferWriteOp>(loc, res, resShaped,
                                         ValueRange{zero, zero, zero})
        .getOperation();
  }

  // Create a contraction: lhs{n, w, c} * rhs{c, f} -> res{n, w, f}
  Value conv1dSliceAsContraction(OpBuilder &b, Location loc, Value lhs,
                                 Value rhs, Value res) {
    StringRef par = Par().strRef, red = Red().strRef;
    AffineExpr n, w, f, c;
    bindDims(ctx, n, w, f, c);
    return builder.create<vector::ContractionOp>(
        loc, lhs, rhs, res,
        /*indexingMaps=*/MapList{{n, w, c}, {c, f}, {n, w, f}},
        /*iteratorTypes=*/ArrayRef<StringRef>{par, par, par, red});
  }

  /// Generate a vector implementation for:
  /// ```
  ///   Op def: ( n, w, c, kw )
  ///   Iters:  ({Par(), Par(), Par(), Red()})
  ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
  /// ```
  /// kw is always unrolled.
  /// TODO: w (resp. kw) is unrolled when the strideW (resp. dilationW) is
  /// > 1.
  FailureOr<Operation *> depthwiseConv() {
    if (!valid)
      return failure();

    int64_t nSize, wSize, cSize, kwSize;
    // kernel{kw, c}
    bindShapeDims(rhsShapedType, kwSize, cSize);
    // out{n, w, c}
    bindShapeDims(resShapedType, nSize, wSize);

    vector::TransferWriteOp write;
    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);

    // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
    // When strideW == 1, we can batch the contiguous loads and avoid
    // unrolling.
    int64_t wSizeStep = strideW == 1 ? wSize : 1;

    Type lhsEltType = lhsShapedType.getElementType();
    Type rhsEltType = rhsShapedType.getElementType();
    Type resEltType = resShapedType.getElementType();
    VectorType lhsType = VectorType::get(
        {nSize,
         // iw = ow * sw + kw * dw - 1
         //   (i.e. 16 convolved with 3 (@stride 1 dilation 1) -> 14)
         ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
         cSize},
        lhsEltType);
    VectorType rhsType = VectorType::get({kwSize, cSize}, rhsEltType);
    VectorType resType = VectorType::get({nSize, wSize, cSize}, resEltType);

    // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0, 0].
    Value lhs = builder.create<vector::TransferReadOp>(
        loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
    // Read rhs slice of size {kw, c} @ [0, 0].
    Value rhs = builder.create<vector::TransferReadOp>(loc, rhsType, rhsShaped,
                                                       ValueRange{zero, zero});
    // Read res slice of size {n, w, c} @ [0, 0, 0].
    Value res = builder.create<vector::TransferReadOp>(
        loc, resType, resShaped, ValueRange{zero, zero, zero});

    //===------------------------------------------------------------------===//
    // Begin vector-only rewrite part
    //===------------------------------------------------------------------===//
    // Unroll along kw and read slices of lhs and rhs.
    SmallVector<Value> lhsVals, rhsVals, resVals;
    // Extract lhs slice of size {n, wSizeStep, c}
    // @ [0, sw * w + dw * kw, 0].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
            loc, lhs,
            /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
            /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
            /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
      }
    }
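    // Below, each rhs slice of size {c} is broadcast to the res slice type and
    // combined with the matching lhs slice via vector.fma (see
    // depthwiseConv1dSliceAsFma).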
    // Extract rhs slice of size {c} @ [kw].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      rhsVals.push_back(builder.create<vector::ExtractOp>(
          loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
    }
    // Extract res slice: {n, wSizeStep, c} @ [0, w, 0].
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
          loc, res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
    }

    auto linearIndex = [&](int64_t kw, int64_t w) {
      return kw * (wSize / wSizeStep) + w;
    };

    // Compute contraction: O{n, w, c} += I{n, sw * w + dw * kw, c} * F{c}
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        resVals[w] = depthwiseConv1dSliceAsFma(
            builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
      }
    }

    // Write back res slice: {n, wSizeStep, c} @ [0, w, 0].
    // This does not depend on kw.
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      res = builder.create<vector::InsertStridedSliceOp>(
          loc, resVals[w], res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1});
    }
    //===------------------------------------------------------------------===//
    // End vector-only rewrite part
    //===------------------------------------------------------------------===//

    // Write back res slice of size {n, w, c} @ [0, 0, 0].
    return builder
        .create<vector::TransferWriteOp>(loc, res, resShaped,
                                         ValueRange{zero, zero, zero})
        .getOperation();
  }

  /// Lower lhs{n, w, c} * rhs{c} -> res{n, w, c} to fma.
  Value depthwiseConv1dSliceAsFma(OpBuilder &b, Location loc, Value lhs,
                                  Value rhs, Value res) {
    Value bcast = builder.create<vector::BroadcastOp>(loc, res.getType(), rhs);
    return b.create<vector::FMAOp>(loc, lhs, bcast, res);
  }

  /// Entry point matching ops already in the common form:
  ///   {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
  /// No transposition is performed yet; if the layout does not match,
  /// vectorization fails.
  FailureOr<Operation *> generateConv() {
    AffineExpr n, w, f, kw, c;
    bindDims(ctx, n, w, f, kw, c);
    if (!iters({Par(), Par(), Par(), Red(), Red()}))
      return failure();

    // No transposition needed.
    if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                /*rhsIndex*/ {kw, c, f},
                /*resIndex*/ {n, w, f}}))
      return conv();
    return failure();
  }

  /// Entry point matching ops already in the common depthwise form:
  ///   {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
  /// No transposition is performed yet; if the layout does not match,
  /// vectorization fails.
  FailureOr<Operation *> generateDilatedConv() {
    AffineExpr n, w, c, kw;
    bindDims(ctx, n, w, c, kw);
    if (!iters({Par(), Par(), Par(), Red()}))
      return failure();

    // No transposition needed.
    if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                /*rhsIndex*/ {kw, c},
                /*resIndex*/ {n, w, c}}))
      return depthwiseConv();
    return failure();
  }

private:
  bool valid;
  int strideW, dilationW;
  Value lhsShaped, rhsShaped, resShaped;
  ShapedType lhsShapedType, rhsShapedType, resShapedType;
};
} // namespace

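// Note: the helper below does not require the `strides` / `dilations`
// attributes; when either is missing, a unit value is used. For example
// (values assumed), an op carrying `strides = dense<2>` constructs the
// generator with strideW == 2.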
/// Helper function to vectorize a LinalgOp with convolution semantics.
// TODO: extend the generic vectorization to support windows and drop this.
static FailureOr<Operation *> vectorizeConvolution(OpBuilder &b, LinalgOp op) {
  // The ConvolutionOpInterface gives us guarantees of existence for
  // strides/dilations. However, we do not need to rely on them: we use them if
  // present and otherwise fall back to the default, letting the generic
  // convolution matcher in Conv1DNwcGenerator succeed or fail.
  auto strides = op->getAttrOfType<DenseIntElementsAttr>("strides");
  auto dilations = op->getAttrOfType<DenseIntElementsAttr>("dilations");
  auto stride = strides ? *strides.getValues<uint64_t>().begin() : 1;
  auto dilation = dilations ? *dilations.getValues<uint64_t>().begin() : 1;
  Conv1DNwcGenerator e(b, op, stride, dilation);
  auto res = e.generateConv();
  if (succeeded(res))
    return res;
  return e.generateDilatedConv();
}

/// Rewrite pattern wrapping `vectorizeConvolution`: erases the original op
/// when the vectorized form produces no results, otherwise replaces it with
/// the single vectorized result.
struct VectorizeConvolution : public OpInterfaceRewritePattern<LinalgOp> {
  using OpInterfaceRewritePattern::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(LinalgOp op,
                                PatternRewriter &rewriter) const override {
    FailureOr<Operation *> resultOrFail = vectorizeConvolution(rewriter, op);
    if (failed(resultOrFail))
      return failure();
    Operation *newOp = *resultOrFail;
    if (newOp->getNumResults() == 0) {
      rewriter.eraseOp(op.getOperation());
      return success();
    }
    assert(newOp->getNumResults() == 1 && "expected single result");
    rewriter.replaceOp(op.getOperation(), newOp->getResult(0));
    return success();
  }
};

void mlir::linalg::populateConvolutionVectorizationPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<VectorizeConvolution>(patterns.getContext(), benefit);
}
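// Illustrative usage of the population entry point above (sketch only, not
// part of this file; assumes a greedy rewrite driver anchored on `funcOp`):
//   RewritePatternSet patterns(funcOp.getContext());
//   linalg::populateConvolutionVectorizationPatterns(patterns, /*benefit=*/1);
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));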