//===- VectorTransforms.cpp - Conversion within the Vector dialect --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent rewrites as 1->N patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"

#include <type_traits>

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Arithmetic/Utils/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/VectorInterfaces.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "vector-to-vector"

using namespace mlir;
using namespace mlir::vector;

// Helper to find an index in an affine map.
static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      return i;
  }
  return None;
}

// Helper to construct iterator types with one index removed.
static SmallVector<Attribute, 4> adjustIter(ArrayAttr iteratorTypes,
                                            int64_t index) {
  SmallVector<Attribute, 4> results;
  for (const auto &it : llvm::enumerate(iteratorTypes)) {
    int64_t idx = it.index();
    if (idx == index)
      continue;
    results.push_back(it.value());
  }
  return results;
}

// Helper to construct an affine map with one index removed.
static AffineMap adjustMap(AffineMap map, int64_t index,
                           PatternRewriter &rewriter) {
  auto *ctx = rewriter.getContext();
  SmallVector<AffineExpr, 4> results;
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      continue;
    // Re-insert remaining indices, but renamed when occurring
    // after the removed index.
    auto targetExpr = getAffineDimExpr(idx < index ? idx : idx - 1, ctx);
    results.push_back(targetExpr);
  }
  return AffineMap::get(map.getNumDims() - 1, 0, results, ctx);
}

// Helper method to possibly drop a dimension in a load.
// TODO
static Value reshapeLoad(Location loc, Value val, VectorType type,
                         int64_t index, int64_t pos,
                         PatternRewriter &rewriter) {
  if (index == -1)
    return val;
  Type lowType = VectorType::Builder(type).dropDim(0);
  // At extraction dimension?
  if (index == 0) {
    auto posAttr = rewriter.getI64ArrayAttr(pos);
    return rewriter.create<vector::ExtractOp>(loc, lowType, val, posAttr);
  }
  // Unroll leading dimensions.
  VectorType vType = lowType.cast<VectorType>();
  Type resType = VectorType::Builder(type).dropDim(index);
  auto resVectorType = resType.cast<VectorType>();
  Value result = rewriter.create<arith::ConstantOp>(
      loc, resVectorType, rewriter.getZeroAttr(resVectorType));
  for (int64_t d = 0, e = resVectorType.getDimSize(0); d < e; d++) {
    auto posAttr = rewriter.getI64ArrayAttr(d);
    Value ext = rewriter.create<vector::ExtractOp>(loc, vType, val, posAttr);
    Value load = reshapeLoad(loc, ext, vType, index - 1, pos, rewriter);
    result = rewriter.create<vector::InsertOp>(loc, resVectorType, load, result,
                                               posAttr);
  }
  return result;
}

// Helper method to possibly drop a dimension in a store.
// TODO
static Value reshapeStore(Location loc, Value val, Value result,
                          VectorType type, int64_t index, int64_t pos,
                          PatternRewriter &rewriter) {
  // Unmodified?
  if (index == -1)
    return val;
  // At insertion dimension?
  if (index == 0) {
    auto posAttr = rewriter.getI64ArrayAttr(pos);
    return rewriter.create<vector::InsertOp>(loc, type, val, result, posAttr);
  }
  // Unroll leading dimensions.
  Type lowType = VectorType::Builder(type).dropDim(0);
  VectorType vType = lowType.cast<VectorType>();
  Type insType = VectorType::Builder(vType).dropDim(0);
  for (int64_t d = 0, e = type.getDimSize(0); d < e; d++) {
    auto posAttr = rewriter.getI64ArrayAttr(d);
    Value ext = rewriter.create<vector::ExtractOp>(loc, vType, result, posAttr);
    Value ins = rewriter.create<vector::ExtractOp>(loc, insType, val, posAttr);
    Value sto = reshapeStore(loc, ins, ext, vType, index - 1, pos, rewriter);
    result = rewriter.create<vector::InsertOp>(loc, type, sto, result, posAttr);
  }
  return result;
}

template <typename IntType>
static SmallVector<IntType, 4> extractVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(llvm::map_range(
      arrayAttr.getAsRange<IntegerAttr>(),
      [](IntegerAttr attr) { return static_cast<IntType>(attr.getInt()); }));
}

/// Helper to create the arithmetic operation associated with a kind of
/// contraction.
static Optional<Value> createContractArithOp(Location loc, Value x, Value y,
                                             Value acc,
                                             vector::CombiningKind kind,
                                             PatternRewriter &rewriter,
                                             bool isInt) {
  using vector::CombiningKind;
  Value mul;
  if (isInt) {
    if (kind == CombiningKind::MINF || kind == CombiningKind::MAXF)
      // Only valid for floating point types.
      return Optional<Value>();
    mul = rewriter.create<arith::MulIOp>(loc, x, y);
  } else {
    // Float case.
    if (kind == CombiningKind::AND || kind == CombiningKind::MINUI ||
        kind == CombiningKind::MINSI || kind == CombiningKind::MAXUI ||
        kind == CombiningKind::MAXSI || kind == CombiningKind::OR ||
        kind == CombiningKind::XOR)
      // Only valid for integer types.
      return Optional<Value>();
    // Special case for fused multiply-add.
    if (acc && acc.getType().isa<VectorType>() && kind == CombiningKind::ADD) {
      return Optional<Value>(rewriter.create<vector::FMAOp>(loc, x, y, acc));
    }
    mul = rewriter.create<arith::MulFOp>(loc, x, y);
  }
  if (!acc)
    return Optional<Value>(mul);
  return makeArithReduction(rewriter, loc, kind, mul, acc);
}

/// Return the positions of the reductions in the given map.
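/// For example, given the map (d0, d1, d2) -> (d0, d2) with iterator types
/// ["parallel", "reduction", "reduction"], only result position 1 (which maps
/// to the reduction dimension d2) is returned.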
static SmallVector<int64_t> getReductionIndex(AffineMap map,
                                              ArrayAttr iteratorTypes) {
  SmallVector<int64_t> dimsIdx;
  for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
    if (isReductionIterator(iteratorTypes[map.getDimPosition(i)]))
      dimsIdx.push_back(i);
  }
  return dimsIdx;
}

/// Look for a given dimension in an affine map and return its position. Return
/// llvm::None if the dimension is not in the map results.
static llvm::Optional<unsigned> getDimPosition(AffineMap map, unsigned dim) {
  for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
    if (map.getDimPosition(i) == dim)
      return i;
  }
  return llvm::None;
}

namespace {

/// ShapeCastOpFolder folds cancelling ShapeCastOps away.
//
// Example:
//
//  The following MLIR with cancelling ShapeCastOps:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = shape_cast %0 : vector<5x4x2xf32> to vector<20x2xf32>
//   %2 = shape_cast %1 : vector<20x2xf32> to vector<5x4x2xf32>
//   %3 = user %2 : vector<5x4x2xf32>
//
//  Should canonicalize to the following:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = user %0 : vector<5x4x2xf32>
//
struct ShapeCastOpFolder : public OpRewritePattern<vector::ShapeCastOp> {
  using OpRewritePattern<vector::ShapeCastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp shapeCastOp,
                                PatternRewriter &rewriter) const override {
    // Check if 'shapeCastOp' has vector source/result type.
    auto sourceVectorType =
        shapeCastOp.getSource().getType().dyn_cast_or_null<VectorType>();
    auto resultVectorType =
        shapeCastOp.getResult().getType().dyn_cast_or_null<VectorType>();
    if (!sourceVectorType || !resultVectorType)
      return failure();

    // Check if shape cast op source operand is also a shape cast op.
    auto sourceShapeCastOp = dyn_cast_or_null<vector::ShapeCastOp>(
        shapeCastOp.getSource().getDefiningOp());
    if (!sourceShapeCastOp)
      return failure();
    auto operandSourceVectorType =
        sourceShapeCastOp.getSource().getType().cast<VectorType>();
    auto operandResultVectorType = sourceShapeCastOp.getType();

    // Check if shape cast operations invert each other.
    if (operandSourceVectorType != resultVectorType ||
        operandResultVectorType != sourceVectorType)
      return failure();

    rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.getSource());
    return success();
  }
};

/// Progressive lowering of BroadcastOp.
class BroadcastOpLowering : public OpRewritePattern<vector::BroadcastOp> {
public:
  using OpRewritePattern<vector::BroadcastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BroadcastOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    VectorType dstType = op.getVectorType();
    VectorType srcType = op.getSourceType().dyn_cast<VectorType>();
    Type eltType = dstType.getElementType();

    // Scalar to any vector can use splat.
    if (!srcType) {
      rewriter.replaceOpWithNewOp<vector::SplatOp>(op, dstType, op.getSource());
      return success();
    }

    // Determine rank of source and destination.
    int64_t srcRank = srcType.getRank();
    int64_t dstRank = dstType.getRank();

    // Stretching scalar inside vector (e.g. vector<1xf32>) can use splat.
    if (srcRank <= 1 && dstRank == 1) {
      Value ext;
      if (srcRank == 0)
        ext = rewriter.create<vector::ExtractElementOp>(loc, op.getSource());
      else
        ext = rewriter.create<vector::ExtractOp>(loc, op.getSource(), 0);
      rewriter.replaceOpWithNewOp<vector::SplatOp>(op, dstType, ext);
      return success();
    }

    // Duplicate this rank.
    // For example:
    //   %x = broadcast %y  : k-D to n-D, k < n
    // becomes:
    //   %b = broadcast %y  : k-D to (n-1)-D
    //   %x = [%b,%b,%b,%b] : n-D
    // becomes:
    //   %b = [%y,%y]       : (n-1)-D
    //   %x = [%b,%b,%b,%b] : n-D
    if (srcRank < dstRank) {
      // Duplication.
      VectorType resType =
          VectorType::get(dstType.getShape().drop_front(), eltType);
      Value bcst =
          rewriter.create<vector::BroadcastOp>(loc, resType, op.getSource());
      Value result = rewriter.create<arith::ConstantOp>(
          loc, dstType, rewriter.getZeroAttr(dstType));
      for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d)
        result = rewriter.create<vector::InsertOp>(loc, bcst, result, d);
      rewriter.replaceOp(op, result);
      return success();
    }

    // Find non-matching dimension, if any.
    assert(srcRank == dstRank);
    int64_t m = -1;
    for (int64_t r = 0; r < dstRank; r++)
      if (srcType.getDimSize(r) != dstType.getDimSize(r)) {
        m = r;
        break;
      }

    // All trailing dimensions are the same. Simply pass through.
    if (m == -1) {
      rewriter.replaceOp(op, op.getSource());
      return success();
    }

    // Any non-matching dimension forces a stretch along this rank.
    // For example:
    //   %x = broadcast %y : vector<4x1x2xf32> to vector<4x2x2xf32>
    // becomes:
    //   %a = broadcast %y[0] : vector<1x2xf32> to vector<2x2xf32>
    //   %b = broadcast %y[1] : vector<1x2xf32> to vector<2x2xf32>
    //   %c = broadcast %y[2] : vector<1x2xf32> to vector<2x2xf32>
    //   %d = broadcast %y[3] : vector<1x2xf32> to vector<2x2xf32>
    //   %x = [%a,%b,%c,%d]
    // becomes:
    //   %u = broadcast %y[0][0] : vector<2xf32> to vector<2x2xf32>
    //   %v = broadcast %y[1][0] : vector<2xf32> to vector<2x2xf32>
    //   %a = [%u, %v]
    //   ..
    //   %x = [%a,%b,%c,%d]
    VectorType resType =
        VectorType::get(dstType.getShape().drop_front(), eltType);
    Value result = rewriter.create<arith::ConstantOp>(
        loc, dstType, rewriter.getZeroAttr(dstType));
    if (m == 0) {
      // Stretch at start.
      Value ext = rewriter.create<vector::ExtractOp>(loc, op.getSource(), 0);
      Value bcst = rewriter.create<vector::BroadcastOp>(loc, resType, ext);
      for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d)
        result = rewriter.create<vector::InsertOp>(loc, bcst, result, d);
    } else {
      // Stretch not at start.
      for (int64_t d = 0, dim = dstType.getDimSize(0); d < dim; ++d) {
        Value ext = rewriter.create<vector::ExtractOp>(loc, op.getSource(), d);
        Value bcst = rewriter.create<vector::BroadcastOp>(loc, resType, ext);
        result = rewriter.create<vector::InsertOp>(loc, bcst, result, d);
      }
    }
    rewriter.replaceOp(op, result);
    return success();
  }
};

/// Given a 'transpose' pattern, prune the rightmost dimensions that are not
/// transposed.
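/// For example, the pattern [1, 0, 2, 3] is pruned to [1, 0]: the trailing
/// dimensions 2 and 3 already sit at their own positions, so they can stay in
/// vector form during the unrolled transpose generated below.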
void pruneNonTransposedDims(ArrayRef<int64_t> transpose,
                            SmallVectorImpl<int64_t> &result) {
  size_t numTransposedDims = transpose.size();
  for (size_t transpDim : llvm::reverse(transpose)) {
    if (transpDim != numTransposedDims - 1)
      break;
    numTransposedDims--;
  }

  result.append(transpose.begin(), transpose.begin() + numTransposedDims);
}

/// Progressive lowering of TransposeOp.
/// One:
///   %x = vector.transpose %y, [1, 0]
/// is replaced by:
///   %z = arith.constant dense<0.000000e+00>
///   %0 = vector.extract %y[0, 0]
///   %1 = vector.insert %0, %z [0, 0]
///   ..
///   %x = vector.insert .., .. [.., ..]
class TransposeOpLowering : public OpRewritePattern<vector::TransposeOp> {
public:
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  TransposeOpLowering(vector::VectorTransformsOptions vectorTransformOptions,
                      MLIRContext *context)
      : OpRewritePattern<vector::TransposeOp>(context),
        vectorTransformOptions(vectorTransformOptions) {}

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();

    Value input = op.getVector();
    VectorType inputType = op.getVectorType();
    VectorType resType = op.getResultType();

    // Set up convenience transposition table.
    SmallVector<int64_t, 4> transp;
    for (auto attr : op.getTransp())
      transp.push_back(attr.cast<IntegerAttr>().getInt());

    if (vectorTransformOptions.vectorTransposeLowering ==
            vector::VectorTransposeLowering::Shuffle &&
        resType.getRank() == 2 && transp[0] == 1 && transp[1] == 0)
      return rewriter.notifyMatchFailure(
          op, "Options specify lowering to shuffle");

    // Handle a true 2-D matrix transpose differently when requested.
    if (vectorTransformOptions.vectorTransposeLowering ==
            vector::VectorTransposeLowering::Flat &&
        resType.getRank() == 2 && transp[0] == 1 && transp[1] == 0) {
      Type flattenedType =
          VectorType::get(resType.getNumElements(), resType.getElementType());
      auto matrix =
          rewriter.create<vector::ShapeCastOp>(loc, flattenedType, input);
      auto rows = rewriter.getI32IntegerAttr(resType.getShape()[0]);
      auto columns = rewriter.getI32IntegerAttr(resType.getShape()[1]);
      Value trans = rewriter.create<vector::FlatTransposeOp>(
          loc, flattenedType, matrix, rows, columns);
      rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(op, resType, trans);
      return success();
    }

    // Generate unrolled extract/insert ops. We do not unroll the rightmost
    // (i.e., highest-order) dimensions that are not transposed and leave them
    // in vector form to improve performance. Therefore, we prune those
    // dimensions from the shape/transpose data structures used to generate the
    // extract/insert ops.
    SmallVector<int64_t, 4> prunedTransp;
    pruneNonTransposedDims(transp, prunedTransp);
    size_t numPrunedDims = transp.size() - prunedTransp.size();
    auto prunedInShape = inputType.getShape().drop_back(numPrunedDims);
    SmallVector<int64_t, 4> ones(prunedInShape.size(), 1);
    auto prunedInStrides = computeStrides(prunedInShape, ones);

    // Generates the extract/insert operations for every scalar/vector element
    // of the leftmost transposed dimensions. We traverse every transpose
    // element using a linearized index that we delinearize to generate the
    // appropriate indices for the extract/insert operations.
    Value result = rewriter.create<arith::ConstantOp>(
        loc, resType, rewriter.getZeroAttr(resType));
    int64_t numTransposedElements = ShapedType::getNumElements(prunedInShape);

    for (int64_t linearIdx = 0; linearIdx < numTransposedElements;
         ++linearIdx) {
      auto extractIdxs = delinearize(prunedInStrides, linearIdx);
      SmallVector<int64_t, 4> insertIdxs(extractIdxs);
      applyPermutationToVector(insertIdxs, prunedTransp);
      Value extractOp =
          rewriter.create<vector::ExtractOp>(loc, input, extractIdxs);
      result =
          rewriter.create<vector::InsertOp>(loc, extractOp, result, insertIdxs);
    }

    rewriter.replaceOp(op, result);
    return success();
  }

private:
  /// Options to control the vector patterns.
  vector::VectorTransformsOptions vectorTransformOptions;
};

/// Rewrite a 2-D vector.transpose as a sequence of:
///   vector.shape_cast 2D -> 1D
///   vector.shuffle
///   vector.shape_cast 1D -> 2D
class TransposeOp2DToShuffleLowering
    : public OpRewritePattern<vector::TransposeOp> {
public:
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  TransposeOp2DToShuffleLowering(
      vector::VectorTransformsOptions vectorTransformOptions,
      MLIRContext *context)
      : OpRewritePattern<vector::TransposeOp>(context),
        vectorTransformOptions(vectorTransformOptions) {}

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();

    VectorType srcType = op.getVectorType();
    if (srcType.getRank() != 2)
      return rewriter.notifyMatchFailure(op, "Not a 2D transpose");

    SmallVector<int64_t, 4> transp;
    for (auto attr : op.getTransp())
      transp.push_back(attr.cast<IntegerAttr>().getInt());
    if (transp[0] != 1 && transp[1] != 0)
      return rewriter.notifyMatchFailure(op, "Not a 2D transpose permutation");

    if (vectorTransformOptions.vectorTransposeLowering !=
        VectorTransposeLowering::Shuffle)
      return rewriter.notifyMatchFailure(op, "Options do not ask for Shuffle");

    int64_t m = srcType.getShape().front(), n = srcType.getShape().back();
    Value casted = rewriter.create<vector::ShapeCastOp>(
        loc, VectorType::get({m * n}, srcType.getElementType()),
        op.getVector());
    SmallVector<int64_t> mask;
    mask.reserve(m * n);
    for (int64_t j = 0; j < n; ++j)
      for (int64_t i = 0; i < m; ++i)
        mask.push_back(i * n + j);

    Value shuffled =
        rewriter.create<vector::ShuffleOp>(loc, casted, casted, mask);
    rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(op, op.getResultType(),
                                                     shuffled);

    return success();
  }

private:
  /// Options to control the vector patterns.
  vector::VectorTransformsOptions vectorTransformOptions;
};

/// Progressive lowering of OuterProductOp.
/// One:
///   %x = vector.outerproduct %lhs, %rhs, %acc
/// is replaced by:
///   %z = zero-result
///   %0 = vector.extract %lhs[0]
///   %1 = vector.broadcast %0
///   %2 = vector.extract %acc[0]
///   %3 = vector.fma %1, %rhs, %2
///   %4 = vector.insert %3, %z[0]
///   ..
///   %x = vector.insert %.., %..[N-1]
///
class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
public:
  using OpRewritePattern<vector::OuterProductOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::OuterProductOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();

    VectorType lhsType = op.getOperandVectorTypeLHS();
    VectorType rhsType = op.getOperandTypeRHS().dyn_cast<VectorType>();
    VectorType resType = op.getVectorType();
    Type eltType = resType.getElementType();
    bool isInt = eltType.isa<IntegerType, IndexType>();
    Value acc = (op.getAcc().empty()) ? nullptr : op.getAcc()[0];
    vector::CombiningKind kind = op.getKind();

    if (!rhsType) {
      // Special case: AXPY operation.
      Value b = rewriter.create<vector::BroadcastOp>(loc, lhsType, op.getRhs());
      Optional<Value> mult = createContractArithOp(loc, op.getLhs(), b, acc,
                                                   kind, rewriter, isInt);
      if (!mult.has_value())
        return failure();
      rewriter.replaceOp(op, mult.getValue());
      return success();
    }

    Value result = rewriter.create<arith::ConstantOp>(
        loc, resType, rewriter.getZeroAttr(resType));
    for (int64_t d = 0, e = resType.getDimSize(0); d < e; ++d) {
      auto pos = rewriter.getI64ArrayAttr(d);
      Value x =
          rewriter.create<vector::ExtractOp>(loc, eltType, op.getLhs(), pos);
      Value a = rewriter.create<vector::BroadcastOp>(loc, rhsType, x);
      Value r = nullptr;
      if (acc)
        r = rewriter.create<vector::ExtractOp>(loc, rhsType, acc, pos);
      Optional<Value> m =
          createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt);
      if (!m.has_value())
        return failure();
      result = rewriter.create<vector::InsertOp>(loc, resType, m.getValue(),
                                                 result, pos);
    }
    rewriter.replaceOp(op, result);
    return success();
  }
};

/// Lower vector.contract with all reduction dimensions of size 1 to
/// elementwise ops when possible.
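/// For example (sizes and maps chosen for illustration), a contraction like:
/// ```
///   %0 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d0, d2)>,
///          affine_map<(d0, d1, d2) -> (d2, d1)>,
///          affine_map<(d0, d1, d2) -> (d0, d1)>],
///     iterator_types = ["parallel", "parallel", "reduction"],
///     kind = add} %lhs, %rhs, %acc
///     : vector<4x1xf32>, vector<1x4xf32> into vector<4x4xf32>
/// ```
/// has a single reduction dimension of size 1, so it can be rewritten as
/// broadcasts/transposes of the operands followed by an elementwise
/// multiply-add on vector<4x4xf32>.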
struct ContractOpToElementwise
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;
  using FilterConstraintType =
      std::function<LogicalResult(vector::ContractionOp op)>;
  static LogicalResult defaultFilter(vector::ContractionOp op) {
    return success();
  }
  ContractOpToElementwise(
      vector::VectorTransformsOptions vectorTransformOptions,
      MLIRContext *context,
      const FilterConstraintType &constraint = defaultFilter)
      : OpRewritePattern<vector::ContractionOp>(context),
        vectorTransformOptions(vectorTransformOptions), filter(constraint) {}

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    // TODO: implement masks
    if (llvm::size(contractOp.getMasks()) != 0)
      return failure();

    if (failed(filter(contractOp)))
      return failure();

    if (vectorTransformOptions.vectorContractLowering !=
        vector::VectorContractLowering::ParallelArith)
      return failure();
    ArrayRef<int64_t> lhsShape = contractOp.getLhsType().getShape();
    ArrayRef<int64_t> rhsShape = contractOp.getRhsType().getShape();
    AffineMap lhsMap = contractOp.getIndexingMaps()[0];
    AffineMap rhsMap = contractOp.getIndexingMaps()[1];
    SmallVector<int64_t> lhsReductionDims =
        getReductionIndex(lhsMap, contractOp.getIteratorTypes());
    SmallVector<int64_t> rhsReductionDims =
        getReductionIndex(rhsMap, contractOp.getIteratorTypes());
    // All the reduction dimensions must be of size 1.
    for (int64_t dim : lhsReductionDims) {
      if (lhsShape[dim] != 1)
        return failure();
    }
    for (int64_t dim : rhsReductionDims) {
      if (rhsShape[dim] != 1)
        return failure();
    }
    AffineMap accMap = contractOp.getIndexingMaps()[2];
    unsigned numParallelDims = accMap.getNumResults();
    unsigned numLhsDimToBroadcast =
        numParallelDims - (lhsMap.getNumResults() - lhsReductionDims.size());
    unsigned numRhsDimToBroadcast =
        numParallelDims - (rhsMap.getNumResults() - rhsReductionDims.size());
    SmallVector<int64_t> lhsDims;
    SmallVector<int64_t> lhsTranspose;
    SmallVector<int64_t> rhsDims;
    SmallVector<int64_t> rhsTranspose;
    for (int64_t dim : lhsReductionDims)
      lhsTranspose.push_back(numLhsDimToBroadcast + dim);
    for (int64_t dim : rhsReductionDims)
      rhsTranspose.push_back(numRhsDimToBroadcast + dim);
    // Loop through the parallel dimensions to calculate the dimensions to
    // broadcast and to permute in order to extract only parallel dimensions.
    for (unsigned i = 0; i < numParallelDims; i++) {
      llvm::Optional<unsigned> lhsDim =
          getDimPosition(lhsMap, accMap.getDimPosition(i));
      if (lhsDim) {
        lhsTranspose.push_back(numLhsDimToBroadcast + *lhsDim);
      } else {
        // If the parallel dimension doesn't exist we will have to broadcast it.
        lhsDims.push_back(
            contractOp.getResultType().cast<VectorType>().getDimSize(i));
        lhsTranspose.push_back(lhsDims.size() - 1);
      }
      llvm::Optional<unsigned> rhsDim =
          getDimPosition(rhsMap, accMap.getDimPosition(i));
      if (rhsDim) {
        rhsTranspose.push_back(numRhsDimToBroadcast + *rhsDim);
      } else {
        // If the parallel dimension doesn't exist we will have to broadcast it.
        rhsDims.push_back(
            contractOp.getResultType().cast<VectorType>().getDimSize(i));
        rhsTranspose.push_back(rhsDims.size() - 1);
      }
    }
    Value newLhs = contractOp.getLhs();
    Value newRhs = contractOp.getRhs();
    Location loc = contractOp.getLoc();
    if (!lhsDims.empty()) {
      lhsDims.append(lhsShape.begin(), lhsShape.end());
      auto expandedType =
          VectorType::get(lhsDims, contractOp.getLhsType().getElementType());
      newLhs = rewriter.create<vector::BroadcastOp>(loc, expandedType, newLhs);
    }
    if (!rhsDims.empty()) {
      rhsDims.append(rhsShape.begin(), rhsShape.end());
      auto expandedType =
          VectorType::get(rhsDims, contractOp.getRhsType().getElementType());
      newRhs = rewriter.create<vector::BroadcastOp>(loc, expandedType, newRhs);
    }
    bool isInt = contractOp.getLhsType().getElementType().isIntOrIndex();
    newLhs = rewriter.create<vector::TransposeOp>(loc, newLhs, lhsTranspose);
    newRhs = rewriter.create<vector::TransposeOp>(loc, newRhs, rhsTranspose);
    SmallVector<int64_t, 4> lhsOffsets(lhsReductionDims.size(), 0);
    SmallVector<int64_t, 4> rhsOffsets(rhsReductionDims.size(), 0);
    newLhs = rewriter.create<vector::ExtractOp>(
        loc, newLhs, rewriter.getI64ArrayAttr(lhsOffsets));
    newRhs = rewriter.create<vector::ExtractOp>(
        loc, newRhs, rewriter.getI64ArrayAttr(rhsOffsets));
    Optional<Value> result =
        createContractArithOp(loc, newLhs, newRhs, contractOp.getAcc(),
                              contractOp.getKind(), rewriter, isInt);
    rewriter.replaceOp(contractOp, {*result});
    return success();
  }

private:
  /// Options to control the vector patterns.
  vector::VectorTransformsOptions vectorTransformOptions;
  FilterConstraintType filter;
};

/// Progressive lowering of ConstantMaskOp.
/// One:
///   %x = vector.constant_mask [a,b]
/// is replaced by:
///   %z = zero-result
///   %l = vector.constant_mask [b]
///   %4 = vector.insert %l, %z[0]
///   ..
///   %x = vector.insert %l, %..[a-1]
/// until a one-dimensional vector is reached. All these operations
/// will be folded at LLVM IR level.
class ConstantMaskOpLowering : public OpRewritePattern<vector::ConstantMaskOp> {
public:
  using OpRewritePattern<vector::ConstantMaskOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ConstantMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    auto dstType = op.getType();
    auto eltType = dstType.getElementType();
    auto dimSizes = op.getMaskDimSizes();
    int64_t rank = dstType.getRank();

    if (rank == 0) {
      assert(dimSizes.size() == 1 &&
             "Expected exactly one dim size for a 0-D vector");
      bool value = dimSizes[0].cast<IntegerAttr>().getInt() == 1;
      rewriter.replaceOpWithNewOp<arith::ConstantOp>(
          op, dstType,
          DenseIntElementsAttr::get(
              VectorType::get(ArrayRef<int64_t>{}, rewriter.getI1Type()),
              ArrayRef<bool>{value}));
      return success();
    }

    // Scalable constant masks can only be lowered for the "none set" case.
    if (dstType.cast<VectorType>().isScalable()) {
      rewriter.replaceOpWithNewOp<arith::ConstantOp>(
          op, DenseElementsAttr::get(dstType, false));
      return success();
    }

    int64_t trueDim = std::min(dstType.getDimSize(0),
                               dimSizes[0].cast<IntegerAttr>().getInt());

    if (rank == 1) {
      // Express constant 1-D case in explicit vector form:
      //   [T,..,T,F,..,F].
      SmallVector<bool, 4> values(dstType.getDimSize(0));
      for (int64_t d = 0; d < trueDim; d++)
        values[d] = true;
      rewriter.replaceOpWithNewOp<arith::ConstantOp>(
          op, dstType, rewriter.getBoolVectorAttr(values));
      return success();
    }

    VectorType lowType =
        VectorType::get(dstType.getShape().drop_front(), eltType);
    SmallVector<int64_t, 4> newDimSizes;
    for (int64_t r = 1; r < rank; r++)
      newDimSizes.push_back(dimSizes[r].cast<IntegerAttr>().getInt());
    Value trueVal = rewriter.create<vector::ConstantMaskOp>(
        loc, lowType, rewriter.getI64ArrayAttr(newDimSizes));
    Value result = rewriter.create<arith::ConstantOp>(
        loc, dstType, rewriter.getZeroAttr(dstType));
    for (int64_t d = 0; d < trueDim; d++) {
      auto pos = rewriter.getI64ArrayAttr(d);
      result =
          rewriter.create<vector::InsertOp>(loc, dstType, trueVal, result, pos);
    }
    rewriter.replaceOp(op, result);
    return success();
  }
};

/// Progressive lowering of CreateMaskOp.
/// One:
///   %x = vector.create_mask %a, ... : vector<dx...>
/// is replaced by:
///   %l = vector.create_mask ... : vector<...>  ; one lower rank
///   %0 = arith.cmpi "slt", %ci, %a    |
///   %1 = select %0, %l, %zeroes       |
///   %r = vector.insert %1, %pr [i]    | d-times
///   %x = ....
/// until a one-dimensional vector is reached.
class CreateMaskOpLowering : public OpRewritePattern<vector::CreateMaskOp> {
public:
  using OpRewritePattern<vector::CreateMaskOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getResult().getType().cast<VectorType>();
    int64_t rank = dstType.getRank();
    if (rank <= 1)
      return rewriter.notifyMatchFailure(
          op, "0-D and 1-D vectors are handled separately");

    auto loc = op.getLoc();
    auto eltType = dstType.getElementType();
    int64_t dim = dstType.getDimSize(0);
    Value idx = op.getOperand(0);

    VectorType lowType =
        VectorType::get(dstType.getShape().drop_front(), eltType);
    Value trueVal = rewriter.create<vector::CreateMaskOp>(
        loc, lowType, op.getOperands().drop_front());
    Value falseVal = rewriter.create<arith::ConstantOp>(
        loc, lowType, rewriter.getZeroAttr(lowType));
    Value result = rewriter.create<arith::ConstantOp>(
        loc, dstType, rewriter.getZeroAttr(dstType));
    for (int64_t d = 0; d < dim; d++) {
      Value bnd =
          rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(d));
      Value val = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
                                                 bnd, idx);
      Value sel = rewriter.create<arith::SelectOp>(loc, val, trueVal, falseVal);
      auto pos = rewriter.getI64ArrayAttr(d);
      result =
          rewriter.create<vector::InsertOp>(loc, dstType, sel, result, pos);
    }
    rewriter.replaceOp(op, result);
    return success();
  }
};

/// ShapeCastOp 2D -> 1D downcast serves the purpose of flattening 2-D to 1-D
/// vectors progressively on the way to target llvm.matrix intrinsics.
/// This iterates over the most major dimension of the 2-D vector and performs
/// rewrites into:
///   vector.extract from 2-D + vector.insert_strided_slice offset into 1-D
class ShapeCastOp2DDownCastRewritePattern
    : public OpRewritePattern<vector::ShapeCastOp> {
public:
  using OpRewritePattern<vector::ShapeCastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp op,
                                PatternRewriter &rewriter) const override {
    auto sourceVectorType = op.getSourceVectorType();
    auto resultVectorType = op.getResultVectorType();
    if (sourceVectorType.getRank() != 2 || resultVectorType.getRank() != 1)
      return failure();

    auto loc = op.getLoc();
    Value desc = rewriter.create<arith::ConstantOp>(
        loc, resultVectorType, rewriter.getZeroAttr(resultVectorType));
    unsigned mostMinorVectorSize = sourceVectorType.getShape()[1];
    for (int64_t i = 0, e = sourceVectorType.getShape().front(); i != e; ++i) {
      Value vec = rewriter.create<vector::ExtractOp>(loc, op.getSource(), i);
      desc = rewriter.create<vector::InsertStridedSliceOp>(
          loc, vec, desc,
          /*offsets=*/i * mostMinorVectorSize, /*strides=*/1);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

/// ShapeCastOp 1D -> 2D upcast serves the purpose of unflattening 2-D from 1-D
/// vectors progressively.
/// This iterates over the most major dimension of the 2-D vector and performs
/// rewrites into:
///   vector.extract_strided_slice from 1-D + vector.insert into 2-D
/// Note that 1-D extract_strided_slice ops are lowered to efficient
/// vector.shuffle ops.
class ShapeCastOp2DUpCastRewritePattern
    : public OpRewritePattern<vector::ShapeCastOp> {
public:
  using OpRewritePattern<vector::ShapeCastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp op,
                                PatternRewriter &rewriter) const override {
    auto sourceVectorType = op.getSourceVectorType();
    auto resultVectorType = op.getResultVectorType();
    if (sourceVectorType.getRank() != 1 || resultVectorType.getRank() != 2)
      return failure();

    auto loc = op.getLoc();
    Value desc = rewriter.create<arith::ConstantOp>(
        loc, resultVectorType, rewriter.getZeroAttr(resultVectorType));
    unsigned mostMinorVectorSize = resultVectorType.getShape()[1];
    for (int64_t i = 0, e = resultVectorType.getShape().front(); i != e; ++i) {
      Value vec = rewriter.create<vector::ExtractStridedSliceOp>(
          loc, op.getSource(), /*offsets=*/i * mostMinorVectorSize,
          /*sizes=*/mostMinorVectorSize,
          /*strides=*/1);
      desc = rewriter.create<vector::InsertOp>(loc, vec, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// We typically should not lower general shape cast operations into data
// movement instructions, since the assumption is that these casts are
// optimized away during progressive lowering. For completeness, however,
// we fall back to a reference implementation that moves all elements
// into the right place if we get here.
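// For example (an illustrative shape), a cast such as:
//
//   %1 = vector.shape_cast %0 : vector<2x3xf32> to vector<3x2xf32>
//
// falls through to this pattern and is expanded into 6 scalar
// vector.extract / vector.insert pairs that copy the elements in row-major
// order.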
class ShapeCastOpRewritePattern : public OpRewritePattern<vector::ShapeCastOp> {
public:
  using OpRewritePattern<vector::ShapeCastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    auto sourceVectorType = op.getSourceVectorType();
    auto resultVectorType = op.getResultVectorType();

    // Special case 2D/1D lowerings with better implementations.
    // TODO: make it ND/1D to allow generic ND->1D->MD.
    int64_t srcRank = sourceVectorType.getRank();
    int64_t resRank = resultVectorType.getRank();
    if ((srcRank == 2 && resRank == 1) || (srcRank == 1 && resRank == 2))
      return failure();

    // Generic ShapeCast lowering path goes all the way down to unrolled scalar
    // extract/insert chains.
    // TODO: consider evolving the semantics to only allow 1D source or dest and
    // drop this potentially very expensive lowering.
    // Compute number of elements involved in the reshape.
    int64_t numElts = 1;
    for (int64_t r = 0; r < srcRank; r++)
      numElts *= sourceVectorType.getDimSize(r);
    // Replace with data movement operations:
    //    x[0,0,0] = y[0,0]
    //    x[0,0,1] = y[0,1]
    //    x[0,1,0] = y[0,2]
    // etc., incrementing the two index vectors "row-major"
    // within the source and result shape.
    SmallVector<int64_t, 4> srcIdx(srcRank);
    SmallVector<int64_t, 4> resIdx(resRank);
    Value result = rewriter.create<arith::ConstantOp>(
        loc, resultVectorType, rewriter.getZeroAttr(resultVectorType));
    for (int64_t i = 0; i < numElts; i++) {
      if (i != 0) {
        incIdx(srcIdx, sourceVectorType, srcRank - 1);
        incIdx(resIdx, resultVectorType, resRank - 1);
      }
      Value e = rewriter.create<vector::ExtractOp>(loc, op.getSource(), srcIdx);
      result = rewriter.create<vector::InsertOp>(loc, e, result, resIdx);
    }
    rewriter.replaceOp(op, result);
    return success();
  }

private:
  static void incIdx(SmallVector<int64_t, 4> &idx, VectorType tp, int64_t r) {
    assert(0 <= r && r < tp.getRank());
    if (++idx[r] == tp.getDimSize(r)) {
      idx[r] = 0;
      incIdx(idx, tp, r - 1);
    }
  }
};

/// Convert MulIOp/MulFOp + MultiDimReductionOp<add> into ContractionOp.
/// Ex:
/// ```
///   %0 = arith.mulf %arg0, %arg1 : vector<8x32x16xf32>
///   %1 = vector.multi_reduction add, %0 [1]
///     : vector<8x32x16xf32> to vector<8x16xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d2)>],
///    iterator_types = ["parallel", "reduction", "parallel"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x16xf32>
/// ```
struct MultiReduceToContract
    : public OpRewritePattern<vector::MultiDimReductionOp> {
  using OpRewritePattern<vector::MultiDimReductionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
                                PatternRewriter &rewriter) const override {
    if (reduceOp.getKind() != vector::CombiningKind::ADD)
      return failure();
    Operation *mulOp = reduceOp.getSource().getDefiningOp();
    if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
      return failure();
    SmallVector<bool> reductionMask = reduceOp.getReductionMask();
    auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
    SmallVector<AffineExpr> exprs;
    SmallVector<StringRef> iteratorTypes;
    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
      if (!isReduceDim.value()) {
        iteratorTypes.push_back(getParallelIteratorTypeName());
        exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
      } else {
        iteratorTypes.push_back(getReductionIteratorTypeName());
      }
    }
    auto dstMap = AffineMap::get(/*dimCount=*/reductionMask.size(),
                                 /*symCount=*/0, exprs, reduceOp.getContext());
    rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
        reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
        rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
        rewriter.getStrArrayAttr(iteratorTypes));
    return success();
  }
};

/// Merge TransposeOp into ContractionOp user.
/// Ex:
/// ```
///   %0 = vector.transpose %arg0, [2, 0, 1]
///     : vector<32x16x8xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2, d0)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16x8xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractTranspose
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> maps =
        llvm::to_vector<4>(contractOp.getIndexingMaps());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto transposeOp = operand->getDefiningOp<vector::TransposeOp>();
      if (!transposeOp)
        continue;
      AffineMap permutationMap = AffineMap::getPermutationMap(
          extractVector<unsigned>(transposeOp.getTransp()),
          contractOp.getContext());
      map = inversePermutation(permutationMap).compose(map);
      *operand = transposeOp.getVector();
      changed = true;
    }
    if (!changed)
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merge BroadcastOp into ContractionOp user.
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///         affine_map<(d0, d1, d2) -> (d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractBroadcast
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> maps =
        llvm::to_vector<4>(contractOp.getIndexingMaps());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto broadcast = operand->getDefiningOp<vector::BroadcastOp>();
      if (!broadcast)
        continue;
      // ContractionOp can only take vectors as operands.
      auto srcType = broadcast.getSourceType().dyn_cast<VectorType>();
      if (!srcType || srcType.getRank() == broadcast.getVectorType().getRank())
        continue;
      int64_t rankDiff =
          broadcast.getVectorType().getRank() - srcType.getRank();
      bool innerDimBroadcast = false;
      SmallVector<AffineExpr> originalDims;
      for (const auto &dim : llvm::enumerate(srcType.getShape())) {
        if (dim.value() !=
            broadcast.getVectorType().getDimSize(rankDiff + dim.index())) {
          innerDimBroadcast = true;
          break;
        }
        originalDims.push_back(
            rewriter.getAffineDimExpr(dim.index() + rankDiff));
      }
      // Contract doesn't support inner dimension broadcast. Once this is
      // relaxed we can remove this case.
      if (innerDimBroadcast)
        continue;

      // It would be incorrect to fold a broadcast onto a reduction dimension
      // of non-unit size.
      bool nonUnitDimReductionBroadcast = false;
      for (int64_t i = 0; i < rankDiff; ++i) {
        if (broadcast.getVectorType().getDimSize(i) != 1 &&
            isReductionIterator(contractOp.getIteratorTypes()
                                    .getValue()[map.getDimPosition(i)])) {
          nonUnitDimReductionBroadcast = true;
          break;
        }
      }
      if (nonUnitDimReductionBroadcast)
        continue;

      AffineMap broadcastMap =
          AffineMap::get(broadcast.getVectorType().getRank(), 0, originalDims,
                         contractOp.getContext());
      map = broadcastMap.compose(map);
      *operand = broadcast.getSource();
      changed = true;
    }

    if (!changed)
      return failure();

    // Determine which dims are unused, now that the maps have been composed
    // with the broadcast maps.
    llvm::SmallBitVector unusedDimsBitVector = getUnusedDimsBitVector(maps);
    // Compress unused dims.
    for (auto &m : maps)
      m = compressDims(m, unusedDimsBitVector);
    // Compute the combined iterators.
    SmallVector<Attribute, 4> iterators;
    for (unsigned i = 0; i < unusedDimsBitVector.size(); ++i) {
      if (!unusedDimsBitVector.test(i))
        iterators.push_back(contractOp.getIteratorTypes().getValue()[i]);
    }
    // Check that compressing unused dims isn't removing all reduction
    // iterators. For example, if the vector.contract had only one reduction
    // iterator and that was a unit-dimension created by a broadcast,
    // then we should bail here, otherwise we would create a contract without
    // a reduction iterator.
    if (!llvm::any_of(iterators, isReductionIterator))
      return failure();
    // If the compressed maps have a dimension that is not used by either LHS
    // or RHS then the ContractionOp verifier would fail.
    if (getUnusedDimsBitVector({maps[0], maps[1]}).any())
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), rewriter.getArrayAttr(iterators));
    return success();
  }
};

/// Reorders cast(broadcast) to broadcast(cast). This makes broadcast ops and
/// contraction ops closer, which kicks in the CombineContractBroadcast pattern
/// when casting ops are around these operations.
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xi8> to vector<8x32x16xi8>
///   %1 = arith.extsi %0 : vector<8x32x16xi8> to vector<8x32x16xi32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.extsi %arg0 : vector<32x16xi8> to vector<32x16xi32>
///   %1 = vector.broadcast %0 : vector<32x16xi32> to vector<8x32x16xi32>
/// ```
struct ReorderCastOpsOnBroadcast
    : public OpInterfaceRewritePattern<CastOpInterface> {
  using OpInterfaceRewritePattern<CastOpInterface>::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(CastOpInterface op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumOperands() != 1)
      return failure();
    auto bcastOp = op->getOperand(0).getDefiningOp<vector::BroadcastOp>();
    if (!bcastOp)
      return failure();

    Type castResTy = getElementTypeOrSelf(op->getResult(0));
    if (auto vecTy = bcastOp.getSourceType().dyn_cast<VectorType>())
      castResTy = VectorType::get(vecTy.getShape(), castResTy);
    auto *castOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(),
                        bcastOp.getSource(), castResTy, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, op->getResult(0).getType(), castOp->getResult(0));
    return success();
  }
};

/// Reorders elementwise(transpose) to transpose(elementwise). This makes
/// transpose ops and contraction ops closer, which kicks in the
/// CombineContractTranspose pattern when elementwise ops are between these
/// operations.
/// Ex:
/// ```
///   %at = vector.transpose %a, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %bt = vector.transpose %b, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %r = arith.addf %at, %bt : vector<2x4xf32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.addf %a, %b : vector<4x2xf32>
///   %r = vector.transpose %0, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// ```
struct ReorderElementwiseOpsOnTranspose final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    // Make sure all operands are transpose/constant ops and collect their
    // transposition maps.
    SmallVector<ArrayAttr, 4> transposeMaps;
    transposeMaps.reserve(op->getNumOperands());
    // Record the initial type before transposition. We'll use its shape later.
    // Any type will do here as we will check all transpose maps are the same.
    VectorType srcType;
    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        transposeMaps.push_back(transposeOp.getTransp());
        srcType = transposeOp.getVectorType();
      } else if (!matchPattern(operand, m_Constant())) {
        return failure();
      }
    }
    if (transposeMaps.empty())
      return failure();
    // This is an elementwise op, so all transposed operands should have the
    // same type. We need to additionally check that all transposes use the
    // same map.
    if (!llvm::is_splat(transposeMaps))
      return rewriter.notifyMatchFailure(op, "different transpose map");

    SmallVector<Value, 4> srcValues;
    srcValues.reserve(op->getNumOperands());

    // If there are constant operands, we need to insert inverse transposes for
    // them. Calculate the inverse order first.
    auto order = extractVector<unsigned>(transposeMaps.front());
    SmallVector<int64_t> invOrder(order.size());
    for (int i = 0, e = order.size(); i < e; ++i)
      invOrder[order[i]] = i;

    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        srcValues.push_back(transposeOp.getVector());
      } else {
        // This is a constant. Create a reverse transpose op for it.
        auto vectorType = VectorType::get(
            srcType.getShape(),
            operand.getType().cast<VectorType>().getElementType());
        srcValues.push_back(rewriter.create<vector::TransposeOp>(
            operand.getLoc(), vectorType, operand,
            rewriter.getI64ArrayAttr(invOrder)));
      }
    }

    auto vectorType = VectorType::get(
        srcType.getShape(),
        op->getResultTypes()[0].cast<VectorType>().getElementType());
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        vectorType, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::TransposeOp>(
        op, op->getResultTypes()[0], elementwiseOp->getResult(0),
        transposeMaps.front());
    return success();
  }
};

} // namespace

/// Creates an arith::AddIOp if `isInt` is true, otherwise an arith::AddFOp,
/// using operands `x` and `y`.
static Value createAdd(Location loc, Value x, Value y, bool isInt,
                       PatternRewriter &rewriter) {
  if (isInt)
    return rewriter.create<arith::AddIOp>(loc, x, y);
  return rewriter.create<arith::AddFOp>(loc, x, y);
}

/// Creates an arith::MulIOp if `isInt` is true, otherwise an arith::MulFOp,
/// using operands `x` and `y`.
static Value createMul(Location loc, Value x, Value y, bool isInt,
                       PatternRewriter &rewriter) {
  if (isInt)
    return rewriter.create<arith::MulIOp>(loc, x, y);
  return rewriter.create<arith::MulFOp>(loc, x, y);
}

namespace mlir {

/// Progressively lower a `vector.contract %a, %b, %c` with row-major matmul
/// semantics to:
/// ```
///    %mta = maybe_transpose
///    %mtb = maybe_transpose
///    %flattened_a = vector.shape_cast %mta
///    %flattened_b = vector.shape_cast %mtb
///    %flattened_d = vector.matmul %flattened_a, %flattened_b
///    %mtd = vector.shape_cast %flattened_d
///    %d = maybe_untranspose %mtd
///    %e = add %c, %d
/// ```
/// `vector.matmul` later lowers to `llvm.matrix.multiply`.
///
/// This only kicks in when VectorTransformsOptions is set to `Matmul`.
/// vector.transpose operations are inserted if the vector.contract op is not a
/// row-major matrix multiply.
LogicalResult
ContractionOpToMatmulOpLowering::matchAndRewrite(vector::ContractionOp op,
                                                 PatternRewriter &rew) const {
  // TODO: implement masks
  if (llvm::size(op.getMasks()) != 0)
    return failure();
  if (vectorTransformOptions.vectorContractLowering !=
      vector::VectorContractLowering::Matmul)
    return failure();
  if (failed(filter(op)))
    return failure();

  auto iteratorTypes = op.getIteratorTypes().getValue();
  if (!isParallelIterator(iteratorTypes[0]) ||
      !isParallelIterator(iteratorTypes[1]) ||
      !isReductionIterator(iteratorTypes[2]))
    return failure();

  Type elementType = op.getLhsType().getElementType();
  if (!elementType.isIntOrFloat())
    return failure();

  Type dstElementType = op.getType();
  if (auto vecType = dstElementType.dyn_cast<VectorType>())
    dstElementType = vecType.getElementType();
  if (elementType != dstElementType)
    return failure();

  // Perform lhs + rhs transpositions to conform to matmul row-major semantics.
  // Bail out if the contraction cannot be put in this form.
  MLIRContext *ctx = op.getContext();
  Location loc = op.getLoc();
  AffineExpr m, n, k;
  bindDims(rew.getContext(), m, n, k);
  // LHS must be A(m, k) or A(k, m).
  Value lhs = op.getLhs();
  auto lhsMap = op.getIndexingMaps()[0];
  if (lhsMap == AffineMap::get(3, 0, {k, m}, ctx))
    lhs = rew.create<vector::TransposeOp>(loc, lhs, ArrayRef<int64_t>{1, 0});
  else if (lhsMap != AffineMap::get(3, 0, {m, k}, ctx))
    return failure();

  // RHS must be B(k, n) or B(n, k).
  Value rhs = op.getRhs();
  auto rhsMap = op.getIndexingMaps()[1];
  if (rhsMap == AffineMap::get(3, 0, {n, k}, ctx))
    rhs = rew.create<vector::TransposeOp>(loc, rhs, ArrayRef<int64_t>{1, 0});
  else if (rhsMap != AffineMap::get(3, 0, {k, n}, ctx))
    return failure();

  // At this point lhs and rhs are in row-major.
  VectorType lhsType = lhs.getType().cast<VectorType>();
  VectorType rhsType = rhs.getType().cast<VectorType>();
  int64_t lhsRows = lhsType.getDimSize(0);
  int64_t lhsColumns = lhsType.getDimSize(1);
  int64_t rhsColumns = rhsType.getDimSize(1);

  Type flattenedLHSType =
      VectorType::get(lhsType.getNumElements(), lhsType.getElementType());
  lhs = rew.create<vector::ShapeCastOp>(loc, flattenedLHSType, lhs);

  Type flattenedRHSType =
      VectorType::get(rhsType.getNumElements(), rhsType.getElementType());
  rhs = rew.create<vector::ShapeCastOp>(loc, flattenedRHSType, rhs);

  Value mul = rew.create<vector::MatmulOp>(loc, lhs, rhs, lhsRows, lhsColumns,
                                           rhsColumns);
  mul = rew.create<vector::ShapeCastOp>(
      loc,
      VectorType::get({lhsRows, rhsColumns},
                      getElementTypeOrSelf(op.getAcc().getType())),
      mul);

  // ACC must be C(m, n) or C(n, m).
  auto accMap = op.getIndexingMaps()[2];
  if (accMap == AffineMap::get(3, 0, {n, m}, ctx))
    mul = rew.create<vector::TransposeOp>(loc, mul, ArrayRef<int64_t>{1, 0});
  else if (accMap != AffineMap::get(3, 0, {m, n}, ctx))
    llvm_unreachable("invalid contraction semantics");

  Value res =
      elementType.isa<IntegerType>()
          ? static_cast<Value>(rew.create<arith::AddIOp>(loc, op.getAcc(), mul))
          : static_cast<Value>(
                rew.create<arith::AddFOp>(loc, op.getAcc(), mul));

  rew.replaceOp(op, res);
  return success();
}

namespace {
struct IteratorType {
  IteratorType(StringRef strRef) : strRef(strRef) {}
  bool isOfType(Attribute attr) const {
    auto sAttr = attr.dyn_cast<StringAttr>();
    return sAttr && sAttr.getValue() == strRef;
  }
  StringRef strRef;
};
struct Par : public IteratorType {
  Par() : IteratorType(getParallelIteratorTypeName()) {}
};
struct Red : public IteratorType {
  Red() : IteratorType(getReductionIteratorTypeName()) {}
};

/// Generate a vector implementation for matmat, matvec and tmatvec.
/// This unrolls outer-products along the reduction dimension.
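/// For example (an illustrative 2x4 * 4x3 matmat with layout
/// {(m, k), (k, n), (m, n)}), the reduction dimension k = 4 unrolls into four
/// outer products:
/// ```
///   %at = vector.transpose %a, [1, 0] : vector<2x4xf32> to vector<4x2xf32>
///   %a0 = vector.extract %at[0] : vector<4x2xf32>
///   %b0 = vector.extract %b[0] : vector<4x3xf32>
///   %c0 = vector.outerproduct %a0, %b0, %acc : vector<2xf32>, vector<3xf32>
///   ..
///   %c3 = vector.outerproduct %a3, %b3, %c2 : vector<2xf32>, vector<3xf32>
/// ```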
1450 struct UnrolledOuterProductGenerator 1451 : public StructuredGenerator<vector::ContractionOp> { 1452 UnrolledOuterProductGenerator(OpBuilder &builder, vector::ContractionOp op) 1453 : StructuredGenerator<vector::ContractionOp>(builder, op), 1454 kind(op.getKind()), lhs(op.getLhs()), rhs(op.getRhs()), 1455 res(op.getAcc()), lhsType(op.getLhsType()) {} 1456 1457 Value t(Value v) { 1458 static constexpr std::array<int64_t, 2> perm = {1, 0}; 1459 return builder.create<vector::TransposeOp>(loc, v, perm); 1460 } 1461 1462 Value promote(Value v, Type dstElementType) { 1463 Type elementType = v.getType(); 1464 auto vecType = elementType.dyn_cast<VectorType>(); 1465 if (vecType) 1466 elementType = vecType.getElementType(); 1467 if (elementType == dstElementType) 1468 return v; 1469 Type promotedType = dstElementType; 1470 if (vecType) 1471 promotedType = VectorType::get(vecType.getShape(), promotedType); 1472 if (dstElementType.isa<FloatType>()) 1473 return builder.create<arith::ExtFOp>(loc, promotedType, v); 1474 return builder.create<arith::ExtSIOp>(loc, promotedType, v); 1475 } 1476 1477 Value outerProd(Value lhs, Value rhs, Value res, int reductionSize) { 1478 assert(reductionSize > 0); 1479 Type resElementType = res.getType().cast<VectorType>().getElementType(); 1480 for (int64_t k = 0; k < reductionSize; ++k) { 1481 Value a = builder.create<vector::ExtractOp>(loc, lhs, k); 1482 Value b = builder.create<vector::ExtractOp>(loc, rhs, k); 1483 a = promote(a, resElementType); 1484 b = promote(b, resElementType); 1485 res = builder.create<vector::OuterProductOp>(loc, res.getType(), a, b, 1486 res, kind); 1487 } 1488 return res; 1489 } 1490 1491 /// Two outer parallel, one inner reduction (matmat flavor). 1492 FailureOr<Value> matmat() { 1493 if (!iters({Par(), Par(), Red()})) 1494 return failure(); 1495 // Set up the parallel/reduction structure in the right form. 1496 AffineExpr m, n, k; 1497 bindDims(builder.getContext(), m, n, k); 1498 // Classical row-major matmul: Just permute the lhs. 1499 if (layout({{m, k}, {k, n}, {m, n}})) 1500 return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1)); 1501 // TODO: may be better to fail and use some vector<k> -> scalar reduction. 1502 if (layout({{m, k}, {n, k}, {m, n}})) { 1503 Value tlhs = t(lhs); 1504 return outerProd(tlhs, t(rhs), res, lhsType.getDimSize(1)); 1505 } 1506 // No need to permute anything. 1507 if (layout({{k, m}, {k, n}, {m, n}})) 1508 return outerProd(lhs, rhs, res, lhsType.getDimSize(0)); 1509 // Just permute the rhs. 1510 if (layout({{k, m}, {n, k}, {m, n}})) 1511 return outerProd(lhs, t(rhs), res, lhsType.getDimSize(0)); 1512 // Transposed output: swap RHS and LHS. 1513 // Classical row-major matmul: permute the lhs. 1514 if (layout({{m, k}, {k, n}, {n, m}})) 1515 return outerProd(rhs, t(lhs), res, lhsType.getDimSize(1)); 1516 // TODO: may be better to fail and use some vector<k> -> scalar reduction. 
1517 if (layout({{m, k}, {n, k}, {n, m}})) { 1518 Value trhs = t(rhs); 1519 return outerProd(trhs, t(lhs), res, lhsType.getDimSize(1)); 1520 } 1521 if (layout({{k, m}, {k, n}, {n, m}})) 1522 return outerProd(rhs, lhs, res, lhsType.getDimSize(0)); 1523 if (layout({{k, m}, {n, k}, {n, m}})) 1524 return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0)); 1525 return failure(); 1526 } 1527 1528 /// One outer parallel, one inner reduction (matvec flavor) 1529 FailureOr<Value> matvec() { 1530 if (!iters({Par(), Red()})) 1531 return failure(); 1532 AffineExpr m, k; 1533 bindDims(builder.getContext(), m, k); 1534 1535 // Case mat-vec: transpose. 1536 if (layout({{m, k}, {k}, {m}})) 1537 return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1)); 1538 // Case mat-trans-vec: ready to go. 1539 if (layout({{k, m}, {k}, {m}})) 1540 return outerProd(lhs, rhs, res, lhsType.getDimSize(0)); 1541 // Case vec-mat: swap and transpose. 1542 if (layout({{k}, {m, k}, {m}})) 1543 return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0)); 1544 // Case vec-mat-trans: swap and ready to go. 1545 if (layout({{k}, {k, m}, {m}})) 1546 return outerProd(rhs, lhs, res, lhsType.getDimSize(0)); 1547 return failure(); 1548 } 1549 1550 // 1551 // One outer reduction, one inner parallel (tmatvec flavor) 1552 // 1553 FailureOr<Value> tmatvec() { 1554 if (!iters({Red(), Par()})) 1555 return failure(); 1556 AffineExpr k, m; 1557 bindDims(builder.getContext(), k, m); 1558 1559 // Case mat-vec: transpose. 1560 if (layout({{m, k}, {k}, {m}})) 1561 return outerProd(t(lhs), rhs, res, lhsType.getDimSize(1)); 1562 // Case mat-trans-vec: ready to go. 1563 if (layout({{k, m}, {k}, {m}})) 1564 return outerProd(lhs, rhs, res, lhsType.getDimSize(0)); 1565 // Case vec-mat: swap and transpose. 1566 if (layout({{k}, {m, k}, {m}})) 1567 return outerProd(t(rhs), lhs, res, lhsType.getDimSize(0)); 1568 // Case vec-mat-trans: swap and ready to go. 1569 if (layout({{k}, {k, m}, {m}})) 1570 return outerProd(rhs, lhs, res, lhsType.getDimSize(0)); 1571 return failure(); 1572 } 1573 1574 private: 1575 vector::CombiningKind kind; 1576 Value lhs, rhs, res; 1577 VectorType lhsType; 1578 }; 1579 } // namespace 1580 1581 /// Progressively lower a `vector.contract %a, %b, %c` with row-major matmul 1582 /// semantics to a reduction_size-unrolled sequence: 1583 /// ``` 1584 /// %at = vector.transpose %a, [1, 0] 1585 /// %bRow0 = vector.extract %b[0] 1586 /// %atRow0 = vector.extract %at[0] 1587 /// %c0 = vector.outerproduct %atRow0, %bRow0, %c 1588 /// ... 1589 /// %bRowK = vector.extract %b[K] 1590 /// %atRowK = vector.extract %at[K] 1591 /// %cK = vector.outerproduct %atRowK, %bRowK, %cK-1 1592 /// ``` 1593 /// 1594 /// This only kicks in when VectorTransformsOptions is set to OuterProduct but 1595 /// otherwise supports any layout permutation of the matrix-multiply. 
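///
/// For example (a sketch with hypothetical shapes, assuming the matvec layout
/// ((m, k), (k), (m))), `C(m) += A(m, k) * B(k)` with m = 2 and k = 3 becomes
/// one transpose followed by three axpy-style outer products:
/// ```
///   %at = vector.transpose %a, [1, 0] : vector<2x3xf32> to vector<3x2xf32>
///   %a0 = vector.extract %at[0] : vector<3x2xf32>
///   %b0 = vector.extract %b[0] : vector<3xf32>
///   %c0 = vector.outerproduct %a0, %b0, %c : vector<2xf32>, f32
///   ...
/// ```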
LogicalResult ContractionOpToOuterProductOpLowering::matchAndRewrite(
    vector::ContractionOp op, PatternRewriter &rewriter) const {
  // TODO: implement masks.
  if (llvm::size(op.getMasks()) != 0)
    return failure();

  if (vectorTransformOptions.vectorContractLowering !=
      vector::VectorContractLowering::OuterProduct)
    return failure();

  if (failed(filter(op)))
    return failure();

  UnrolledOuterProductGenerator e(rewriter, op);
  FailureOr<Value> matmatRes = e.matmat();
  if (succeeded(matmatRes)) {
    rewriter.replaceOp(op, *matmatRes);
    return success();
  }
  FailureOr<Value> matvecRes = e.matvec();
  if (succeeded(matvecRes)) {
    rewriter.replaceOp(op, *matvecRes);
    return success();
  }
  FailureOr<Value> tmatvecRes = e.tmatvec();
  if (succeeded(tmatvecRes)) {
    rewriter.replaceOp(op, *tmatvecRes);
    return success();
  }

  return failure();
}

LogicalResult
ContractionOpToDotLowering::matchAndRewrite(vector::ContractionOp op,
                                            PatternRewriter &rewriter) const {
  // TODO: implement masks.
  if (llvm::size(op.getMasks()) != 0)
    return failure();

  if (failed(filter(op)))
    return failure();

  if (vectorTransformOptions.vectorContractLowering !=
      vector::VectorContractLowering::Dot)
    return failure();

  auto iteratorTypes = op.getIteratorTypes().getValue();
  static constexpr std::array<int64_t, 2> perm = {1, 0};
  Location loc = op.getLoc();
  Value lhs = op.getLhs(), rhs = op.getRhs();

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(rewriter.getContext(), m, n, k);
  SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
  //
  // In the following we wish to make the reduction dimension innermost so we
  // can load vectors and just fmul + reduce into a scalar.
  //
  if (isParallelIterator(iteratorTypes[0]) &&
      isParallelIterator(iteratorTypes[1]) &&
      isReductionIterator(iteratorTypes[2])) {
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      // No need to permute anything.
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      // Row-major matmul with a transposed output: swap lhs and rhs, then
      // transpose the new lhs.
1674 Value tmp = lhs; 1675 lhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm); 1676 rhs = tmp; 1677 } else if (maps == infer({{m, k}, {n, k}, {n, m}})) { 1678 std::swap(lhs, rhs); 1679 } else if (maps == infer({{k, m}, {k, n}, {n, m}})) { 1680 Value tmp = lhs; 1681 lhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm); 1682 rhs = rewriter.create<vector::TransposeOp>(loc, tmp, perm); 1683 } else if (maps == infer({{k, m}, {n, k}, {n, m}})) { 1684 Value tmp = rhs; 1685 rhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm); 1686 lhs = tmp; 1687 } else { 1688 return failure(); 1689 } 1690 } else if (isParallelIterator(iteratorTypes[0]) && 1691 isReductionIterator(iteratorTypes[1])) { 1692 // 1693 // One outer parallel, one inner reduction (matvec flavor) 1694 // 1695 if (maps == infer({{m, n}, {n}, {m}})) { 1696 // No need to permute anything. 1697 } else if (maps == infer({{n, m}, {n}, {m}})) { 1698 lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm); 1699 } else if (maps == infer({{n}, {m, n}, {m}})) { 1700 std::swap(lhs, rhs); 1701 } else if (maps == infer({{n}, {n, m}, {m}})) { 1702 std::swap(lhs, rhs); 1703 lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm); 1704 } else { 1705 return failure(); 1706 } 1707 } else { 1708 return failure(); 1709 } 1710 1711 VectorType dstType = op.getResultType().cast<VectorType>(); 1712 assert(dstType.getRank() >= 1 && dstType.getRank() <= 2 && 1713 "Expected dst type of rank 1 or 2"); 1714 1715 unsigned rank = dstType.getRank(); 1716 unsigned dstRows = dstType.getShape()[0]; 1717 unsigned dstColumns = rank == 1 ? 1 : dstType.getShape()[1]; 1718 1719 // ExtractOp does not allow dynamic indexing, we must unroll explicitly. 1720 Value res = rewriter.create<arith::ConstantOp>(loc, dstType, 1721 rewriter.getZeroAttr(dstType)); 1722 bool isInt = dstType.getElementType().isa<IntegerType>(); 1723 for (unsigned r = 0; r < dstRows; ++r) { 1724 Value a = rewriter.create<vector::ExtractOp>(op.getLoc(), lhs, r); 1725 for (unsigned c = 0; c < dstColumns; ++c) { 1726 Value b = rank == 1 1727 ? rhs 1728 : rewriter.create<vector::ExtractOp>(op.getLoc(), rhs, c); 1729 Value m = createMul(op.getLoc(), a, b, isInt, rewriter); 1730 Value reduced = rewriter.create<vector::ReductionOp>( 1731 op.getLoc(), vector::CombiningKind::ADD, m); 1732 1733 SmallVector<int64_t, 2> pos = rank == 1 ? SmallVector<int64_t, 2>{r} 1734 : SmallVector<int64_t, 2>{r, c}; 1735 res = rewriter.create<vector::InsertOp>(op.getLoc(), reduced, res, pos); 1736 } 1737 } 1738 if (auto acc = op.getAcc()) 1739 res = createAdd(op.getLoc(), res, acc, isInt, rewriter); 1740 rewriter.replaceOp(op, res); 1741 return success(); 1742 } 1743 1744 /// Progressive lowering of ContractionOp. 1745 /// One: 1746 /// %x = vector.contract with at least one free/batch dimension 1747 /// is replaced by: 1748 /// %a = vector.contract with one less free/batch dimension 1749 /// %b = vector.contract with one less free/batch dimension 1750 /// .. 1751 /// %x = combine %a %b .. 1752 /// until a pure contraction is reached (no free/batch dimensions), 1753 /// which is replaced by a dot-product. 1754 /// 1755 /// This only kicks in when either VectorTransformsOptions is set 1756 /// to DOT or when other contraction patterns fail. 
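///
/// As an illustrative sketch (hypothetical shapes, schematic operands),
/// lowering one free/batch dimension of size 2 rewrites a single
/// `vector.contract` into two lower-dimensional contractions whose results
/// are recombined:
/// ```
///   %a0 = vector.extract %a[0] ...
///   %x0 = vector.contract %a0, %b, %acc0 ...
///   %a1 = vector.extract %a[1] ...
///   %x1 = vector.contract %a1, %b, %acc1 ...
///   %x  = combine %x0, %x1 into the result vector
/// ```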
1757 // 1758 // TODO: break down into transpose/reshape/cast ops 1759 // when they become available to avoid code dup 1760 // TODO: investigate lowering order impact on performance 1761 LogicalResult 1762 ContractionOpLowering::matchAndRewrite(vector::ContractionOp op, 1763 PatternRewriter &rewriter) const { 1764 // TODO: implement masks. 1765 if (llvm::size(op.getMasks()) != 0) 1766 return failure(); 1767 1768 if (failed(filter(op))) 1769 return failure(); 1770 1771 // TODO: support mixed mode contract lowering. 1772 if (op.getLhsType().getElementType() != 1773 getElementTypeOrSelf(op.getAccType()) || 1774 op.getRhsType().getElementType() != getElementTypeOrSelf(op.getAccType())) 1775 return failure(); 1776 1777 // TODO: implement benefits, cost models. 1778 MLIRContext *ctx = op.getContext(); 1779 ContractionOpToMatmulOpLowering pat1(vectorTransformOptions, ctx); 1780 if (succeeded(pat1.matchAndRewrite(op, rewriter))) 1781 return success(); 1782 ContractionOpToOuterProductOpLowering pat2(vectorTransformOptions, ctx); 1783 if (succeeded(pat2.matchAndRewrite(op, rewriter))) 1784 return success(); 1785 ContractionOpToDotLowering pat3(vectorTransformOptions, ctx); 1786 if (succeeded(pat3.matchAndRewrite(op, rewriter))) 1787 return success(); 1788 ContractOpToElementwise pat4(vectorTransformOptions, ctx); 1789 if (succeeded(pat4.matchAndRewrite(op, rewriter))) 1790 return success(); 1791 1792 // Find first batch dimension in LHS/RHS, and lower when found. 1793 std::vector<std::pair<int64_t, int64_t>> batchDimMap = op.getBatchDimMap(); 1794 if (!batchDimMap.empty()) { 1795 int64_t lhsIndex = batchDimMap[0].first; 1796 int64_t rhsIndex = batchDimMap[0].second; 1797 rewriter.replaceOp(op, lowerParallel(op, lhsIndex, rhsIndex, rewriter)); 1798 return success(); 1799 } 1800 1801 // Collect contracting dimensions. 1802 std::vector<std::pair<int64_t, int64_t>> contractingDimMap = 1803 op.getContractingDimMap(); 1804 DenseSet<int64_t> lhsContractingDimSet; 1805 DenseSet<int64_t> rhsContractingDimSet; 1806 for (auto &dimPair : contractingDimMap) { 1807 lhsContractingDimSet.insert(dimPair.first); 1808 rhsContractingDimSet.insert(dimPair.second); 1809 } 1810 1811 // Find first free dimension in LHS, and lower when found. 1812 VectorType lhsType = op.getLhsType(); 1813 for (int64_t lhsIndex = 0, e = lhsType.getRank(); lhsIndex < e; ++lhsIndex) { 1814 if (lhsContractingDimSet.count(lhsIndex) == 0) { 1815 rewriter.replaceOp( 1816 op, lowerParallel(op, lhsIndex, /*rhsIndex=*/-1, rewriter)); 1817 return success(); 1818 } 1819 } 1820 1821 // Find first free dimension in RHS, and lower when found. 1822 VectorType rhsType = op.getRhsType(); 1823 for (int64_t rhsIndex = 0, e = rhsType.getRank(); rhsIndex < e; ++rhsIndex) { 1824 if (rhsContractingDimSet.count(rhsIndex) == 0) { 1825 rewriter.replaceOp( 1826 op, lowerParallel(op, /*lhsIndex=*/-1, rhsIndex, rewriter)); 1827 return success(); 1828 } 1829 } 1830 1831 // Lower the first remaining reduction dimension. 1832 if (!contractingDimMap.empty()) { 1833 rewriter.replaceOp(op, lowerReduction(op, rewriter)); 1834 return success(); 1835 } 1836 1837 return failure(); 1838 } 1839 1840 // Lower one parallel dimension. 
1841 // TODO: consider reusing existing contract unrolling 1842 Value ContractionOpLowering::lowerParallel(vector::ContractionOp op, 1843 int64_t lhsIndex, int64_t rhsIndex, 1844 PatternRewriter &rewriter) const { 1845 VectorType lhsType = op.getLhsType(); 1846 VectorType rhsType = op.getRhsType(); 1847 VectorType resType = op.getResultType().cast<VectorType>(); 1848 // Find the iterator type index and result index. 1849 SmallVector<AffineMap, 4> iMap = op.getIndexingMaps(); 1850 int64_t iterIndex = -1; 1851 int64_t dimSize = -1; 1852 if (lhsIndex >= 0) { 1853 iterIndex = iMap[0].getDimPosition(lhsIndex); 1854 assert((rhsIndex < 0 || iterIndex == iMap[1].getDimPosition(rhsIndex)) && 1855 "parallel index should be free in LHS or batch in LHS/RHS"); 1856 dimSize = lhsType.getDimSize(lhsIndex); 1857 } else { 1858 assert(rhsIndex >= 0 && "missing parallel index"); 1859 iterIndex = iMap[1].getDimPosition(rhsIndex); 1860 dimSize = rhsType.getDimSize(rhsIndex); 1861 } 1862 assert(iterIndex >= 0 && "parallel index not listed in operand mapping"); 1863 Optional<int64_t> lookup = getResultIndex(iMap[2], iterIndex); 1864 assert(lookup.has_value() && "parallel index not listed in reduction"); 1865 int64_t resIndex = lookup.getValue(); 1866 // Construct new iterator types and affine map array attribute. 1867 std::array<AffineMap, 3> lowIndexingMaps = { 1868 adjustMap(iMap[0], iterIndex, rewriter), 1869 adjustMap(iMap[1], iterIndex, rewriter), 1870 adjustMap(iMap[2], iterIndex, rewriter)}; 1871 auto lowAffine = rewriter.getAffineMapArrayAttr(lowIndexingMaps); 1872 auto lowIter = 1873 rewriter.getArrayAttr(adjustIter(op.getIteratorTypes(), iterIndex)); 1874 // Unroll into a series of lower dimensional vector.contract ops. 1875 Location loc = op.getLoc(); 1876 Value result = rewriter.create<arith::ConstantOp>( 1877 loc, resType, rewriter.getZeroAttr(resType)); 1878 for (int64_t d = 0; d < dimSize; ++d) { 1879 auto lhs = reshapeLoad(loc, op.getLhs(), lhsType, lhsIndex, d, rewriter); 1880 auto rhs = reshapeLoad(loc, op.getRhs(), rhsType, rhsIndex, d, rewriter); 1881 auto acc = reshapeLoad(loc, op.getAcc(), resType, resIndex, d, rewriter); 1882 Value lowContract = rewriter.create<vector::ContractionOp>( 1883 loc, lhs, rhs, acc, lowAffine, lowIter); 1884 result = 1885 reshapeStore(loc, lowContract, result, resType, resIndex, d, rewriter); 1886 } 1887 return result; 1888 } 1889 1890 // Lower one reduction dimension. 1891 Value ContractionOpLowering::lowerReduction(vector::ContractionOp op, 1892 PatternRewriter &rewriter) const { 1893 auto loc = op.getLoc(); 1894 VectorType lhsType = op.getLhsType(); 1895 VectorType rhsType = op.getRhsType(); 1896 Type resType = op.getResultType(); 1897 assert(!resType.isa<VectorType>()); 1898 bool isInt = resType.isa<IntegerType>(); 1899 // Use iterator index 0. 1900 int64_t iterIndex = 0; 1901 SmallVector<AffineMap, 4> iMap = op.getIndexingMaps(); 1902 Optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex); 1903 Optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex); 1904 assert(lookupLhs.has_value() && "missing LHS parallel index"); 1905 assert(lookupRhs.has_value() && "missing RHS parallel index"); 1906 int64_t lhsIndex = lookupLhs.getValue(); 1907 int64_t rhsIndex = lookupRhs.getValue(); 1908 int64_t dimSize = lhsType.getDimSize(lhsIndex); 1909 assert(dimSize == rhsType.getDimSize(rhsIndex) && "corrupt shape"); 1910 // Base case. 
1911 if (lhsType.getRank() == 1) { 1912 assert(rhsType.getRank() == 1 && "corrupt contraction"); 1913 Value m = createMul(loc, op.getLhs(), op.getRhs(), isInt, rewriter); 1914 auto kind = vector::CombiningKind::ADD; 1915 if (auto acc = op.getAcc()) 1916 return rewriter.create<vector::ReductionOp>(loc, kind, m, acc); 1917 return rewriter.create<vector::ReductionOp>(loc, kind, m); 1918 } 1919 // Construct new iterator types and affine map array attribute. 1920 std::array<AffineMap, 3> lowIndexingMaps = { 1921 adjustMap(iMap[0], iterIndex, rewriter), 1922 adjustMap(iMap[1], iterIndex, rewriter), 1923 adjustMap(iMap[2], iterIndex, rewriter)}; 1924 auto lowAffine = rewriter.getAffineMapArrayAttr(lowIndexingMaps); 1925 auto lowIter = 1926 rewriter.getArrayAttr(adjustIter(op.getIteratorTypes(), iterIndex)); 1927 // Unroll into a series of lower dimensional vector.contract ops. 1928 // By feeding the initial accumulator into the first contraction, 1929 // and the result of each contraction into the next, eventually 1930 // the sum of all reductions is computed. 1931 Value result = op.getAcc(); 1932 for (int64_t d = 0; d < dimSize; ++d) { 1933 auto lhs = reshapeLoad(loc, op.getLhs(), lhsType, lhsIndex, d, rewriter); 1934 auto rhs = reshapeLoad(loc, op.getRhs(), rhsType, rhsIndex, d, rewriter); 1935 result = rewriter.create<vector::ContractionOp>(loc, lhs, rhs, result, 1936 lowAffine, lowIter); 1937 } 1938 return result; 1939 } 1940 1941 } // namespace mlir 1942 1943 Optional<mlir::vector::DistributeOps> mlir::vector::distributPointwiseVectorOp( 1944 OpBuilder &builder, Operation *op, ArrayRef<Value> ids, 1945 ArrayRef<int64_t> multiplicity, const AffineMap &map) { 1946 OpBuilder::InsertionGuard guard(builder); 1947 builder.setInsertionPointAfter(op); 1948 Location loc = op->getLoc(); 1949 if (op->getNumResults() != 1) 1950 return {}; 1951 Value result = op->getResult(0); 1952 VectorType type = op->getResult(0).getType().dyn_cast<VectorType>(); 1953 if (!type || map.getNumResults() != multiplicity.size()) 1954 return {}; 1955 // For each dimension being distributed check that the size is a multiple of 1956 // the multiplicity. To handle more sizes we would need to support masking. 1957 unsigned multiplictyCount = 0; 1958 for (auto exp : map.getResults()) { 1959 auto affinExp = exp.dyn_cast<AffineDimExpr>(); 1960 if (!affinExp || affinExp.getPosition() >= type.getRank() || 1961 type.getDimSize(affinExp.getPosition()) % 1962 multiplicity[multiplictyCount++] != 1963 0) 1964 return {}; 1965 } 1966 DistributeOps ops; 1967 ops.extract = 1968 builder.create<vector::ExtractMapOp>(loc, result, ids, multiplicity, map); 1969 ops.insert = 1970 builder.create<vector::InsertMapOp>(loc, ops.extract, result, ids); 1971 return ops; 1972 } 1973 1974 /// Progressive lowering of transfer_read. This pattern supports lowering of 1975 /// `vector.transfer_read` to a combination of `vector.load` and 1976 /// `vector.broadcast` if all of the following hold: 1977 /// - Stride of most minor memref dimension must be 1. 1978 /// - Out-of-bounds masking is not required. 1979 /// - If the memref's element type is a vector type then it coincides with the 1980 /// result type. 1981 /// - The permutation map doesn't perform permutation (broadcasting is allowed). 
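///
/// For example (an illustrative sketch, assuming a unit-stride memref and an
/// in-bounds, non-broadcasting read):
/// ```
///   %v = vector.transfer_read %mem[%i], %pad {in_bounds = [true]}
///     : memref<16xf32>, vector<4xf32>
/// ```
/// becomes:
/// ```
///   %v = vector.load %mem[%i] : memref<16xf32>, vector<4xf32>
/// ```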
1982 struct TransferReadToVectorLoadLowering 1983 : public OpRewritePattern<vector::TransferReadOp> { 1984 TransferReadToVectorLoadLowering(MLIRContext *context, 1985 llvm::Optional<unsigned> maxRank) 1986 : OpRewritePattern<vector::TransferReadOp>(context), 1987 maxTransferRank(maxRank) {} 1988 1989 LogicalResult matchAndRewrite(vector::TransferReadOp read, 1990 PatternRewriter &rewriter) const override { 1991 if (maxTransferRank && read.getVectorType().getRank() > *maxTransferRank) 1992 return failure(); 1993 1994 SmallVector<unsigned, 4> broadcastedDims; 1995 // Permutations are handled by VectorToSCF or 1996 // populateVectorTransferPermutationMapLoweringPatterns. 1997 // We let the 0-d corner case pass-through as it is supported. 1998 if (!read.getPermutationMap().isMinorIdentityWithBroadcasting( 1999 &broadcastedDims)) 2000 return failure(); 2001 2002 auto memRefType = read.getShapedType().dyn_cast<MemRefType>(); 2003 if (!memRefType) 2004 return failure(); 2005 2006 // Non-unit strides are handled by VectorToSCF. 2007 if (!vector::isLastMemrefDimUnitStride(memRefType)) 2008 return failure(); 2009 2010 // If there is broadcasting involved then we first load the unbroadcasted 2011 // vector, and then broadcast it with `vector.broadcast`. 2012 ArrayRef<int64_t> vectorShape = read.getVectorType().getShape(); 2013 SmallVector<int64_t, 4> unbroadcastedVectorShape(vectorShape.begin(), 2014 vectorShape.end()); 2015 for (unsigned i : broadcastedDims) 2016 unbroadcastedVectorShape[i] = 1; 2017 VectorType unbroadcastedVectorType = VectorType::get( 2018 unbroadcastedVectorShape, read.getVectorType().getElementType()); 2019 2020 // `vector.load` supports vector types as memref's elements only when the 2021 // resulting vector type is the same as the element type. 2022 auto memrefElTy = memRefType.getElementType(); 2023 if (memrefElTy.isa<VectorType>() && memrefElTy != unbroadcastedVectorType) 2024 return failure(); 2025 2026 // Otherwise, element types of the memref and the vector must match. 2027 if (!memrefElTy.isa<VectorType>() && 2028 memrefElTy != read.getVectorType().getElementType()) 2029 return failure(); 2030 2031 // Out-of-bounds dims are handled by MaterializeTransferMask. 2032 if (read.hasOutOfBoundsDim()) 2033 return failure(); 2034 2035 // Create vector load op. 2036 Operation *loadOp; 2037 if (read.getMask()) { 2038 Value fill = rewriter.create<vector::SplatOp>( 2039 read.getLoc(), unbroadcastedVectorType, read.getPadding()); 2040 loadOp = rewriter.create<vector::MaskedLoadOp>( 2041 read.getLoc(), unbroadcastedVectorType, read.getSource(), 2042 read.getIndices(), read.getMask(), fill); 2043 } else { 2044 loadOp = rewriter.create<vector::LoadOp>( 2045 read.getLoc(), unbroadcastedVectorType, read.getSource(), 2046 read.getIndices()); 2047 } 2048 2049 // Insert a broadcasting op if required. 2050 if (!broadcastedDims.empty()) { 2051 rewriter.replaceOpWithNewOp<vector::BroadcastOp>( 2052 read, read.getVectorType(), loadOp->getResult(0)); 2053 } else { 2054 rewriter.replaceOp(read, loadOp->getResult(0)); 2055 } 2056 2057 return success(); 2058 } 2059 2060 llvm::Optional<unsigned> maxTransferRank; 2061 }; 2062 2063 /// Replace a 0-d vector.load with a memref.load + vector.broadcast. 2064 // TODO: we shouldn't cross the vector/scalar domains just for this 2065 // but atm we lack the infra to avoid it. 
Possible solutions include:
// - go directly to LLVM + bitcast
// - introduce a bitcast op and likely a new pointer dialect
// - let memref.load/store additionally support the 0-d vector case
// There are still deeper data layout issues lingering even in this
// trivial case (for architectures for which this matters).
struct VectorLoadToMemrefLoadLowering
    : public OpRewritePattern<vector::LoadOp> {
  using OpRewritePattern<vector::LoadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::LoadOp loadOp,
                                PatternRewriter &rewriter) const override {
    auto vecType = loadOp.getVectorType();
    if (vecType.getNumElements() != 1)
      return failure();
    auto memrefLoad = rewriter.create<memref::LoadOp>(
        loadOp.getLoc(), loadOp.getBase(), loadOp.getIndices());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(loadOp, vecType,
                                                     memrefLoad);
    return success();
  }
};

/// Replace a 0-d vector.store with a vector.extractelement + memref.store.
struct VectorStoreToMemrefStoreLowering
    : public OpRewritePattern<vector::StoreOp> {
  using OpRewritePattern<vector::StoreOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::StoreOp storeOp,
                                PatternRewriter &rewriter) const override {
    auto vecType = storeOp.getVectorType();
    if (vecType.getNumElements() != 1)
      return failure();
    Value extracted;
    if (vecType.getRank() == 0) {
      // TODO: Unify once ExtractOp supports 0-d vectors.
      extracted = rewriter.create<vector::ExtractElementOp>(
          storeOp.getLoc(), storeOp.getValueToStore());
    } else {
      SmallVector<int64_t> indices(vecType.getRank(), 0);
      extracted = rewriter.create<vector::ExtractOp>(
          storeOp.getLoc(), storeOp.getValueToStore(), indices);
    }

    rewriter.replaceOpWithNewOp<memref::StoreOp>(
        storeOp, extracted, storeOp.getBase(), storeOp.getIndices());
    return success();
  }
};

/// Progressive lowering of transfer_write. This pattern supports lowering of
/// `vector.transfer_write` to `vector.store` if all of the following hold:
/// - Stride of most minor memref dimension must be 1.
/// - Out-of-bounds masking is not required.
/// - If the memref's element type is a vector type then it coincides with the
///   type of the written value.
/// - The permutation map is the minor identity map (neither permutation nor
///   broadcasting is allowed).
struct TransferWriteToVectorStoreLowering
    : public OpRewritePattern<vector::TransferWriteOp> {
  TransferWriteToVectorStoreLowering(MLIRContext *context,
                                     llvm::Optional<unsigned> maxRank)
      : OpRewritePattern<vector::TransferWriteOp>(context),
        maxTransferRank(maxRank) {}

  LogicalResult matchAndRewrite(vector::TransferWriteOp write,
                                PatternRewriter &rewriter) const override {
    if (maxTransferRank && write.getVectorType().getRank() > *maxTransferRank)
      return failure();

    // Permutations are handled by VectorToSCF or
    // populateVectorTransferPermutationMapLoweringPatterns.
    if ( // pass-through for the 0-d corner case.
        !write.getPermutationMap().isMinorIdentity())
      return failure();

    auto memRefType = write.getShapedType().dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();

    // Non-unit strides are handled by VectorToSCF.
    if (!vector::isLastMemrefDimUnitStride(memRefType))
      return failure();

    // `vector.store` supports vector types as memref's elements only when the
    // type of the vector value being written is the same as the element type.
    auto memrefElTy = memRefType.getElementType();
    if (memrefElTy.isa<VectorType>() && memrefElTy != write.getVectorType())
      return failure();

    // Otherwise, element types of the memref and the vector must match.
    if (!memrefElTy.isa<VectorType>() &&
        memrefElTy != write.getVectorType().getElementType())
      return failure();

    // Out-of-bounds dims are handled by MaterializeTransferMask.
    if (write.hasOutOfBoundsDim())
      return failure();
    if (write.getMask()) {
      rewriter.replaceOpWithNewOp<vector::MaskedStoreOp>(
          write, write.getSource(), write.getIndices(), write.getMask(),
          write.getVector());
    } else {
      rewriter.replaceOpWithNewOp<vector::StoreOp>(
          write, write.getVector(), write.getSource(), write.getIndices());
    }
    return success();
  }

  llvm::Optional<unsigned> maxTransferRank;
};

// Returns the values in `arrayAttr` as an integer vector.
static SmallVector<int64_t, 4> getIntValueVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(
      llvm::map_range(arrayAttr.getAsRange<IntegerAttr>(),
                      [](IntegerAttr attr) { return attr.getInt(); }));
}

// Shuffles vector.bitcast op after vector.extract op.
//
// This transforms IR like:
//   %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %1 = vector.extract %0[3] : vector<8xf16>
// Into:
//   %0 = vector.extract %src[1] : vector<4xf32>
//   %1 = vector.bitcast %0 : vector<1xf32> to vector<2xf16>
//   %2 = vector.extract %1[1] : vector<2xf16>
struct BubbleDownVectorBitCastForExtract
    : public OpRewritePattern<vector::ExtractOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
                                PatternRewriter &rewriter) const override {
    // Only support extracting scalars for now.
    if (extractOp.getVectorType().getRank() != 1)
      return failure();

    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Fail to match if we only have one element in the cast op source.
    // This is to avoid an infinite loop given that this pattern can generate
    // such cases.
    if (castSrcType.getNumElements() == 1)
      return failure();

    // Only support casting to a larger number of elements for now.
    // E.g., vector<4xf32> -> vector<8xf16>.
    if (castSrcType.getNumElements() > castDstType.getNumElements())
      return failure();

    unsigned expandRatio =
        castDstType.getNumElements() / castSrcType.getNumElements();

    auto getFirstIntValue = [](ArrayAttr attr) -> uint64_t {
      return (*attr.getAsValueRange<IntegerAttr>().begin()).getZExtValue();
    };

    uint64_t index = getFirstIntValue(extractOp.getPosition());

    // Get the single scalar (as a vector) in the source value that packs the
    // desired scalar. E.g. extract vector<1xf32> from vector<4xf32>.
    VectorType oneScalarType =
        VectorType::get({1}, castSrcType.getElementType());
    Value packedValue = rewriter.create<vector::ExtractOp>(
        extractOp.getLoc(), oneScalarType, castOp.getSource(),
        rewriter.getI64ArrayAttr(index / expandRatio));

    // Cast it to a vector with the desired scalar's type.
    // E.g. f32 -> vector<2xf16>
    VectorType packedType =
        VectorType::get({expandRatio}, castDstType.getElementType());
    Value castedValue = rewriter.create<vector::BitCastOp>(
        extractOp.getLoc(), packedType, packedValue);

    // Finally extract the desired scalar.
    rewriter.replaceOpWithNewOp<vector::ExtractOp>(
        extractOp, extractOp.getType(), castedValue,
        rewriter.getI64ArrayAttr(index % expandRatio));

    return success();
  }
};

// Shuffles vector.bitcast op after vector.extract_strided_slice op.
//
// This transforms IR like:
//   %cast = vector.bitcast %arg0 : vector<4xf32> to vector<8xf16>
//   %0 = vector.extract_strided_slice %cast {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
// Into:
//   %0 = vector.extract_strided_slice %src {
//          offsets = [2], sizes = [2], strides = [1]
//        } : vector<4xf32> to vector<2xf32>
//   %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
    : public OpRewritePattern<vector::ExtractStridedSliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to more elements for now; other cases to be implemented.
    if (castSrcLastDim > castDstLastDim)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(extractOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOneValue(); }))
      return failure();

    unsigned rank = extractOp.getVectorType().getRank();
    assert(castDstLastDim % castSrcLastDim == 0);
    int64_t expandRatio = castDstLastDim / castSrcLastDim;

    // If we have fewer offsets than the rank, then implicitly we are selecting
    // the full range for the last bitcasted dimension; other dimensions aren't
    // affected. Otherwise, we need to scale down the last dimension's offset
    // given we are extracting from fewer elements now.
    ArrayAttr newOffsets = extractOp.getOffsets();
    if (newOffsets.size() == rank) {
      SmallVector<int64_t, 4> offsets = getIntValueVector(newOffsets);
      if (offsets.back() % expandRatio != 0)
        return failure();
      offsets.back() = offsets.back() / expandRatio;
      newOffsets = rewriter.getI64ArrayAttr(offsets);
    }

    // Similarly for sizes.
    ArrayAttr newSizes = extractOp.getSizes();
    if (newSizes.size() == rank) {
      SmallVector<int64_t, 4> sizes = getIntValueVector(newSizes);
      if (sizes.back() % expandRatio != 0)
        return failure();
      sizes.back() = sizes.back() / expandRatio;
      newSizes = rewriter.getI64ArrayAttr(sizes);
    }

    SmallVector<int64_t, 4> dims =
        llvm::to_vector<4>(extractOp.getType().cast<VectorType>().getShape());
    dims.back() = dims.back() / expandRatio;
    VectorType newExtractType =
        VectorType::get(dims, castSrcType.getElementType());

    auto newExtractOp = rewriter.create<vector::ExtractStridedSliceOp>(
        extractOp.getLoc(), newExtractType, castOp.getSource(), newOffsets,
        newSizes, extractOp.getStrides());

    rewriter.replaceOpWithNewOp<vector::BitCastOp>(
        extractOp, extractOp.getType(), newExtractOp);

    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
//   %0 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %0 = vector.bitcast %src : vector<4xf16> to vector<2xf32>
//   %1 = vector.bitcast %dst : vector<8xf16> to vector<4xf32>
//   %2 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
    : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;

    auto insertOp =
        bitcastOp.getSource().getDefiningOp<vector::InsertStridedSliceOp>();
    if (!insertOp)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(insertOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOneValue(); }))
      return failure();

    unsigned rank = insertOp.getSourceVectorType().getRank();
    // Require insert op to have the same rank for the source and destination
    // vector; other cases to be implemented.
2378 if (rank != insertOp.getDestVectorType().getRank()) 2379 return failure(); 2380 2381 ArrayAttr newOffsets = insertOp.getOffsets(); 2382 assert(newOffsets.size() == rank); 2383 SmallVector<int64_t, 4> offsets = getIntValueVector(newOffsets); 2384 if (offsets.back() % shrinkRatio != 0) 2385 return failure(); 2386 offsets.back() = offsets.back() / shrinkRatio; 2387 newOffsets = rewriter.getI64ArrayAttr(offsets); 2388 2389 SmallVector<int64_t, 4> srcDims = 2390 llvm::to_vector<4>(insertOp.getSourceVectorType().getShape()); 2391 srcDims.back() = srcDims.back() / shrinkRatio; 2392 VectorType newCastSrcType = 2393 VectorType::get(srcDims, castDstType.getElementType()); 2394 2395 auto newCastSrcOp = rewriter.create<vector::BitCastOp>( 2396 bitcastOp.getLoc(), newCastSrcType, insertOp.getSource()); 2397 2398 SmallVector<int64_t, 4> dstDims = 2399 llvm::to_vector<4>(insertOp.getDestVectorType().getShape()); 2400 dstDims.back() = dstDims.back() / shrinkRatio; 2401 VectorType newCastDstType = 2402 VectorType::get(dstDims, castDstType.getElementType()); 2403 2404 auto newCastDstOp = rewriter.create<vector::BitCastOp>( 2405 bitcastOp.getLoc(), newCastDstType, insertOp.getDest()); 2406 2407 rewriter.replaceOpWithNewOp<vector::InsertStridedSliceOp>( 2408 bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets, 2409 insertOp.getStrides()); 2410 2411 return success(); 2412 } 2413 }; 2414 2415 // Helper that returns a vector comparison that constructs a mask: 2416 // mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b] 2417 // 2418 // If `dim == 0` then the result will be a 0-D vector. 2419 // 2420 // NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative, 2421 // much more compact, IR for this operation, but LLVM eventually 2422 // generates more elaborate instructions for this intrinsic since it 2423 // is very conservative on the boundary conditions. 2424 static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op, 2425 bool force32BitVectorIndices, int64_t dim, 2426 Value b, Value *off = nullptr) { 2427 auto loc = op->getLoc(); 2428 // If we can assume all indices fit in 32-bit, we perform the vector 2429 // comparison in 32-bit to get a higher degree of SIMD parallelism. 2430 // Otherwise we perform the vector comparison using 64-bit indices. 2431 Type idxType = 2432 force32BitVectorIndices ? rewriter.getI32Type() : rewriter.getI64Type(); 2433 DenseIntElementsAttr indicesAttr; 2434 if (dim == 0 && force32BitVectorIndices) { 2435 indicesAttr = DenseIntElementsAttr::get( 2436 VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int32_t>{0}); 2437 } else if (dim == 0) { 2438 indicesAttr = DenseIntElementsAttr::get( 2439 VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int64_t>{0}); 2440 } else if (force32BitVectorIndices) { 2441 indicesAttr = rewriter.getI32VectorAttr( 2442 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))); 2443 } else { 2444 indicesAttr = rewriter.getI64VectorAttr( 2445 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))); 2446 } 2447 Value indices = rewriter.create<arith::ConstantOp>(loc, indicesAttr); 2448 // Add in an offset if requested. 2449 if (off) { 2450 Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off); 2451 Value ov = rewriter.create<vector::SplatOp>(loc, indices.getType(), o); 2452 indices = rewriter.create<arith::AddIOp>(loc, ov, indices); 2453 } 2454 // Construct the vector comparison. 
2455 Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b); 2456 Value bounds = 2457 rewriter.create<vector::SplatOp>(loc, indices.getType(), bound); 2458 return rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, indices, 2459 bounds); 2460 } 2461 2462 template <typename ConcreteOp> 2463 struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> { 2464 public: 2465 explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt) 2466 : mlir::OpRewritePattern<ConcreteOp>(context), 2467 force32BitVectorIndices(enableIndexOpt) {} 2468 2469 LogicalResult matchAndRewrite(ConcreteOp xferOp, 2470 PatternRewriter &rewriter) const override { 2471 if (!xferOp.hasOutOfBoundsDim()) 2472 return failure(); 2473 2474 if (xferOp.getVectorType().getRank() > 1 || 2475 llvm::size(xferOp.getIndices()) == 0) 2476 return failure(); 2477 2478 Location loc = xferOp->getLoc(); 2479 VectorType vtp = xferOp.getVectorType(); 2480 2481 // Create the in-bounds mask with all elements between [0 .. dim - offset) 2482 // set and [dim - offset .. vector_length) unset. 2483 // 2484 // TODO: when the leaf transfer rank is k > 1, we need the last `k` 2485 // dimensions here. 2486 unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1; 2487 Value off = xferOp.getIndices()[lastIndex]; 2488 Value dim = 2489 vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex); 2490 Value b = rewriter.create<arith::SubIOp>(loc, dim.getType(), dim, off); 2491 Value mask = rewriter.create<vector::CreateMaskOp>( 2492 loc, 2493 VectorType::get(vtp.getShape(), rewriter.getI1Type(), 2494 vtp.getNumScalableDims()), 2495 b); 2496 if (xferOp.getMask()) { 2497 // Intersect the in-bounds with the mask specified as an op parameter. 2498 mask = rewriter.create<arith::AndIOp>(loc, mask, xferOp.getMask()); 2499 } 2500 2501 rewriter.updateRootInPlace(xferOp, [&]() { 2502 xferOp.getMaskMutable().assign(mask); 2503 xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true})); 2504 }); 2505 2506 return success(); 2507 } 2508 2509 private: 2510 const bool force32BitVectorIndices; 2511 }; 2512 2513 /// Conversion pattern for a `vector.create_mask` (0-D and 1-D only). 2514 class VectorCreateMaskOpConversion 2515 : public OpRewritePattern<vector::CreateMaskOp> { 2516 public: 2517 explicit VectorCreateMaskOpConversion(MLIRContext *context, 2518 bool enableIndexOpt) 2519 : mlir::OpRewritePattern<vector::CreateMaskOp>(context), 2520 force32BitVectorIndices(enableIndexOpt) {} 2521 2522 LogicalResult matchAndRewrite(vector::CreateMaskOp op, 2523 PatternRewriter &rewriter) const override { 2524 auto dstType = op.getType(); 2525 if (dstType.cast<VectorType>().isScalable()) 2526 return failure(); 2527 int64_t rank = dstType.getRank(); 2528 if (rank > 1) 2529 return failure(); 2530 rewriter.replaceOp( 2531 op, buildVectorComparison(rewriter, op, force32BitVectorIndices, 2532 rank == 0 ? 0 : dstType.getDimSize(0), 2533 op.getOperand(0))); 2534 return success(); 2535 } 2536 2537 private: 2538 const bool force32BitVectorIndices; 2539 }; 2540 2541 // Drop inner most contiguous unit dimensions from transfer_read operand. 2542 class DropInnerMostUnitDims : public OpRewritePattern<vector::TransferReadOp> { 2543 using OpRewritePattern<vector::TransferReadOp>::OpRewritePattern; 2544 2545 LogicalResult matchAndRewrite(vector::TransferReadOp readOp, 2546 PatternRewriter &rewriter) const override { 2547 // TODO: support 0-d corner case. 
2548 if (readOp.getTransferRank() == 0) 2549 return failure(); 2550 2551 // TODO: support mask. 2552 if (readOp.getMask()) 2553 return failure(); 2554 2555 auto srcType = readOp.getSource().getType().dyn_cast<MemRefType>(); 2556 if (!srcType || !srcType.hasStaticShape()) 2557 return failure(); 2558 2559 if (!readOp.getPermutationMap().isMinorIdentity()) 2560 return failure(); 2561 2562 auto targetType = readOp.getVectorType(); 2563 if (targetType.getRank() <= 1) 2564 return failure(); 2565 2566 SmallVector<int64_t> srcStrides; 2567 int64_t srcOffset; 2568 if (failed(getStridesAndOffset(srcType, srcStrides, srcOffset))) 2569 return failure(); 2570 2571 size_t dimsToDrop = 0; 2572 for (size_t i = 1; i < srcStrides.size(); ++i) { 2573 int dim = srcType.getRank() - i - 1; 2574 if (srcStrides[dim] == 1) { 2575 dimsToDrop++; 2576 } else { 2577 break; 2578 } 2579 } 2580 if (dimsToDrop == 0) 2581 return failure(); 2582 2583 auto resultTargetVecType = 2584 VectorType::get(targetType.getShape().drop_back(dimsToDrop), 2585 targetType.getElementType()); 2586 2587 MemRefType resultMemrefType; 2588 if (srcType.getLayout().getAffineMap().isIdentity()) { 2589 resultMemrefType = MemRefType::get( 2590 srcType.getShape().drop_back(dimsToDrop), srcType.getElementType(), 2591 {}, srcType.getMemorySpaceAsInt()); 2592 } else { 2593 AffineMap map = srcType.getLayout().getAffineMap(); 2594 int numSymbols = map.getNumSymbols(); 2595 for (size_t i = 0; i < dimsToDrop; ++i) { 2596 int dim = srcType.getRank() - i - 1; 2597 map = map.replace(rewriter.getAffineDimExpr(dim), 2598 rewriter.getAffineConstantExpr(0), 2599 map.getNumDims() - 1, numSymbols); 2600 } 2601 resultMemrefType = MemRefType::get( 2602 srcType.getShape().drop_back(dimsToDrop), srcType.getElementType(), 2603 map, srcType.getMemorySpaceAsInt()); 2604 } 2605 2606 auto loc = readOp.getLoc(); 2607 SmallVector<int64_t> offsets(srcType.getRank(), 0); 2608 SmallVector<int64_t> strides(srcType.getRank(), 1); 2609 2610 ArrayAttr inBoundsAttr = 2611 readOp.getInBounds() 2612 ? rewriter.getArrayAttr( 2613 readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop)) 2614 : ArrayAttr(); 2615 Value rankedReducedView = rewriter.create<memref::SubViewOp>( 2616 loc, resultMemrefType, readOp.getSource(), offsets, srcType.getShape(), 2617 strides); 2618 auto permMap = getTransferMinorIdentityMap( 2619 rankedReducedView.getType().cast<ShapedType>(), resultTargetVecType); 2620 Value result = rewriter.create<vector::TransferReadOp>( 2621 loc, resultTargetVecType, rankedReducedView, 2622 readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap), 2623 readOp.getPadding(), 2624 // TODO: support mask. 2625 /*mask=*/Value(), inBoundsAttr); 2626 rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(readOp, targetType, 2627 result); 2628 return success(); 2629 } 2630 }; 2631 2632 namespace { 2633 2634 /// This function checks to see if the vector combining kind 2635 /// is consistent with the integer or float element type. 
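/// For example (illustrative only):
///   isValidKind(/*isInt=*/true, CombiningKind::ADD);   // true
///   isValidKind(/*isInt=*/true, CombiningKind::MINF);  // false, float-only
///   isValidKind(/*isInt=*/false, CombiningKind::XOR);  // false, int-only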
static bool isValidKind(bool isInt, vector::CombiningKind kind) {
  using vector::CombiningKind;
  enum class KindType { FLOAT, INT, INVALID };
  KindType type{KindType::INVALID};
  switch (kind) {
  case CombiningKind::MINF:
  case CombiningKind::MAXF:
    type = KindType::FLOAT;
    break;
  case CombiningKind::MINUI:
  case CombiningKind::MINSI:
  case CombiningKind::MAXUI:
  case CombiningKind::MAXSI:
  case CombiningKind::AND:
  case CombiningKind::OR:
  case CombiningKind::XOR:
    type = KindType::INT;
    break;
  case CombiningKind::ADD:
  case CombiningKind::MUL:
    type = isInt ? KindType::INT : KindType::FLOAT;
    break;
  }
  bool isValidIntKind = (type == KindType::INT) && isInt;
  bool isValidFloatKind = (type == KindType::FLOAT) && (!isInt);
  return (isValidIntKind || isValidFloatKind);
}

/// This function constructs the appropriate integer or float
/// operation given the vector combining kind and operands. The
/// supported int operations are: add, mul, min (signed/unsigned),
/// max (signed/unsigned), and, or, xor. The supported float
/// operations are: add, mul, min and max.
static Value genOperator(Location loc, Value x, Value y,
                         vector::CombiningKind kind,
                         PatternRewriter &rewriter) {
  using vector::CombiningKind;

  auto elType = x.getType().cast<VectorType>().getElementType();
  bool isInt = elType.isIntOrIndex();

  Value combinedResult{nullptr};
  switch (kind) {
  case CombiningKind::ADD:
    if (isInt)
      combinedResult = rewriter.create<arith::AddIOp>(loc, x, y);
    else
      combinedResult = rewriter.create<arith::AddFOp>(loc, x, y);
    break;
  case CombiningKind::MUL:
    if (isInt)
      combinedResult = rewriter.create<arith::MulIOp>(loc, x, y);
    else
      combinedResult = rewriter.create<arith::MulFOp>(loc, x, y);
    break;
  case CombiningKind::MINUI:
    combinedResult = rewriter.create<arith::MinUIOp>(loc, x, y);
    break;
  case CombiningKind::MINSI:
    combinedResult = rewriter.create<arith::MinSIOp>(loc, x, y);
    break;
  case CombiningKind::MAXUI:
    combinedResult = rewriter.create<arith::MaxUIOp>(loc, x, y);
    break;
  case CombiningKind::MAXSI:
    combinedResult = rewriter.create<arith::MaxSIOp>(loc, x, y);
    break;
  case CombiningKind::AND:
    combinedResult = rewriter.create<arith::AndIOp>(loc, x, y);
    break;
  case CombiningKind::OR:
    combinedResult = rewriter.create<arith::OrIOp>(loc, x, y);
    break;
  case CombiningKind::XOR:
    combinedResult = rewriter.create<arith::XOrIOp>(loc, x, y);
    break;
  case CombiningKind::MINF:
    combinedResult = rewriter.create<arith::MinFOp>(loc, x, y);
    break;
  case CombiningKind::MAXF:
    combinedResult = rewriter.create<arith::MaxFOp>(loc, x, y);
    break;
  }
  return combinedResult;
}

/// Convert vector.scan op into arith ops and
/// vector.insert_strided_slice/extract_strided_slice.
///
/// Ex:
/// ```
///   %0:2 = vector.scan <add>, %arg0, %arg1
///     {inclusive = true, reduction_dim = 1}
///     : (vector<2x3xi32>, vector<2xi32>) to (vector<2x3xi32>, vector<2xi32>)
/// ```
/// Gets converted to:
/// ```
///   %cst = arith.constant dense<0> : vector<2x3xi32>
///   %0 = vector.extract_strided_slice %arg0
///     {offsets = [0, 0], sizes = [2, 1], strides = [1, 1]}
///     : vector<2x3xi32> to vector<2x1xi32>
///   %1 = vector.insert_strided_slice %0, %cst
///     {offsets = [0, 0], strides = [1, 1]}
///     : vector<2x1xi32> into vector<2x3xi32>
///   %2 = vector.extract_strided_slice %arg0
///     {offsets = [0, 1], sizes = [2, 1], strides = [1, 1]}
///     : vector<2x3xi32> to vector<2x1xi32>
///   %3 = arith.addi %0, %2 : vector<2x1xi32>
///   %4 = vector.insert_strided_slice %3, %1
///     {offsets = [0, 1], strides = [1, 1]}
///     : vector<2x1xi32> into vector<2x3xi32>
///   %5 = vector.extract_strided_slice %arg0
///     {offsets = [0, 2], sizes = [2, 1], strides = [1, 1]}
///     : vector<2x3xi32> to vector<2x1xi32>
///   %6 = arith.addi %3, %5 : vector<2x1xi32>
///   %7 = vector.insert_strided_slice %6, %4
///     {offsets = [0, 2], strides = [1, 1]}
///     : vector<2x1xi32> into vector<2x3xi32>
///   %8 = vector.shape_cast %6 : vector<2x1xi32> to vector<2xi32>
///   return %7, %8 : vector<2x3xi32>, vector<2xi32>
/// ```
struct ScanToArithOps : public OpRewritePattern<vector::ScanOp> {
  using OpRewritePattern<vector::ScanOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ScanOp scanOp,
                                PatternRewriter &rewriter) const override {
    auto loc = scanOp.getLoc();
    VectorType destType = scanOp.getDestType();
    ArrayRef<int64_t> destShape = destType.getShape();
    auto elType = destType.getElementType();
    bool isInt = elType.isIntOrIndex();
    if (!isValidKind(isInt, scanOp.getKind()))
      return failure();

    VectorType resType = VectorType::get(destShape, elType);
    Value result = rewriter.create<arith::ConstantOp>(
        loc, resType, rewriter.getZeroAttr(resType));
    int64_t reductionDim = scanOp.getReductionDim();
    bool inclusive = scanOp.getInclusive();
    int64_t destRank = destType.getRank();
    VectorType initialValueType = scanOp.getInitialValueType();
    int64_t initialValueRank = initialValueType.getRank();

    SmallVector<int64_t> reductionShape(destShape.begin(), destShape.end());
    reductionShape[reductionDim] = 1;
    VectorType reductionType = VectorType::get(reductionShape, elType);
    SmallVector<int64_t> offsets(destRank, 0);
    SmallVector<int64_t> strides(destRank, 1);
    SmallVector<int64_t> sizes(destShape.begin(), destShape.end());
    sizes[reductionDim] = 1;
    ArrayAttr scanSizes = rewriter.getI64ArrayAttr(sizes);
    ArrayAttr scanStrides = rewriter.getI64ArrayAttr(strides);

    Value lastOutput, lastInput;
    for (int i = 0; i < destShape[reductionDim]; i++) {
      offsets[reductionDim] = i;
      ArrayAttr scanOffsets = rewriter.getI64ArrayAttr(offsets);
      Value input = rewriter.create<vector::ExtractStridedSliceOp>(
          loc, reductionType, scanOp.getSource(), scanOffsets, scanSizes,
          scanStrides);
      Value output;
      if (i == 0) {
        if (inclusive) {
          output = input;
        } else {
          if (initialValueRank == 0) {
            // ShapeCastOp cannot handle 0-D vectors.
            output = rewriter.create<vector::BroadcastOp>(
                loc, input.getType(), scanOp.getInitialValue());
          } else {
            output = rewriter.create<vector::ShapeCastOp>(
                loc, input.getType(), scanOp.getInitialValue());
          }
        }
      } else {
        Value y = inclusive ?
input : lastInput; 2804 output = genOperator(loc, lastOutput, y, scanOp.getKind(), rewriter); 2805 assert(output != nullptr); 2806 } 2807 result = rewriter.create<vector::InsertStridedSliceOp>( 2808 loc, output, result, offsets, strides); 2809 lastOutput = output; 2810 lastInput = input; 2811 } 2812 2813 Value reduction; 2814 if (initialValueRank == 0) { 2815 Value v = rewriter.create<vector::ExtractOp>(loc, lastOutput, 0); 2816 reduction = 2817 rewriter.create<vector::BroadcastOp>(loc, initialValueType, v); 2818 } else { 2819 reduction = rewriter.create<vector::ShapeCastOp>(loc, initialValueType, 2820 lastOutput); 2821 } 2822 2823 rewriter.replaceOp(scanOp, {result, reduction}); 2824 return success(); 2825 } 2826 }; 2827 2828 } // namespace 2829 2830 void mlir::vector::populateVectorMaskMaterializationPatterns( 2831 RewritePatternSet &patterns, bool force32BitVectorIndices) { 2832 patterns.add<VectorCreateMaskOpConversion, 2833 MaterializeTransferMask<vector::TransferReadOp>, 2834 MaterializeTransferMask<vector::TransferWriteOp>>( 2835 patterns.getContext(), force32BitVectorIndices); 2836 } 2837 2838 void mlir::vector::populateShapeCastFoldingPatterns( 2839 RewritePatternSet &patterns) { 2840 patterns.add<ShapeCastOpFolder>(patterns.getContext()); 2841 } 2842 2843 void mlir::vector::populateBubbleVectorBitCastOpPatterns( 2844 RewritePatternSet &patterns) { 2845 patterns.add<BubbleDownVectorBitCastForExtract, 2846 BubbleDownBitCastForStridedSliceExtract, 2847 BubbleUpBitCastForStridedSliceInsert>(patterns.getContext()); 2848 } 2849 2850 void mlir::vector::populateVectorBroadcastLoweringPatterns( 2851 RewritePatternSet &patterns) { 2852 patterns.add<BroadcastOpLowering>(patterns.getContext()); 2853 } 2854 2855 void mlir::vector::populateVectorMaskOpLoweringPatterns( 2856 RewritePatternSet &patterns) { 2857 patterns.add<CreateMaskOpLowering, ConstantMaskOpLowering>( 2858 patterns.getContext()); 2859 } 2860 2861 void mlir::vector::populateVectorShapeCastLoweringPatterns( 2862 RewritePatternSet &patterns) { 2863 patterns.add<ShapeCastOp2DDownCastRewritePattern, 2864 ShapeCastOp2DUpCastRewritePattern, ShapeCastOpRewritePattern>( 2865 patterns.getContext()); 2866 } 2867 2868 void mlir::vector::populateVectorContractLoweringPatterns( 2869 RewritePatternSet &patterns, VectorTransformsOptions options) { 2870 patterns.add<OuterProductOpLowering>(patterns.getContext()); 2871 patterns.add<ContractionOpLowering, ContractionOpToMatmulOpLowering, 2872 ContractionOpToOuterProductOpLowering>(options, 2873 patterns.getContext()); 2874 } 2875 2876 void mlir::vector::populateVectorTransposeLoweringPatterns( 2877 RewritePatternSet &patterns, VectorTransformsOptions options) { 2878 patterns.add<TransposeOpLowering, TransposeOp2DToShuffleLowering>( 2879 options, patterns.getContext()); 2880 } 2881 2882 void mlir::vector::populateVectorReductionToContractPatterns( 2883 RewritePatternSet &patterns) { 2884 patterns.add<MultiReduceToContract, CombineContractBroadcast, 2885 CombineContractTranspose, ReorderCastOpsOnBroadcast, 2886 ReorderElementwiseOpsOnTranspose>(patterns.getContext()); 2887 } 2888 2889 void mlir::vector:: 2890 populateVectorTransferCollapseInnerMostContiguousDimsPatterns( 2891 RewritePatternSet &patterns) { 2892 patterns.add<DropInnerMostUnitDims>(patterns.getContext()); 2893 } 2894 2895 void mlir::vector::populateVectorTransferLoweringPatterns( 2896 RewritePatternSet &patterns, llvm::Optional<unsigned> maxTransferRank) { 2897 patterns.add<TransferReadToVectorLoadLowering, 2898 
TransferWriteToVectorStoreLowering>(patterns.getContext(), 2899 maxTransferRank); 2900 patterns 2901 .add<VectorLoadToMemrefLoadLowering, VectorStoreToMemrefStoreLowering>( 2902 patterns.getContext()); 2903 } 2904 2905 void mlir::vector::populateVectorScanLoweringPatterns( 2906 RewritePatternSet &patterns) { 2907 patterns.add<ScanToArithOps>(patterns.getContext()); 2908 } 2909
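
// Example usage (an illustrative sketch, not part of this file's API): a pass
// would typically populate one or more of the pattern sets above and run them
// with the greedy rewrite driver, e.g.:
//
//   RewritePatternSet patterns(ctx);
//   vector::populateVectorContractLoweringPatterns(patterns, options);
//   vector::populateVectorScanLoweringPatterns(patterns);
//   (void)applyPatternsAndFoldGreedily(op, std::move(patterns));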