//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}
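// For instance (illustrative IR only), inserting into a rank-1 value lowers
// to `llvm.insertelement` with a constant index, while higher ranks lower to
// `llvm.insertvalue` on the wrapping array type:
//   %c0 = llvm.mlir.constant(0 : index) : i64
//   %v1 = llvm.insertelement %x, %v[%c0 : i64] : vector<4xf32>
//   %a1 = llvm.insertvalue %v1, %a[0] : !llvm.array<2 x vector<4xf32>>
// The extractOne helpers below mirror this with `llvm.extractelement` and
// `llvm.extractvalue`.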
// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

static Value createCastToIndexLike(ConversionPatternRewriter &rewriter,
                                   Location loc, Type targetType, Value value) {
  if (targetType == value.getType())
    return value;

  bool targetIsIndex = targetType.isIndex();
  bool valueIsIndex = value.getType().isIndex();
  if (targetIsIndex ^ valueIsIndex)
    return rewriter.create<IndexCastOp>(loc, targetType, value);

  auto targetIntegerType = targetType.dyn_cast<IntegerType>();
  auto valueIntegerType = value.getType().dyn_cast<IntegerType>();
  assert(targetIntegerType && valueIntegerType &&
         "unexpected cast between types other than integers and index");
  assert(targetIntegerType.getSignedness() == valueIntegerType.getSignedness());

  if (targetIntegerType.getWidth() > valueIntegerType.getWidth())
    return rewriter.create<SignExtendIOp>(loc, targetIntegerType, value);
  return rewriter.create<TruncateIOp>(loc, targetIntegerType, value);
}

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI32VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
    idxType = rewriter.getI32Type();
  } else {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI64VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = createCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = createCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}
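// For example (schematic IR, assuming `dim = 4`, 32-bit indices, no offset,
// and a bound `%b`), the helper above materializes roughly:
//   %indices = constant dense<[0, 1, 2, 3]> : vector<4xi32>
//   %bound   = <cast of %b to i32>
//   %bounds  = splat %bound : vector<4xi32>
//   %mask    = cmpi slt, %indices, %bounds : vector<4xi32>
// so lane i of the mask is active iff i < %b.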
// Helper that returns the data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Helper that returns the base address of a memref.
static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
                             Value memref, MemRefType memRefType, Value &base) {
  // Inspect stride and offset structure.
  //
  // TODO: flat memory only for now, generalize
  //
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
      offset != 0 || memRefType.getMemorySpace() != 0)
    return failure();
  base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
  return success();
}

// Helper that returns a vector of pointers, given a memref base and an index
// vector.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value indices,
                                    MemRefType memRefType, VectorType vType,
                                    Type iType, Value &ptrs) {
  Value base;
  if (failed(getBase(rewriter, loc, memref, memRefType, base)))
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
  return success();
}
// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpace());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.bitcast.
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};
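// For instance (illustrative), a same-width 1-D bitcast such as:
//   %1 = vector.bitcast %0 : vector<4xi32> to vector<8xi16>
// becomes a single LLVM dialect bitcast:
//   %1 = llvm.bitcast %0 : vector<4xi32> to vector<8xi16>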
/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces vector.load, vector.store,
/// vector.maskedload, and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
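/// For example (illustrative), a 1-D load such as:
///   %0 = vector.load %base[%i] : memref<100xf32>, vector<8xf32>
/// is lowered to a strided-element address computation, a bitcast of the
/// element pointer to a vector pointer, and an aligned llvm.load, roughly:
///   %p = llvm.getelementptr ... : !llvm.ptr<f32>
///   %v = llvm.bitcast %p : !llvm.ptr<f32> to !llvm.ptr<vector<8xf32>>
///   %0 = llvm.load %v {alignment = 4 : i64} : !llvm.ptr<vector<8xf32>>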
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    auto adaptor = LoadOrStoreOpAdaptor(operands);
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*this->getTypeConverter(), memRefTy, align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), gather.getMemRefType(),
                                  align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              gather.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};
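// For instance (schematic, not exact IR), a gather from a flat memref:
//   %g = vector.gather %base[%idxs], %mask, %pass
// becomes a single GEP that expands the index vector into a vector of
// pointers, followed by the masked gather intrinsic:
//   %ptrs = llvm.getelementptr %p[%idxs]       // vector of !llvm.ptr<f32>
//   %g    = llvm.intr.masked.gather %ptrs, %mask, %pass {alignment = 4}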
/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), scatter.getMemRefType(),
                                  align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
                              scatter.getMemRefType(), vType, iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                           adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
                                           adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};
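// The two patterns above map one-to-one onto the corresponding LLVM
// intrinsics. For example (illustrative), with an active mask [1, 0, 1]:
// llvm.intr.masked.expandload reads two *consecutive* elements from the base
// address and expands them into lanes 0 and 2 of the result (lane 1 keeps
// the pass-through value), while llvm.intr.masked.compressstore performs the
// inverse packing on stores.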
/// Conversion pattern for all vector reductions.
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};
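// For instance (illustrative, attribute spelling may differ), a
// floating-point add reduction:
//   %0 = vector.reduction "add", %v : vector<16xf32> into f32
// becomes, with the zero accumulator materialized by the pattern:
//   %acc = llvm.mlir.constant(0.0 : f32) : f32
//   %0 = "llvm.intr.vector.reduce.fadd"(%acc, %v)
//        {reassoc = false} : (f32, vector<16xf32>) -> f32
// Setting `reassociateFPReductions` flips `reassoc` to true, permitting a
// faster tree-wise reduction at the cost of strict FP evaluation order.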
/// Conversion pattern for a vector.create_mask (1-D only).
class VectorCreateMaskOpConversion
    : public ConvertOpToLLVMPattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(LLVMTypeConverter &typeConv,
                                        bool enableIndexOpt)
      : ConvertOpToLLVMPattern<vector::CreateMaskOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(vector::CreateMaskOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};

class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};
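// For example (illustrative), a rank-1 shuffle of identically typed operands:
//   %s = vector.shuffle %a, %b [0, 4, 1, 5] : vector<4xf32>, vector<4xf32>
// maps to a single llvm.shufflevector, whereas differently typed or
// higher-rank shuffles are unrolled by the loop above into a chain of
// extract/insert operations over an llvm.mlir.undef seed value.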
class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};
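// For instance (illustrative), the pattern above rewrites a scalar extract
// at position [1, 2]:
//   %e = vector.extract %v[1, 2] : vector<4x8xf32>
// by first peeling the enclosing array dimensions, then indexing the
// remaining 1-D vector:
//   %s = llvm.extractvalue %v[1] : !llvm.array<4 x vector<8xf32>>
//   %c = llvm.mlir.constant(2 : i64) : i64
//   %e = llvm.extractelement %s[%c : i64] : vector<8xf32>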
/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of rank n >= 2.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va :
///    (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};
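/// The insertion pattern below mirrors VectorExtractOpConversion. For
/// instance (illustrative), inserting a scalar at position [1, 2]:
///   %r = vector.insert %s, %v [1, 2] : f32 into vector<4x8xf32>
/// extracts the 1-D subvector at [1] with llvm.extractvalue, inserts the
/// element at lane 2 with llvm.insertelement, and puts the subvector back
/// with llvm.insertvalue.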
class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires
    // insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///  %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
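// For example (illustrative), inserting a vector<4xf32> into a
// vector<2x4xf32> at offsets [1, 0]:
//   %e = vector.extract %dst[1] : vector<2x4xf32>
//   %i = vector.insert_strided_slice %src, %e
//          {offsets = [0], strides = [1]} : vector<4xf32> into vector<4xf32>
//   %r = vector.insert %i, %dst[1] : vector<4xf32> into vector<2x4xf32>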
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each position along the outermost dimension of the
// slice:
//   1. the subvector (or element) at that position is extracted from the
//      source vector;
//   2. if it is still a vector, a rank-reduced InsertStridedSlice op is
//      created to insert it into the corresponding subvector extracted from
//      the destination;
//   3. the result is inserted back into the accumulated destination vector.
// The rank-reduced InsertStridedSlice ops from step 2. are lowered by this
// same pattern; the recursion is bounded as the rank strictly decreases.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};
/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified to represent
  // contiguous dynamic shapes in other ways than with just empty/identity
  // layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}
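// For example (illustrative), a memref<4x8xf32> with strides [8, 1] is
// contiguous since 8 == 1 * 8, whereas strides [16, 1] leave padding between
// rows and the check above returns None.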
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};
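/// For instance (illustrative), the pattern above rewrites:
///   %vm = vector.type_cast %m : memref<8x8xf32> to memref<vector<8x8xf32>>
/// by building a new memref descriptor that reuses the source's allocated
/// and aligned pointers (bitcast to the vector element type), with offset 0
/// and the static sizes/strides of the target type.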
/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  explicit VectorTransferConversion(LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertOpToLLVMPattern<ConcreteOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();
    // Only contiguous source buffers supported atm.
    auto strides = computeContiguousStrides(memRefType);
    if (!strides)
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();

    if (auto memrefVectorElementType =
            memRefType.getElementType().template dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that the memref vector type is a suffix of `vectorType`.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
    }

    // 1. Get the source/dst address as an LLVM vector pointer.
    VectorType vtp = xferOp.getVectorType();
    Value dataPtr = this->getStridedElementPtr(
        loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
    Value vectorDataPtr =
        castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));

    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // 4. Let dim be the memref dimension, compute the vector comparison mask:
    //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
    unsigned vecWidth = LLVM::getVectorNumElements(vtp).getFixedValue();
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value off = xferOp.indices()[lastIndex];
    Value dim = rewriter.create<DimOp>(loc, xferOp.source(), lastIndex);
    Value mask = buildVectorComparison(
        rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(),
                                       loc, xferOp, operands, vectorDataPtr,
                                       mask);
  }

private:
  const bool enableIndexOptimizations;
};

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
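/// For example (illustrative), the single-offset 1-D case:
///   %1 = vector.extract_strided_slice %0
///          {offsets = [2], sizes = [4], strides = [1]}
///          : vector<8xf32> to vector<4xf32>
/// becomes a self-shuffle selecting lanes [2, 3, 4, 5] of %0, while the n-D
/// case recurses on rank-reduced slices of each subvector.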
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
      : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace
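// The populate* entry points below are what conversion passes wire up. A
// minimal sketch of typical usage (assuming a standard LLVM lowering pass
// setup of this era) is:
//   LLVMTypeConverter converter(ctx);
//   OwningRewritePatternList patterns;
//   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
//   populateVectorToLLVMConversionPatterns(converter, patterns);
//   // ... then run applyPartialConversion with an LLVMConversionTarget.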
/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
    bool reassociateFPReductions, bool enableIndexOptimizations) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  // clang-format off
  patterns.insert<VectorFMAOpNDRewritePattern,
                  VectorInsertStridedSliceOpDifferentRankRewritePattern,
                  VectorInsertStridedSliceOpSameRankRewritePattern,
                  VectorExtractStridedSliceOpConversion>(ctx);
  patterns.insert<VectorReductionOpConversion>(
      converter, reassociateFPReductions);
  patterns.insert<VectorCreateMaskOpConversion,
                  VectorTransferConversion<TransferReadOp>,
                  VectorTransferConversion<TransferWriteOp>>(
      converter, enableIndexOptimizations);
  patterns
      .insert<VectorBitCastOpConversion,
              VectorShuffleOpConversion,
              VectorExtractElementOpConversion,
              VectorExtractOpConversion,
              VectorFMAOp1DConversion,
              VectorInsertElementOpConversion,
              VectorInsertOpConversion,
              VectorPrintOpConversion,
              VectorTypeCastOpConversion,
              VectorLoadStoreConversion<vector::LoadOp,
                                        vector::LoadOpAdaptor>,
              VectorLoadStoreConversion<vector::MaskedLoadOp,
                                        vector::MaskedLoadOpAdaptor>,
              VectorLoadStoreConversion<vector::StoreOp,
                                        vector::StoreOpAdaptor>,
              VectorLoadStoreConversion<vector::MaskedStoreOp,
                                        vector::MaskedStoreOpAdaptor>,
              VectorGatherOpConversion,
              VectorScatterOpConversion,
              VectorExpandLoadOpConversion,
              VectorCompressStoreOpConversion>(converter);
  // clang-format on
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
  patterns.insert<VectorMatmulOpConversion>(converter);
  patterns.insert<VectorFlatTransposeOpConversion>(converter);
}