//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/LLVMCommon/VectorPattern.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
// E.g. vector<2x3x4xf32> becomes vector<3x4xf32>. Requires rank > 1.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
// E.g. vector<2x3x4xf32> becomes vector<4xf32>. Requires rank > 1.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
38 static Value insertOne(ConversionPatternRewriter &rewriter, 39 LLVMTypeConverter &typeConverter, Location loc, 40 Value val1, Value val2, Type llvmType, int64_t rank, 41 int64_t pos) { 42 if (rank == 1) { 43 auto idxType = rewriter.getIndexType(); 44 auto constant = rewriter.create<LLVM::ConstantOp>( 45 loc, typeConverter.convertType(idxType), 46 rewriter.getIntegerAttr(idxType, pos)); 47 return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2, 48 constant); 49 } 50 return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2, 51 rewriter.getI64ArrayAttr(pos)); 52 } 53 54 // Helper that picks the proper sequence for inserting. 55 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from, 56 Value into, int64_t offset) { 57 auto vectorType = into.getType().cast<VectorType>(); 58 if (vectorType.getRank() > 1) 59 return rewriter.create<InsertOp>(loc, from, into, offset); 60 return rewriter.create<vector::InsertElementOp>( 61 loc, vectorType, from, into, 62 rewriter.create<ConstantIndexOp>(loc, offset)); 63 } 64 65 // Helper that picks the proper sequence for extracting. 66 static Value extractOne(ConversionPatternRewriter &rewriter, 67 LLVMTypeConverter &typeConverter, Location loc, 68 Value val, Type llvmType, int64_t rank, int64_t pos) { 69 if (rank == 1) { 70 auto idxType = rewriter.getIndexType(); 71 auto constant = rewriter.create<LLVM::ConstantOp>( 72 loc, typeConverter.convertType(idxType), 73 rewriter.getIntegerAttr(idxType, pos)); 74 return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val, 75 constant); 76 } 77 return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val, 78 rewriter.getI64ArrayAttr(pos)); 79 } 80 81 // Helper that picks the proper sequence for extracting. 
82 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector, 83 int64_t offset) { 84 auto vectorType = vector.getType().cast<VectorType>(); 85 if (vectorType.getRank() > 1) 86 return rewriter.create<ExtractOp>(loc, vector, offset); 87 return rewriter.create<vector::ExtractElementOp>( 88 loc, vectorType.getElementType(), vector, 89 rewriter.create<ConstantIndexOp>(loc, offset)); 90 } 91 92 // Helper that returns a subset of `arrayAttr` as a vector of int64_t. 93 // TODO: Better support for attribute subtype forwarding + slicing. 94 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr, 95 unsigned dropFront = 0, 96 unsigned dropBack = 0) { 97 assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds"); 98 auto range = arrayAttr.getAsRange<IntegerAttr>(); 99 SmallVector<int64_t, 4> res; 100 res.reserve(arrayAttr.size() - dropFront - dropBack); 101 for (auto it = range.begin() + dropFront, eit = range.end() - dropBack; 102 it != eit; ++it) 103 res.push_back((*it).getValue().getSExtValue()); 104 return res; 105 } 106 107 // Helper that returns data layout alignment of a memref. 108 LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, 109 MemRefType memrefType, unsigned &align) { 110 Type elementTy = typeConverter.convertType(memrefType.getElementType()); 111 if (!elementTy) 112 return failure(); 113 114 // TODO: this should use the MLIR data layout when it becomes available and 115 // stop depending on translation. 116 llvm::LLVMContext llvmContext; 117 align = LLVM::TypeToLLVMIRTranslator(llvmContext) 118 .getPreferredAlignment(elementTy, typeConverter.getDataLayout()); 119 return success(); 120 } 121 122 // Return the minimal alignment value that satisfies all the AssumeAlignment 123 // uses of `value`. If no such uses exist, return 1. 
static unsigned getAssumedAlignment(Value value) {
  unsigned align = 1;
  // Fold in every memref.assume_alignment user via lcm so the result
  // satisfies each asserted alignment (for the usual power-of-two alignments
  // this equals the maximum).
  for (auto &u : value.getUses()) {
    Operation *owner = u.getOwner();
    if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
      align = mlir::lcm(align, op.alignment());
  }
  return align;
}

// Helper that returns data layout alignment of a memref associated with a
// load, store, scatter, or gather op, including additional information from
// assume_alignment calls on the source of the transfer
// `OpAdaptor` must expose getMemRefType() and base().
template <class OpAdaptor>
LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
                                   OpAdaptor op, unsigned &align) {
  // Start from the data-layout preferred alignment of the element type ...
  if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
    return failure();
  // ... then honor any stronger guarantee asserted on the base memref.
  align = std::max(align, getAssumedAlignment(op.base()));
  return success();
}

// Add an index vector component to a base pointer. This almost always succeeds
// unless the last stride is non-unit or the memory space is not zero.
// On success, `ptrs` holds a vector of pointers (one lane per element of
// `vType`'s leading dimension) suitable for masked gather/scatter.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  // Only a contiguous innermost dimension in the default memory space is
  // supported.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  // GEP with a vector index operand yields a vector of pointers.
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
166 static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc, 167 Value ptr, MemRefType memRefType, Type vt) { 168 auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt()); 169 return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr); 170 } 171 172 namespace { 173 174 /// Conversion pattern for a vector.bitcast. 175 class VectorBitCastOpConversion 176 : public ConvertOpToLLVMPattern<vector::BitCastOp> { 177 public: 178 using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern; 179 180 LogicalResult 181 matchAndRewrite(vector::BitCastOp bitCastOp, OpAdaptor adaptor, 182 ConversionPatternRewriter &rewriter) const override { 183 // Only 1-D vectors can be lowered to LLVM. 184 VectorType resultTy = bitCastOp.getType(); 185 if (resultTy.getRank() != 1) 186 return failure(); 187 Type newResultTy = typeConverter->convertType(resultTy); 188 rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy, 189 adaptor.getOperands()[0]); 190 return success(); 191 } 192 }; 193 194 /// Conversion pattern for a vector.matrix_multiply. 195 /// This is lowered directly to the proper llvm.intr.matrix.multiply. 196 class VectorMatmulOpConversion 197 : public ConvertOpToLLVMPattern<vector::MatmulOp> { 198 public: 199 using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern; 200 201 LogicalResult 202 matchAndRewrite(vector::MatmulOp matmulOp, OpAdaptor adaptor, 203 ConversionPatternRewriter &rewriter) const override { 204 rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>( 205 matmulOp, typeConverter->convertType(matmulOp.res().getType()), 206 adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(), 207 matmulOp.lhs_columns(), matmulOp.rhs_columns()); 208 return success(); 209 } 210 }; 211 212 /// Conversion pattern for a vector.flat_transpose. 213 /// This is lowered directly to the proper llvm.intr.matrix.transpose. 
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Forward row/column attributes directly to the matrix intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
/// Shared driver: resolves alignment and the strided element address, then
/// dispatches to the matching replaceLoadOrStoreOp overload above.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp,
                  typename LoadOrStoreOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment (data layout preference plus assume_alignment info).
    unsigned align;
    if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
                                    align)))
      return failure();

    // Resolve address: strided element pointer, then bitcast to a pointer to
    // the lowered vector type.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Resolve address: base element pointer plus the per-lane index vector.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Resolve address: base element pointer plus the per-lane index vector.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
369 class VectorExpandLoadOpConversion 370 : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> { 371 public: 372 using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern; 373 374 LogicalResult 375 matchAndRewrite(vector::ExpandLoadOp expand, OpAdaptor adaptor, 376 ConversionPatternRewriter &rewriter) const override { 377 auto loc = expand->getLoc(); 378 MemRefType memRefType = expand.getMemRefType(); 379 380 // Resolve address. 381 auto vtype = typeConverter->convertType(expand.getVectorType()); 382 Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), 383 adaptor.indices(), rewriter); 384 385 rewriter.replaceOpWithNewOp<LLVM::masked_expandload>( 386 expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru()); 387 return success(); 388 } 389 }; 390 391 /// Conversion pattern for a vector.compressstore. 392 class VectorCompressStoreOpConversion 393 : public ConvertOpToLLVMPattern<vector::CompressStoreOp> { 394 public: 395 using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern; 396 397 LogicalResult 398 matchAndRewrite(vector::CompressStoreOp compress, OpAdaptor adaptor, 399 ConversionPatternRewriter &rewriter) const override { 400 auto loc = compress->getLoc(); 401 MemRefType memRefType = compress.getMemRefType(); 402 403 // Resolve address. 404 Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(), 405 adaptor.indices(), rewriter); 406 407 rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>( 408 compress, adaptor.valueToStore(), ptr, adaptor.mask()); 409 return success(); 410 } 411 }; 412 413 /// Conversion pattern for all vector reductions. 
414 class VectorReductionOpConversion 415 : public ConvertOpToLLVMPattern<vector::ReductionOp> { 416 public: 417 explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv, 418 bool reassociateFPRed) 419 : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv), 420 reassociateFPReductions(reassociateFPRed) {} 421 422 LogicalResult 423 matchAndRewrite(vector::ReductionOp reductionOp, OpAdaptor adaptor, 424 ConversionPatternRewriter &rewriter) const override { 425 auto kind = reductionOp.kind(); 426 Type eltType = reductionOp.dest().getType(); 427 Type llvmType = typeConverter->convertType(eltType); 428 Value operand = adaptor.getOperands()[0]; 429 if (eltType.isIntOrIndex()) { 430 // Integer reductions: add/mul/min/max/and/or/xor. 431 if (kind == "add") 432 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(reductionOp, 433 llvmType, operand); 434 else if (kind == "mul") 435 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(reductionOp, 436 llvmType, operand); 437 else if (kind == "minui") 438 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>( 439 reductionOp, llvmType, operand); 440 else if (kind == "minsi") 441 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>( 442 reductionOp, llvmType, operand); 443 else if (kind == "maxui") 444 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>( 445 reductionOp, llvmType, operand); 446 else if (kind == "maxsi") 447 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>( 448 reductionOp, llvmType, operand); 449 else if (kind == "and") 450 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(reductionOp, 451 llvmType, operand); 452 else if (kind == "or") 453 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(reductionOp, 454 llvmType, operand); 455 else if (kind == "xor") 456 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(reductionOp, 457 llvmType, operand); 458 else 459 return failure(); 460 return success(); 461 } 462 463 if (!eltType.isa<FloatType>()) 464 return failure(); 465 466 // Floating-point 
reductions: add/mul/min/max 467 if (kind == "add") { 468 // Optional accumulator (or zero). 469 Value acc = adaptor.getOperands().size() > 1 470 ? adaptor.getOperands()[1] 471 : rewriter.create<LLVM::ConstantOp>( 472 reductionOp->getLoc(), llvmType, 473 rewriter.getZeroAttr(eltType)); 474 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>( 475 reductionOp, llvmType, acc, operand, 476 rewriter.getBoolAttr(reassociateFPReductions)); 477 } else if (kind == "mul") { 478 // Optional accumulator (or one). 479 Value acc = adaptor.getOperands().size() > 1 480 ? adaptor.getOperands()[1] 481 : rewriter.create<LLVM::ConstantOp>( 482 reductionOp->getLoc(), llvmType, 483 rewriter.getFloatAttr(eltType, 1.0)); 484 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>( 485 reductionOp, llvmType, acc, operand, 486 rewriter.getBoolAttr(reassociateFPReductions)); 487 } else if (kind == "minf") 488 // FIXME: MLIR's 'minf' and LLVM's 'vector_reduce_fmin' do not handle 489 // NaNs/-0.0/+0.0 in the same way. 490 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(reductionOp, 491 llvmType, operand); 492 else if (kind == "maxf") 493 // FIXME: MLIR's 'maxf' and LLVM's 'vector_reduce_fmax' do not handle 494 // NaNs/-0.0/+0.0 in the same way. 
495 rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(reductionOp, 496 llvmType, operand); 497 else 498 return failure(); 499 return success(); 500 } 501 502 private: 503 const bool reassociateFPReductions; 504 }; 505 506 class VectorShuffleOpConversion 507 : public ConvertOpToLLVMPattern<vector::ShuffleOp> { 508 public: 509 using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern; 510 511 LogicalResult 512 matchAndRewrite(vector::ShuffleOp shuffleOp, OpAdaptor adaptor, 513 ConversionPatternRewriter &rewriter) const override { 514 auto loc = shuffleOp->getLoc(); 515 auto v1Type = shuffleOp.getV1VectorType(); 516 auto v2Type = shuffleOp.getV2VectorType(); 517 auto vectorType = shuffleOp.getVectorType(); 518 Type llvmType = typeConverter->convertType(vectorType); 519 auto maskArrayAttr = shuffleOp.mask(); 520 521 // Bail if result type cannot be lowered. 522 if (!llvmType) 523 return failure(); 524 525 // Get rank and dimension sizes. 526 int64_t rank = vectorType.getRank(); 527 assert(v1Type.getRank() == rank); 528 assert(v2Type.getRank() == rank); 529 int64_t v1Dim = v1Type.getDimSize(0); 530 531 // For rank 1, where both operands have *exactly* the same vector type, 532 // there is direct shuffle support in LLVM. Use it! 533 if (rank == 1 && v1Type == v2Type) { 534 Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>( 535 loc, adaptor.v1(), adaptor.v2(), maskArrayAttr); 536 rewriter.replaceOp(shuffleOp, llvmShuffleOp); 537 return success(); 538 } 539 540 // For all other cases, insert the individual values individually. 
541 Type eltType; 542 llvm::errs() << llvmType << "\n"; 543 if (auto arrayType = llvmType.dyn_cast<LLVM::LLVMArrayType>()) 544 eltType = arrayType.getElementType(); 545 else 546 eltType = llvmType.cast<VectorType>().getElementType(); 547 Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType); 548 int64_t insPos = 0; 549 for (auto en : llvm::enumerate(maskArrayAttr)) { 550 int64_t extPos = en.value().cast<IntegerAttr>().getInt(); 551 Value value = adaptor.v1(); 552 if (extPos >= v1Dim) { 553 extPos -= v1Dim; 554 value = adaptor.v2(); 555 } 556 Value extract = extractOne(rewriter, *getTypeConverter(), loc, value, 557 eltType, rank, extPos); 558 insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract, 559 llvmType, rank, insPos++); 560 } 561 rewriter.replaceOp(shuffleOp, insert); 562 return success(); 563 } 564 }; 565 566 class VectorExtractElementOpConversion 567 : public ConvertOpToLLVMPattern<vector::ExtractElementOp> { 568 public: 569 using ConvertOpToLLVMPattern< 570 vector::ExtractElementOp>::ConvertOpToLLVMPattern; 571 572 LogicalResult 573 matchAndRewrite(vector::ExtractElementOp extractEltOp, OpAdaptor adaptor, 574 ConversionPatternRewriter &rewriter) const override { 575 auto vectorType = extractEltOp.getVectorType(); 576 auto llvmType = typeConverter->convertType(vectorType.getElementType()); 577 578 // Bail if result type cannot be lowered. 
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

/// Conversion pattern for a vector.extract.
class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // Peel off all leading positions with one extractvalue, leaving a 1-D
    // vector from which the scalar is then extracted.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///   vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm."<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///    -> !llvm."<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // n-D FMAs are first rank-reduced by VectorFMAOpNDRewritePattern below.
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

/// Conversion pattern for a vector.insertelement.
class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

/// Conversion pattern for a vector.insert.
class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // First pull out the innermost 1-D vector that will receive the scalar.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    // Write the updated innermost vector back into the aggregate.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///  %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0: vector<2x4xf32>
///  %va = vector.extractvalue %a[0] : vector<2x4xf32>
///  %vb = vector.extractvalue %b[0] : vector<2x4xf32>
///  %vc = vector.extractvalue %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
///  %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
///  %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    // Rank-1 FMAs are handled directly by VectorFMAOp1DConversion.
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    // Peel one leading dimension: emit an (n-1)-D FMA per outermost slice and
    // reassemble the result.
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Equal ranks are handled by the same-rank pattern below.
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each slice of the source along the most major
// dimension, the subvector (or scalar element) is extracted from the source
// and, while still a vector, combined with the corresponding destination
// subvector via a rank-reduced InsertStridedSliceOp; that new op recursively
// triggers this same pattern until the element level is reached.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    // Bail out when there are no offsets to work with.
    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Different ranks are handled by the companion different-rank pattern.
    if (rankDiff != 0)
      return failure();

    // Inserting a full-size vector is just a replacement.
    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    // Offset/size/stride along the most major (leading) dimension.
    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  // A contiguous buffer requires a unit stride in the innermost dimension.
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified to represent
  // contiguous dynamic shapes in other ways than with just empty/identity
  // layout.
  // NOTE(review): the loop bound `strides.size() - 1` assumes a ranked,
  // non-0-d memref here (empty `strides` would underflow in size_t before the
  // int conversion) — TODO confirm callers only pass ranked memrefs.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    // Each stride must statically equal the product of the inner sizes.
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

/// Conversion of vector.type_cast: rebuilds the target memref descriptor from
/// the source descriptor (bitcast pointers, zero offset, static sizes/strides).
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    // The already-converted operand must be an LLVM memref descriptor struct.
    auto llvmSourceDescriptorTy =
        adaptor.getOperands()[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(adaptor.getOperands()[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr: reuse the source buffer, bitcast to the target
    // element pointer type.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0: the cast result always views the buffer from its start.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    // Sizes come from the static target shape; strides from the
    // statically-computed contiguous strides.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type printType = printOp.getPrintType();

    // The printed type must be convertible to LLVM.
    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    // Pick the runtime print function for the element type; bail out on
    // anything without library support.
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  // How a scalar must be widened before being handed to the print runtime.
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  // Recursively unrolls `value` (of the given `rank`) into scalar print calls
  // interleaved with open/comma/close punctuation. At rank 0 the scalar is
  // (optionally) extended per `conversion` and printed via `printer`.
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      // Drop the leading dimension and recurse on each element/subvector.
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(), SymbolRefAttr::get(ref),
                                  params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    // Offset/size/stride along the most major (leading) dimension.
    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
    if (op.offsets().getValue().size() == 1) {
      // 1-D case: the strided slice is exactly a shuffle with the strided
      // index sequence.
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    // Recurse on each strided subvector along the leading dimension; the
    // recursion is picked up again by this pattern at a smaller rank.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /* dropFront=*/1),
          getI64SubArray(op.sizes(), /* dropFront=*/1),
          getI64SubArray(op.strides(), /* dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
1256 void mlir::populateVectorToLLVMConversionPatterns( 1257 LLVMTypeConverter &converter, RewritePatternSet &patterns, 1258 bool reassociateFPReductions) { 1259 MLIRContext *ctx = converter.getDialect()->getContext(); 1260 patterns.add<VectorFMAOpNDRewritePattern, 1261 VectorInsertStridedSliceOpDifferentRankRewritePattern, 1262 VectorInsertStridedSliceOpSameRankRewritePattern, 1263 VectorExtractStridedSliceOpConversion>(ctx); 1264 patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions); 1265 patterns 1266 .add<VectorBitCastOpConversion, VectorShuffleOpConversion, 1267 VectorExtractElementOpConversion, VectorExtractOpConversion, 1268 VectorFMAOp1DConversion, VectorInsertElementOpConversion, 1269 VectorInsertOpConversion, VectorPrintOpConversion, 1270 VectorTypeCastOpConversion, 1271 VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>, 1272 VectorLoadStoreConversion<vector::MaskedLoadOp, 1273 vector::MaskedLoadOpAdaptor>, 1274 VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>, 1275 VectorLoadStoreConversion<vector::MaskedStoreOp, 1276 vector::MaskedStoreOpAdaptor>, 1277 VectorGatherOpConversion, VectorScatterOpConversion, 1278 VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>( 1279 converter); 1280 // Transfer ops with rank > 1 are handled by VectorToSCF. 1281 populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1); 1282 } 1283 1284 void mlir::populateVectorToLLVMMatrixConversionPatterns( 1285 LLVMTypeConverter &converter, RewritePatternSet &patterns) { 1286 patterns.add<VectorMatmulOpConversion>(converter); 1287 patterns.add<VectorFlatTransposeOpConversion>(converter); 1288 } 1289