//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/LLVMCommon/VectorPattern.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<arith::ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}
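
// For example (illustrative only; exact LLVM dialect assembly may vary across
// MLIR versions), inserting a scalar at position 3 of a 1-D vector lowers to:
//   %c3 = llvm.mlir.constant(3 : index) : i64
//   %1 = llvm.insertelement %val, %vec[%c3 : i64] : vector<8xf32>
// whereas positions into a rank > 1 aggregate use llvm.insertvalue. The
// extract helpers mirror the same choice with llvm.extractelement and
// llvm.extractvalue.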

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<arith::ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns the data layout alignment of a memref.
static LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                        MemRefType memrefType,
                                        unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Return the minimal alignment value that satisfies all the AssumeAlignment
// uses of `value`. If no such uses exist, return 1.
static unsigned getAssumedAlignment(Value value) {
  unsigned align = 1;
  for (auto &u : value.getUses()) {
    Operation *owner = u.getOwner();
    if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
      align = mlir::lcm(align, op.alignment());
  }
  return align;
}

// Helper that returns the data layout alignment of a memref associated with a
// load, store, scatter, or gather op, including additional information from
// assume_alignment calls on the source of the transfer.
template <class OpAdaptor>
LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
                                   OpAdaptor op, unsigned &align) {
  if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
    return failure();
  align = std::max(align, getAssumedAlignment(op.base()));
  return success();
}
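
// For example (illustrative), with
//   memref.assume_alignment %base, 32 : memref<?xf32>
// in the IR, a vector.load on %base whose f32 element type has a preferred
// alignment of 4 is emitted with alignment max(4, 32) = 32.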

// Add an index vector component to a base pointer. This succeeds unless the
// last stride is non-unit or the memory space is not zero.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

namespace {

/// Conversion pattern for a vector.bitcast.
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 adaptor.getOperands()[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};
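
// For example (illustrative; attribute syntax abbreviated), the two patterns
// above turn
//   %c = vector.matrix_multiply %a, %b
//          {lhs_rows = 2 : i32, lhs_columns = 3 : i32, rhs_columns = 2 : i32}
//        : (vector<6xf32>, vector<6xf32>) -> vector<4xf32>
// into a single llvm.intr.matrix.multiply call, and vector.flat_transpose
// into a single llvm.intr.matrix.transpose call, with no unrolling involved.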

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp,
                  typename LoadOrStoreOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
                                    align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};
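
// For example (illustrative), the pattern above turns
//   %l = vector.maskedload %base[%i], %mask, %pass
//     : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
// into an llvm.intr.masked.load on the strided element pointer bitcast to
// !llvm.ptr<vector<16xf32>>, with the alignment resolved as above.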

/// Conversion pattern for a vector.gather.
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};
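
// For example (illustrative; gather syntax abbreviated), the patterns above
// turn
//   %g = vector.gather %base[%i][%idxs], %mask, %pass
//     : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
//       into vector<16xf32>
// into a GEP yielding a vector of 16 element pointers followed by a single
// llvm.intr.masked.gather on those pointers.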

/// Conversion pattern for all vector reductions.
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    Value operand = adaptor.getOperands()[0];
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "minui")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operand);
      else if (kind == "minsi")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operand);
      else if (kind == "maxui")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operand);
      else if (kind == "maxsi")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operand);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(reductionOp,
                                                             llvmType, operand);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(reductionOp,
                                                            llvmType, operand);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(reductionOp,
                                                             llvmType, operand);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = adaptor.getOperands().size() > 1
                      ? adaptor.getOperands()[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operand,
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = adaptor.getOperands().size() > 1
                      ? adaptor.getOperands()[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operand,
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "minf")
      // FIXME: MLIR's 'minf' and LLVM's 'vector_reduce_fmin' do not handle
      // NaNs/-0.0/+0.0 in the same way.
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(reductionOp,
                                                            llvmType, operand);
    else if (kind == "maxf")
      // FIXME: MLIR's 'maxf' and LLVM's 'vector_reduce_fmax' do not handle
      // NaNs/-0.0/+0.0 in the same way.
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(reductionOp,
                                                            llvmType, operand);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};
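
// For example (illustrative), the pattern above turns
//   %0 = vector.reduction "add", %v, %acc : vector<16xf32> into f32
// into llvm.intr.vector.reduce.fadd(%acc, %v); reassociation is permitted on
// the intrinsic only when `reassociateFPReductions` is set.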

class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Type eltType;
    if (auto arrayType = llvmType.dyn_cast<LLVM::LLVMArrayType>())
      eltType = arrayType.getElementType();
    else
      eltType = llvmType.cast<VectorType>().getElementType();
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 eltType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};
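
// For example (illustrative), the rank-1, same-type case
//   %s = vector.shuffle %a, %b [0, 8, 1, 9] : vector<8xf32>, vector<8xf32>
// maps directly onto a single llvm.shufflevector; all other cases unroll into
// the per-element extract/insert loop above.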

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};
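
// For example (illustrative), the pattern above turns
//   %1 = vector.extract %0[0, 1] : vector<2x4xf32>
// into an llvm.extractvalue at [0] on the array-of-vector aggregate followed
// by an llvm.extractelement at position 1 of the resulting 1-D vector.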
649 /// 650 /// Example: 651 /// ``` 652 /// vector.fma %a, %a, %a : vector<8xf32> 653 /// ``` 654 /// is converted to: 655 /// ``` 656 /// llvm.intr.fmuladd %va, %va, %va: 657 /// (!llvm."<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">) 658 /// -> !llvm."<8 x f32>"> 659 /// ``` 660 class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> { 661 public: 662 using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern; 663 664 LogicalResult 665 matchAndRewrite(vector::FMAOp fmaOp, OpAdaptor adaptor, 666 ConversionPatternRewriter &rewriter) const override { 667 VectorType vType = fmaOp.getVectorType(); 668 if (vType.getRank() != 1) 669 return failure(); 670 rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(), 671 adaptor.rhs(), adaptor.acc()); 672 return success(); 673 } 674 }; 675 676 class VectorInsertElementOpConversion 677 : public ConvertOpToLLVMPattern<vector::InsertElementOp> { 678 public: 679 using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern; 680 681 LogicalResult 682 matchAndRewrite(vector::InsertElementOp insertEltOp, OpAdaptor adaptor, 683 ConversionPatternRewriter &rewriter) const override { 684 auto vectorType = insertEltOp.getDestVectorType(); 685 auto llvmType = typeConverter->convertType(vectorType); 686 687 // Bail if result type cannot be lowered. 688 if (!llvmType) 689 return failure(); 690 691 rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>( 692 insertEltOp, llvmType, adaptor.dest(), adaptor.source(), 693 adaptor.position()); 694 return success(); 695 } 696 }; 697 698 class VectorInsertOpConversion 699 : public ConvertOpToLLVMPattern<vector::InsertOp> { 700 public: 701 using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern; 702 703 LogicalResult 704 matchAndRewrite(vector::InsertOp insertOp, OpAdaptor adaptor, 705 ConversionPatternRewriter &rewriter) const override { 706 auto loc = insertOp->getLoc(); 707 auto sourceType = insertOp.getSourceType(); 708 auto destVectorType = insertOp.getDestVectorType(); 709 auto llvmResultType = typeConverter->convertType(destVectorType); 710 auto positionArrayAttr = insertOp.position(); 711 712 // Bail if result type cannot be lowered. 713 if (!llvmResultType) 714 return failure(); 715 716 // Overwrite entire vector with value. Should be handled by folder, but 717 // just to be safe. 718 if (positionArrayAttr.empty()) { 719 rewriter.replaceOp(insertOp, adaptor.source()); 720 return success(); 721 } 722 723 // One-shot insertion of a vector into an array (only requires insertvalue). 724 if (sourceType.isa<VectorType>()) { 725 Value inserted = rewriter.create<LLVM::InsertValueOp>( 726 loc, llvmResultType, adaptor.dest(), adaptor.source(), 727 positionArrayAttr); 728 rewriter.replaceOp(insertOp, inserted); 729 return success(); 730 } 731 732 // Potential extraction of 1-D vector from array. 733 auto *context = insertOp->getContext(); 734 Value extracted = adaptor.dest(); 735 auto positionAttrs = positionArrayAttr.getValue(); 736 auto position = positionAttrs.back().cast<IntegerAttr>(); 737 auto oneDVectorType = destVectorType; 738 if (positionAttrs.size() > 1) { 739 oneDVectorType = reducedVectorTypeBack(destVectorType); 740 auto nMinusOnePositionAttrs = 741 ArrayAttr::get(context, positionAttrs.drop_back()); 742 extracted = rewriter.create<LLVM::ExtractValueOp>( 743 loc, typeConverter->convertType(oneDVectorType), extracted, 744 nMinusOnePositionAttrs); 745 } 746 747 // Insertion of an element into a 1-D LLVM vector. 

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///  %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0: vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case, the pattern extracts a properly ranked
// subvector from the destination vector into which to insert:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//      destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each slice of the source along the outermost
// dimension:
//   1. the proper subvector (or element) is extracted from the source
//   2. if the extracted value is still a vector, the corresponding subvector
//      is extracted from the destination and a new, lower-rank
//      InsertStridedSlice op is created to insert the source into it
//   3. the result is inserted back into the destination at the proper offset.
// The lower-rank InsertStridedSlice ops from step 2. are picked up again by
// this same pattern; the recursion is bounded because the rank strictly
// decreases.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. Extract the proper subvector (or element) from source.
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from the
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};
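
// For example (illustrative), the same-rank case
//   %r = vector.insert_strided_slice %src, %dst
//          {offsets = [2, 0], strides = [1, 1]}
//        : vector<2x4xf32> into vector<8x4xf32>
// peels the outermost dimension: rows %src[0] and %src[1] are inserted at
// destination offsets 2 and 3 through lower-rank insert_strided_slice ops.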

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified to represent
  // contiguous dynamic shapes in other ways than with just empty/identity
  // layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}
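
// For example, a memref<4x8xf32> with identity layout has strides [8, 1] and
// passes the check strides[0] == strides[1] * sizes[1] (8 == 1 * 8), whereas
// a padded layout with strides [16, 1] fails it and is rejected as
// non-contiguous.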

class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        adaptor.getOperands()[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(adaptor.getOperands()[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};
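
// For example (illustrative), the pattern above turns
//   %vm = vector.type_cast %m : memref<8x8xf32> to memref<vector<8x8xf32>>
// into a new memref descriptor that reuses the source's allocated and aligned
// pointers (bitcast to the vector element type) with offset 0 and statically
// known sizes/strides.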

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<arith::ExtUIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<arith::ExtSIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(), SymbolRefAttr::get(ref),
                                  params);
  }
};
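
// For example (illustrative), the pattern above turns
//   vector.print %v : vector<2xf32>
// into runtime calls roughly equivalent to
//   printOpen(); printF32(v[0]); printComma(); printF32(v[1]);
//   printClose(); printNewline();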

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};
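
// For example (illustrative), a single-offset slice such as
//   %1 = vector.extract_strided_slice %0
//       {offsets = [2], sizes = [4], strides = [2]}
//     : vector<16xf32> to vector<4xf32>
// becomes one vector.shuffle with mask [2, 4, 6, 8].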

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.add<VectorFMAOpNDRewritePattern,
               VectorInsertStridedSliceOpDifferentRankRewritePattern,
               VectorInsertStridedSliceOpSameRankRewritePattern,
               VectorExtractStridedSliceOpConversion>(ctx);
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>(
          converter);
  // Transfer ops with rank > 1 are handled by VectorToSCF.
  populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<VectorMatmulOpConversion>(converter);
  patterns.add<VectorFlatTransposeOpConversion>(converter);
}
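
// Typical usage (a minimal sketch, assuming a standalone conversion pass; the
// surrounding pass boilerplate is hypothetical and not part of this file):
//
//   LLVMTypeConverter converter(&getContext());
//   RewritePatternSet patterns(&getContext());
//   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
//   populateVectorToLLVMConversionPatterns(converter, patterns,
//                                          /*reassociateFPReductions=*/false);
//   LLVMConversionTarget target(getContext());
//   if (failed(applyPartialConversion(getOperation(), target,
//                                     std::move(patterns))))
//     signalPassFailure();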