1 //===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
10 
11 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
12 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
13 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
14 #include "mlir/Dialect/StandardOps/IR/Ops.h"
15 #include "mlir/Dialect/Vector/VectorOps.h"
16 #include "mlir/IR/BuiltinTypes.h"
17 #include "mlir/Target/LLVMIR/TypeTranslation.h"
18 #include "mlir/Transforms/DialectConversion.h"
19 
20 using namespace mlir;
21 using namespace mlir::vector;
22 
23 // Helper to reduce vector type by one rank at front.
24 static VectorType reducedVectorTypeFront(VectorType tp) {
25   assert((tp.getRank() > 1) && "unlowerable vector type");
26   return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
27 }
28 
29 // Helper to reduce vector type by *all* but one rank at back.
30 static VectorType reducedVectorTypeBack(VectorType tp) {
31   assert((tp.getRank() > 1) && "unlowerable vector type");
32   return VectorType::get(tp.getShape().take_back(), tp.getElementType());
33 }
34 
35 // Helper that picks the proper sequence for inserting.
36 static Value insertOne(ConversionPatternRewriter &rewriter,
37                        LLVMTypeConverter &typeConverter, Location loc,
38                        Value val1, Value val2, Type llvmType, int64_t rank,
39                        int64_t pos) {
40   if (rank == 1) {
41     auto idxType = rewriter.getIndexType();
42     auto constant = rewriter.create<LLVM::ConstantOp>(
43         loc, typeConverter.convertType(idxType),
44         rewriter.getIntegerAttr(idxType, pos));
45     return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
46                                                   constant);
47   }
48   return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
49                                               rewriter.getI64ArrayAttr(pos));
50 }
51 
52 // Helper that picks the proper sequence for inserting.
53 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
54                        Value into, int64_t offset) {
55   auto vectorType = into.getType().cast<VectorType>();
56   if (vectorType.getRank() > 1)
57     return rewriter.create<InsertOp>(loc, from, into, offset);
58   return rewriter.create<vector::InsertElementOp>(
59       loc, vectorType, from, into,
60       rewriter.create<ConstantIndexOp>(loc, offset));
61 }
62 
63 // Helper that picks the proper sequence for extracting.
64 static Value extractOne(ConversionPatternRewriter &rewriter,
65                         LLVMTypeConverter &typeConverter, Location loc,
66                         Value val, Type llvmType, int64_t rank, int64_t pos) {
67   if (rank == 1) {
68     auto idxType = rewriter.getIndexType();
69     auto constant = rewriter.create<LLVM::ConstantOp>(
70         loc, typeConverter.convertType(idxType),
71         rewriter.getIntegerAttr(idxType, pos));
72     return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
73                                                    constant);
74   }
75   return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
76                                                rewriter.getI64ArrayAttr(pos));
77 }
78 
79 // Helper that picks the proper sequence for extracting.
80 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
81                         int64_t offset) {
82   auto vectorType = vector.getType().cast<VectorType>();
83   if (vectorType.getRank() > 1)
84     return rewriter.create<ExtractOp>(loc, vector, offset);
85   return rewriter.create<vector::ExtractElementOp>(
86       loc, vectorType.getElementType(), vector,
87       rewriter.create<ConstantIndexOp>(loc, offset));
88 }
89 
90 // Helper that returns a subset of `arrayAttr` as a vector of int64_t.
91 // TODO: Better support for attribute subtype forwarding + slicing.
92 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
93                                               unsigned dropFront = 0,
94                                               unsigned dropBack = 0) {
95   assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
96   auto range = arrayAttr.getAsRange<IntegerAttr>();
97   SmallVector<int64_t, 4> res;
98   res.reserve(arrayAttr.size() - dropFront - dropBack);
99   for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
100        it != eit; ++it)
101     res.push_back((*it).getValue().getSExtValue());
102   return res;
103 }
104 
105 // Helper that returns a vector comparison that constructs a mask:
106 //     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
107 //
108 // NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
109 //       much more compact, IR for this operation, but LLVM eventually
110 //       generates more elaborate instructions for this intrinsic since it
111 //       is very conservative on the boundary conditions.
112 static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
113                                    Operation *op, bool enableIndexOptimizations,
114                                    int64_t dim, Value b, Value *off = nullptr) {
115   auto loc = op->getLoc();
116   // If we can assume all indices fit in 32-bit, we perform the vector
117   // comparison in 32-bit to get a higher degree of SIMD parallelism.
118   // Otherwise we perform the vector comparison using 64-bit indices.
119   Value indices;
120   Type idxType;
121   if (enableIndexOptimizations) {
122     indices = rewriter.create<ConstantOp>(
123         loc, rewriter.getI32VectorAttr(
124                  llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
125     idxType = rewriter.getI32Type();
126   } else {
127     indices = rewriter.create<ConstantOp>(
128         loc, rewriter.getI64VectorAttr(
129                  llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
130     idxType = rewriter.getI64Type();
131   }
132   // Add in an offset if requested.
133   if (off) {
134     Value o = rewriter.create<IndexCastOp>(loc, idxType, *off);
135     Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
136     indices = rewriter.create<AddIOp>(loc, ov, indices);
137   }
138   // Construct the vector comparison.
139   Value bound = rewriter.create<IndexCastOp>(loc, idxType, b);
140   Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
141   return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
142 }
143 
144 // Helper that returns data layout alignment of a memref.
145 LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
146                                  MemRefType memrefType, unsigned &align) {
147   Type elementTy = typeConverter.convertType(memrefType.getElementType());
148   if (!elementTy)
149     return failure();
150 
151   // TODO: this should use the MLIR data layout when it becomes available and
152   // stop depending on translation.
153   llvm::LLVMContext llvmContext;
154   align = LLVM::TypeToLLVMIRTranslator(llvmContext)
155               .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
156   return success();
157 }
158 
159 // Helper that returns the base address of a memref.
160 static LogicalResult getBase(ConversionPatternRewriter &rewriter, Location loc,
161                              Value memref, MemRefType memRefType, Value &base) {
162   // Inspect stride and offset structure.
163   //
164   // TODO: flat memory only for now, generalize
165   //
166   int64_t offset;
167   SmallVector<int64_t, 4> strides;
168   auto successStrides = getStridesAndOffset(memRefType, strides, offset);
169   if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
170       offset != 0 || memRefType.getMemorySpace() != 0)
171     return failure();
172   base = MemRefDescriptor(memref).alignedPtr(rewriter, loc);
173   return success();
174 }
175 
176 // Helper that returns vector of pointers given a memref base with index vector.
177 static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
178                                     Location loc, Value memref, Value indices,
179                                     MemRefType memRefType, VectorType vType,
180                                     Type iType, Value &ptrs) {
181   Value base;
182   if (failed(getBase(rewriter, loc, memref, memRefType, base)))
183     return failure();
184   auto pType = MemRefDescriptor(memref).getElementPtrType();
185   auto ptrsType = LLVM::LLVMFixedVectorType::get(pType, vType.getDimSize(0));
186   ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
187   return success();
188 }
189 
190 // Casts a strided element pointer to a vector pointer. The vector pointer
191 // would always be on address space 0, therefore addrspacecast shall be
192 // used when source/dst memrefs are not on address space 0.
193 static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
194                          Value ptr, MemRefType memRefType, Type vt) {
195   auto pType =
196       LLVM::LLVMPointerType::get(vt.template cast<LLVM::LLVMFixedVectorType>());
197   if (memRefType.getMemorySpace() == 0)
198     return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
199   return rewriter.create<LLVM::AddrSpaceCastOp>(loc, pType, ptr);
200 }
201 
202 static LogicalResult
203 replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
204                                  LLVMTypeConverter &typeConverter, Location loc,
205                                  TransferReadOp xferOp,
206                                  ArrayRef<Value> operands, Value dataPtr) {
207   unsigned align;
208   if (failed(getMemRefAlignment(
209           typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
210     return failure();
211   rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
212   return success();
213 }
214 
215 static LogicalResult
216 replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
217                             LLVMTypeConverter &typeConverter, Location loc,
218                             TransferReadOp xferOp, ArrayRef<Value> operands,
219                             Value dataPtr, Value mask) {
220   auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
221   VectorType fillType = xferOp.getVectorType();
222   Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
223   fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);
224 
225   Type vecTy = typeConverter.convertType(xferOp.getVectorType());
226   if (!vecTy)
227     return failure();
228 
229   unsigned align;
230   if (failed(getMemRefAlignment(
231           typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
232     return failure();
233 
234   rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
235       xferOp, vecTy, dataPtr, mask, ValueRange{fill},
236       rewriter.getI32IntegerAttr(align));
237   return success();
238 }
239 
240 static LogicalResult
241 replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
242                                  LLVMTypeConverter &typeConverter, Location loc,
243                                  TransferWriteOp xferOp,
244                                  ArrayRef<Value> operands, Value dataPtr) {
245   unsigned align;
246   if (failed(getMemRefAlignment(
247           typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
248     return failure();
249   auto adaptor = TransferWriteOpAdaptor(operands);
250   rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
251                                              align);
252   return success();
253 }
254 
255 static LogicalResult
256 replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
257                             LLVMTypeConverter &typeConverter, Location loc,
258                             TransferWriteOp xferOp, ArrayRef<Value> operands,
259                             Value dataPtr, Value mask) {
260   unsigned align;
261   if (failed(getMemRefAlignment(
262           typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
263     return failure();
264 
265   auto adaptor = TransferWriteOpAdaptor(operands);
266   rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
267       xferOp, adaptor.vector(), dataPtr, mask,
268       rewriter.getI32IntegerAttr(align));
269   return success();
270 }
271 
272 static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
273                                                   ArrayRef<Value> operands) {
274   return TransferReadOpAdaptor(operands);
275 }
276 
277 static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
278                                                    ArrayRef<Value> operands) {
279   return TransferWriteOpAdaptor(operands);
280 }
281 
282 namespace {
283 
284 /// Conversion pattern for a vector.matrix_multiply.
285 /// This is lowered directly to the proper llvm.intr.matrix.multiply.
286 class VectorMatmulOpConversion
287     : public ConvertOpToLLVMPattern<vector::MatmulOp> {
288 public:
289   using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;
290 
291   LogicalResult
292   matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
293                   ConversionPatternRewriter &rewriter) const override {
294     auto adaptor = vector::MatmulOpAdaptor(operands);
295     rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
296         matmulOp, typeConverter->convertType(matmulOp.res().getType()),
297         adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
298         matmulOp.lhs_columns(), matmulOp.rhs_columns());
299     return success();
300   }
301 };
302 
303 /// Conversion pattern for a vector.flat_transpose.
304 /// This is lowered directly to the proper llvm.intr.matrix.transpose.
305 class VectorFlatTransposeOpConversion
306     : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
307 public:
308   using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;
309 
310   LogicalResult
311   matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
312                   ConversionPatternRewriter &rewriter) const override {
313     auto adaptor = vector::FlatTransposeOpAdaptor(operands);
314     rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
315         transOp, typeConverter->convertType(transOp.res().getType()),
316         adaptor.matrix(), transOp.rows(), transOp.columns());
317     return success();
318   }
319 };
320 
321 /// Conversion pattern for a vector.maskedload.
322 class VectorMaskedLoadOpConversion
323     : public ConvertOpToLLVMPattern<vector::MaskedLoadOp> {
324 public:
325   using ConvertOpToLLVMPattern<vector::MaskedLoadOp>::ConvertOpToLLVMPattern;
326 
327   LogicalResult
328   matchAndRewrite(vector::MaskedLoadOp load, ArrayRef<Value> operands,
329                   ConversionPatternRewriter &rewriter) const override {
330     auto loc = load->getLoc();
331     auto adaptor = vector::MaskedLoadOpAdaptor(operands);
332     MemRefType memRefType = load.getMemRefType();
333 
334     // Resolve alignment.
335     unsigned align;
336     if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
337       return failure();
338 
339     // Resolve address.
340     auto vtype = typeConverter->convertType(load.getResultVectorType());
341     Value dataPtr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
342                                                adaptor.indices(), rewriter);
343     Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefType, vtype);
344 
345     rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
346         load, vtype, ptr, adaptor.mask(), adaptor.pass_thru(),
347         rewriter.getI32IntegerAttr(align));
348     return success();
349   }
350 };
351 
352 /// Conversion pattern for a vector.maskedstore.
353 class VectorMaskedStoreOpConversion
354     : public ConvertOpToLLVMPattern<vector::MaskedStoreOp> {
355 public:
356   using ConvertOpToLLVMPattern<vector::MaskedStoreOp>::ConvertOpToLLVMPattern;
357 
358   LogicalResult
359   matchAndRewrite(vector::MaskedStoreOp store, ArrayRef<Value> operands,
360                   ConversionPatternRewriter &rewriter) const override {
361     auto loc = store->getLoc();
362     auto adaptor = vector::MaskedStoreOpAdaptor(operands);
363     MemRefType memRefType = store.getMemRefType();
364 
365     // Resolve alignment.
366     unsigned align;
367     if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
368       return failure();
369 
370     // Resolve address.
371     auto vtype = typeConverter->convertType(store.getValueVectorType());
372     Value dataPtr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
373                                                adaptor.indices(), rewriter);
374     Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefType, vtype);
375 
376     rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
377         store, adaptor.value(), ptr, adaptor.mask(),
378         rewriter.getI32IntegerAttr(align));
379     return success();
380   }
381 };
382 
383 /// Conversion pattern for a vector.gather.
384 class VectorGatherOpConversion
385     : public ConvertOpToLLVMPattern<vector::GatherOp> {
386 public:
387   using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;
388 
389   LogicalResult
390   matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
391                   ConversionPatternRewriter &rewriter) const override {
392     auto loc = gather->getLoc();
393     auto adaptor = vector::GatherOpAdaptor(operands);
394 
395     // Resolve alignment.
396     unsigned align;
397     if (failed(getMemRefAlignment(*getTypeConverter(), gather.getMemRefType(),
398                                   align)))
399       return failure();
400 
401     // Get index ptrs.
402     VectorType vType = gather.getResultVectorType();
403     Type iType = gather.getIndicesVectorType().getElementType();
404     Value ptrs;
405     if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
406                               gather.getMemRefType(), vType, iType, ptrs)))
407       return failure();
408 
409     // Replace with the gather intrinsic.
410     rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
411         gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
412         adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
413     return success();
414   }
415 };
416 
417 /// Conversion pattern for a vector.scatter.
418 class VectorScatterOpConversion
419     : public ConvertOpToLLVMPattern<vector::ScatterOp> {
420 public:
421   using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;
422 
423   LogicalResult
424   matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
425                   ConversionPatternRewriter &rewriter) const override {
426     auto loc = scatter->getLoc();
427     auto adaptor = vector::ScatterOpAdaptor(operands);
428 
429     // Resolve alignment.
430     unsigned align;
431     if (failed(getMemRefAlignment(*getTypeConverter(), scatter.getMemRefType(),
432                                   align)))
433       return failure();
434 
435     // Get index ptrs.
436     VectorType vType = scatter.getValueVectorType();
437     Type iType = scatter.getIndicesVectorType().getElementType();
438     Value ptrs;
439     if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), adaptor.indices(),
440                               scatter.getMemRefType(), vType, iType, ptrs)))
441       return failure();
442 
443     // Replace with the scatter intrinsic.
444     rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
445         scatter, adaptor.value(), ptrs, adaptor.mask(),
446         rewriter.getI32IntegerAttr(align));
447     return success();
448   }
449 };
450 
451 /// Conversion pattern for a vector.expandload.
452 class VectorExpandLoadOpConversion
453     : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
454 public:
455   using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;
456 
457   LogicalResult
458   matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
459                   ConversionPatternRewriter &rewriter) const override {
460     auto loc = expand->getLoc();
461     auto adaptor = vector::ExpandLoadOpAdaptor(operands);
462     MemRefType memRefType = expand.getMemRefType();
463 
464     // Resolve address.
465     auto vtype = typeConverter->convertType(expand.getResultVectorType());
466     Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
467                                            adaptor.indices(), rewriter);
468 
469     rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
470         expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
471     return success();
472   }
473 };
474 
475 /// Conversion pattern for a vector.compressstore.
476 class VectorCompressStoreOpConversion
477     : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
478 public:
479   using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;
480 
481   LogicalResult
482   matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
483                   ConversionPatternRewriter &rewriter) const override {
484     auto loc = compress->getLoc();
485     auto adaptor = vector::CompressStoreOpAdaptor(operands);
486     MemRefType memRefType = compress.getMemRefType();
487 
488     // Resolve address.
489     Value ptr = this->getStridedElementPtr(loc, memRefType, adaptor.base(),
490                                            adaptor.indices(), rewriter);
491 
492     rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
493         compress, adaptor.value(), ptr, adaptor.mask());
494     return success();
495   }
496 };
497 
498 /// Conversion pattern for all vector reductions.
499 class VectorReductionOpConversion
500     : public ConvertOpToLLVMPattern<vector::ReductionOp> {
501 public:
502   explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
503                                        bool reassociateFPRed)
504       : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
505         reassociateFPReductions(reassociateFPRed) {}
506 
507   LogicalResult
508   matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
509                   ConversionPatternRewriter &rewriter) const override {
510     auto kind = reductionOp.kind();
511     Type eltType = reductionOp.dest().getType();
512     Type llvmType = typeConverter->convertType(eltType);
513     if (eltType.isIntOrIndex()) {
514       // Integer reductions: add/mul/min/max/and/or/xor.
515       if (kind == "add")
516         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
517             reductionOp, llvmType, operands[0]);
518       else if (kind == "mul")
519         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
520             reductionOp, llvmType, operands[0]);
521       else if (kind == "min" &&
522                (eltType.isIndex() || eltType.isUnsignedInteger()))
523         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
524             reductionOp, llvmType, operands[0]);
525       else if (kind == "min")
526         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
527             reductionOp, llvmType, operands[0]);
528       else if (kind == "max" &&
529                (eltType.isIndex() || eltType.isUnsignedInteger()))
530         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
531             reductionOp, llvmType, operands[0]);
532       else if (kind == "max")
533         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
534             reductionOp, llvmType, operands[0]);
535       else if (kind == "and")
536         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
537             reductionOp, llvmType, operands[0]);
538       else if (kind == "or")
539         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
540             reductionOp, llvmType, operands[0]);
541       else if (kind == "xor")
542         rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
543             reductionOp, llvmType, operands[0]);
544       else
545         return failure();
546       return success();
547     }
548 
549     if (!eltType.isa<FloatType>())
550       return failure();
551 
552     // Floating-point reductions: add/mul/min/max
553     if (kind == "add") {
554       // Optional accumulator (or zero).
555       Value acc = operands.size() > 1 ? operands[1]
556                                       : rewriter.create<LLVM::ConstantOp>(
557                                             reductionOp->getLoc(), llvmType,
558                                             rewriter.getZeroAttr(eltType));
559       rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
560           reductionOp, llvmType, acc, operands[0],
561           rewriter.getBoolAttr(reassociateFPReductions));
562     } else if (kind == "mul") {
563       // Optional accumulator (or one).
564       Value acc = operands.size() > 1
565                       ? operands[1]
566                       : rewriter.create<LLVM::ConstantOp>(
567                             reductionOp->getLoc(), llvmType,
568                             rewriter.getFloatAttr(eltType, 1.0));
569       rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
570           reductionOp, llvmType, acc, operands[0],
571           rewriter.getBoolAttr(reassociateFPReductions));
572     } else if (kind == "min")
573       rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
574           reductionOp, llvmType, operands[0]);
575     else if (kind == "max")
576       rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
577           reductionOp, llvmType, operands[0]);
578     else
579       return failure();
580     return success();
581   }
582 
583 private:
584   const bool reassociateFPReductions;
585 };
586 
587 /// Conversion pattern for a vector.create_mask (1-D only).
588 class VectorCreateMaskOpConversion
589     : public ConvertOpToLLVMPattern<vector::CreateMaskOp> {
590 public:
591   explicit VectorCreateMaskOpConversion(LLVMTypeConverter &typeConv,
592                                         bool enableIndexOpt)
593       : ConvertOpToLLVMPattern<vector::CreateMaskOp>(typeConv),
594         enableIndexOptimizations(enableIndexOpt) {}
595 
596   LogicalResult
597   matchAndRewrite(vector::CreateMaskOp op, ArrayRef<Value> operands,
598                   ConversionPatternRewriter &rewriter) const override {
599     auto dstType = op.getType();
600     int64_t rank = dstType.getRank();
601     if (rank == 1) {
602       rewriter.replaceOp(
603           op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
604                                     dstType.getDimSize(0), operands[0]));
605       return success();
606     }
607     return failure();
608   }
609 
610 private:
611   const bool enableIndexOptimizations;
612 };
613 
614 class VectorShuffleOpConversion
615     : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
616 public:
617   using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;
618 
619   LogicalResult
620   matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
621                   ConversionPatternRewriter &rewriter) const override {
622     auto loc = shuffleOp->getLoc();
623     auto adaptor = vector::ShuffleOpAdaptor(operands);
624     auto v1Type = shuffleOp.getV1VectorType();
625     auto v2Type = shuffleOp.getV2VectorType();
626     auto vectorType = shuffleOp.getVectorType();
627     Type llvmType = typeConverter->convertType(vectorType);
628     auto maskArrayAttr = shuffleOp.mask();
629 
630     // Bail if result type cannot be lowered.
631     if (!llvmType)
632       return failure();
633 
634     // Get rank and dimension sizes.
635     int64_t rank = vectorType.getRank();
636     assert(v1Type.getRank() == rank);
637     assert(v2Type.getRank() == rank);
638     int64_t v1Dim = v1Type.getDimSize(0);
639 
640     // For rank 1, where both operands have *exactly* the same vector type,
641     // there is direct shuffle support in LLVM. Use it!
642     if (rank == 1 && v1Type == v2Type) {
643       Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
644           loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
645       rewriter.replaceOp(shuffleOp, llvmShuffleOp);
646       return success();
647     }
648 
649     // For all other cases, insert the individual values individually.
650     Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
651     int64_t insPos = 0;
652     for (auto en : llvm::enumerate(maskArrayAttr)) {
653       int64_t extPos = en.value().cast<IntegerAttr>().getInt();
654       Value value = adaptor.v1();
655       if (extPos >= v1Dim) {
656         extPos -= v1Dim;
657         value = adaptor.v2();
658       }
659       Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
660                                  llvmType, rank, extPos);
661       insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
662                          llvmType, rank, insPos++);
663     }
664     rewriter.replaceOp(shuffleOp, insert);
665     return success();
666   }
667 };
668 
669 class VectorExtractElementOpConversion
670     : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
671 public:
672   using ConvertOpToLLVMPattern<
673       vector::ExtractElementOp>::ConvertOpToLLVMPattern;
674 
675   LogicalResult
676   matchAndRewrite(vector::ExtractElementOp extractEltOp,
677                   ArrayRef<Value> operands,
678                   ConversionPatternRewriter &rewriter) const override {
679     auto adaptor = vector::ExtractElementOpAdaptor(operands);
680     auto vectorType = extractEltOp.getVectorType();
681     auto llvmType = typeConverter->convertType(vectorType.getElementType());
682 
683     // Bail if result type cannot be lowered.
684     if (!llvmType)
685       return failure();
686 
687     rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
688         extractEltOp, llvmType, adaptor.vector(), adaptor.position());
689     return success();
690   }
691 };
692 
693 class VectorExtractOpConversion
694     : public ConvertOpToLLVMPattern<vector::ExtractOp> {
695 public:
696   using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;
697 
698   LogicalResult
699   matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
700                   ConversionPatternRewriter &rewriter) const override {
701     auto loc = extractOp->getLoc();
702     auto adaptor = vector::ExtractOpAdaptor(operands);
703     auto vectorType = extractOp.getVectorType();
704     auto resultType = extractOp.getResult().getType();
705     auto llvmResultType = typeConverter->convertType(resultType);
706     auto positionArrayAttr = extractOp.position();
707 
708     // Bail if result type cannot be lowered.
709     if (!llvmResultType)
710       return failure();
711 
712     // One-shot extraction of vector from array (only requires extractvalue).
713     if (resultType.isa<VectorType>()) {
714       Value extracted = rewriter.create<LLVM::ExtractValueOp>(
715           loc, llvmResultType, adaptor.vector(), positionArrayAttr);
716       rewriter.replaceOp(extractOp, extracted);
717       return success();
718     }
719 
720     // Potential extraction of 1-D vector from array.
721     auto *context = extractOp->getContext();
722     Value extracted = adaptor.vector();
723     auto positionAttrs = positionArrayAttr.getValue();
724     if (positionAttrs.size() > 1) {
725       auto oneDVectorType = reducedVectorTypeBack(vectorType);
726       auto nMinusOnePositionAttrs =
727           ArrayAttr::get(positionAttrs.drop_back(), context);
728       extracted = rewriter.create<LLVM::ExtractValueOp>(
729           loc, typeConverter->convertType(oneDVectorType), extracted,
730           nMinusOnePositionAttrs);
731     }
732 
733     // Remaining extraction of element from 1-D LLVM vector
734     auto position = positionAttrs.back().cast<IntegerAttr>();
735     auto i64Type = IntegerType::get(rewriter.getContext(), 64);
736     auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
737     extracted =
738         rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
739     rewriter.replaceOp(extractOp, extracted);
740 
741     return success();
742   }
743 };
744 
745 /// Conversion pattern that turns a vector.fma on a 1-D vector
746 /// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
747 /// This does not match vectors of n >= 2 rank.
748 ///
749 /// Example:
750 /// ```
751 ///  vector.fma %a, %a, %a : vector<8xf32>
752 /// ```
753 /// is converted to:
754 /// ```
755 ///  llvm.intr.fmuladd %va, %va, %va:
756 ///    (!llvm."<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
757 ///    -> !llvm."<8 x f32>">
758 /// ```
759 class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
760 public:
761   using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;
762 
763   LogicalResult
764   matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
765                   ConversionPatternRewriter &rewriter) const override {
766     auto adaptor = vector::FMAOpAdaptor(operands);
767     VectorType vType = fmaOp.getVectorType();
768     if (vType.getRank() != 1)
769       return failure();
770     rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
771                                                  adaptor.rhs(), adaptor.acc());
772     return success();
773   }
774 };
775 
776 class VectorInsertElementOpConversion
777     : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
778 public:
779   using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;
780 
781   LogicalResult
782   matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
783                   ConversionPatternRewriter &rewriter) const override {
784     auto adaptor = vector::InsertElementOpAdaptor(operands);
785     auto vectorType = insertEltOp.getDestVectorType();
786     auto llvmType = typeConverter->convertType(vectorType);
787 
788     // Bail if result type cannot be lowered.
789     if (!llvmType)
790       return failure();
791 
792     rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
793         insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
794         adaptor.position());
795     return success();
796   }
797 };
798 
799 class VectorInsertOpConversion
800     : public ConvertOpToLLVMPattern<vector::InsertOp> {
801 public:
802   using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;
803 
804   LogicalResult
805   matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
806                   ConversionPatternRewriter &rewriter) const override {
807     auto loc = insertOp->getLoc();
808     auto adaptor = vector::InsertOpAdaptor(operands);
809     auto sourceType = insertOp.getSourceType();
810     auto destVectorType = insertOp.getDestVectorType();
811     auto llvmResultType = typeConverter->convertType(destVectorType);
812     auto positionArrayAttr = insertOp.position();
813 
814     // Bail if result type cannot be lowered.
815     if (!llvmResultType)
816       return failure();
817 
818     // One-shot insertion of a vector into an array (only requires insertvalue).
819     if (sourceType.isa<VectorType>()) {
820       Value inserted = rewriter.create<LLVM::InsertValueOp>(
821           loc, llvmResultType, adaptor.dest(), adaptor.source(),
822           positionArrayAttr);
823       rewriter.replaceOp(insertOp, inserted);
824       return success();
825     }
826 
827     // Potential extraction of 1-D vector from array.
828     auto *context = insertOp->getContext();
829     Value extracted = adaptor.dest();
830     auto positionAttrs = positionArrayAttr.getValue();
831     auto position = positionAttrs.back().cast<IntegerAttr>();
832     auto oneDVectorType = destVectorType;
833     if (positionAttrs.size() > 1) {
834       oneDVectorType = reducedVectorTypeBack(destVectorType);
835       auto nMinusOnePositionAttrs =
836           ArrayAttr::get(positionAttrs.drop_back(), context);
837       extracted = rewriter.create<LLVM::ExtractValueOp>(
838           loc, typeConverter->convertType(oneDVectorType), extracted,
839           nMinusOnePositionAttrs);
840     }
841 
842     // Insertion of an element into a 1-D LLVM vector.
843     auto i64Type = IntegerType::get(rewriter.getContext(), 64);
844     auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
845     Value inserted = rewriter.create<LLVM::InsertElementOp>(
846         loc, typeConverter->convertType(oneDVectorType), extracted,
847         adaptor.source(), constant);
848 
849     // Potential insertion of resulting 1-D vector into array.
850     if (positionAttrs.size() > 1) {
851       auto nMinusOnePositionAttrs =
852           ArrayAttr::get(positionAttrs.drop_back(), context);
853       inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
854                                                       adaptor.dest(), inserted,
855                                                       nMinusOnePositionAttrs);
856     }
857 
858     rewriter.replaceOp(insertOp, inserted);
859     return success();
860   }
861 };
862 
863 /// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
864 ///
865 /// Example:
866 /// ```
867 ///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
868 /// ```
869 /// is rewritten into:
870 /// ```
871 ///  %r = splat %f0: vector<2x4xf32>
872 ///  %va = vector.extractvalue %a[0] : vector<2x4xf32>
873 ///  %vb = vector.extractvalue %b[0] : vector<2x4xf32>
874 ///  %vc = vector.extractvalue %c[0] : vector<2x4xf32>
875 ///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
876 ///  %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
877 ///  %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
878 ///  %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
879 ///  %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
880 ///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
881 ///  %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
882 ///  // %r3 holds the final value.
883 /// ```
884 class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
885 public:
886   using OpRewritePattern<FMAOp>::OpRewritePattern;
887 
888   LogicalResult matchAndRewrite(FMAOp op,
889                                 PatternRewriter &rewriter) const override {
890     auto vType = op.getVectorType();
891     if (vType.getRank() < 2)
892       return failure();
893 
894     auto loc = op.getLoc();
895     auto elemType = vType.getElementType();
896     Value zero = rewriter.create<ConstantOp>(loc, elemType,
897                                              rewriter.getZeroAttr(elemType));
898     Value desc = rewriter.create<SplatOp>(loc, vType, zero);
899     for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
900       Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
901       Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
902       Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
903       Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
904       desc = rewriter.create<InsertOp>(loc, fma, desc, i);
905     }
906     rewriter.replaceOp(op, desc);
907     return success();
908   }
909 };
910 
911 // When ranks are different, InsertStridedSlice needs to extract a properly
912 // ranked vector from the destination vector into which to insert. This pattern
913 // only takes care of this part and forwards the rest of the conversion to
914 // another pattern that converts InsertStridedSlice for operands of the same
915 // rank.
916 //
917 // RewritePattern for InsertStridedSliceOp where source and destination vectors
918 // have different ranks. In this case:
919 //   1. the proper subvector is extracted from the destination vector
920 //   2. a new InsertStridedSlice op is created to insert the source in the
921 //   destination subvector
922 //   3. the destination subvector is inserted back in the proper place
923 //   4. the op is replaced by the result of step 3.
924 // The new InsertStridedSlice from step 2. will be picked up by a
925 // `VectorInsertStridedSliceOpSameRankRewritePattern`.
926 class VectorInsertStridedSliceOpDifferentRankRewritePattern
927     : public OpRewritePattern<InsertStridedSliceOp> {
928 public:
929   using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
930 
931   LogicalResult matchAndRewrite(InsertStridedSliceOp op,
932                                 PatternRewriter &rewriter) const override {
933     auto srcType = op.getSourceVectorType();
934     auto dstType = op.getDestVectorType();
935 
936     if (op.offsets().getValue().empty())
937       return failure();
938 
939     auto loc = op.getLoc();
940     int64_t rankDiff = dstType.getRank() - srcType.getRank();
941     assert(rankDiff >= 0);
942     if (rankDiff == 0)
943       return failure();
944 
945     int64_t rankRest = dstType.getRank() - rankDiff;
946     // Extract / insert the subvector of matching rank and InsertStridedSlice
947     // on it.
948     Value extracted =
949         rewriter.create<ExtractOp>(loc, op.dest(),
950                                    getI64SubArray(op.offsets(), /*dropFront=*/0,
951                                                   /*dropBack=*/rankRest));
952     // A different pattern will kick in for InsertStridedSlice with matching
953     // ranks.
954     auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
955         loc, op.source(), extracted,
956         getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
957         getI64SubArray(op.strides(), /*dropFront=*/0));
958     rewriter.replaceOpWithNewOp<InsertOp>(
959         op, stridedSliceInnerOp.getResult(), op.dest(),
960         getI64SubArray(op.offsets(), /*dropFront=*/0,
961                        /*dropBack=*/rankRest));
962     return success();
963   }
964 };
965 
966 // RewritePattern for InsertStridedSliceOp where source and destination vectors
967 // have the same rank. In this case, we reduce
968 //   1. the proper subvector is extracted from the destination vector
969 //   2. a new InsertStridedSlice op is created to insert the source in the
970 //   destination subvector
971 //   3. the destination subvector is inserted back in the proper place
972 //   4. the op is replaced by the result of step 3.
973 // The new InsertStridedSlice from step 2. will be picked up by a
974 // `VectorInsertStridedSliceOpSameRankRewritePattern`.
975 class VectorInsertStridedSliceOpSameRankRewritePattern
976     : public OpRewritePattern<InsertStridedSliceOp> {
977 public:
978   VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
979       : OpRewritePattern<InsertStridedSliceOp>(ctx) {
980     // This pattern creates recursive InsertStridedSliceOp, but the recursion is
981     // bounded as the rank is strictly decreasing.
982     setHasBoundedRewriteRecursion();
983   }
984 
985   LogicalResult matchAndRewrite(InsertStridedSliceOp op,
986                                 PatternRewriter &rewriter) const override {
987     auto srcType = op.getSourceVectorType();
988     auto dstType = op.getDestVectorType();
989 
990     if (op.offsets().getValue().empty())
991       return failure();
992 
993     int64_t rankDiff = dstType.getRank() - srcType.getRank();
994     assert(rankDiff >= 0);
995     if (rankDiff != 0)
996       return failure();
997 
998     if (srcType == dstType) {
999       rewriter.replaceOp(op, op.source());
1000       return success();
1001     }
1002 
1003     int64_t offset =
1004         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1005     int64_t size = srcType.getShape().front();
1006     int64_t stride =
1007         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1008 
1009     auto loc = op.getLoc();
1010     Value res = op.dest();
1011     // For each slice of the source vector along the most major dimension.
1012     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1013          off += stride, ++idx) {
1014       // 1. extract the proper subvector (or element) from source
1015       Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
1016       if (extractedSource.getType().isa<VectorType>()) {
1017         // 2. If we have a vector, extract the proper subvector from destination
1018         // Otherwise we are at the element level and no need to recurse.
1019         Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
1020         // 3. Reduce the problem to lowering a new InsertStridedSlice op with
1021         // smaller rank.
1022         extractedSource = rewriter.create<InsertStridedSliceOp>(
1023             loc, extractedSource, extractedDest,
1024             getI64SubArray(op.offsets(), /* dropFront=*/1),
1025             getI64SubArray(op.strides(), /* dropFront=*/1));
1026       }
1027       // 4. Insert the extractedSource into the res vector.
1028       res = insertOne(rewriter, loc, extractedSource, res, off);
1029     }
1030 
1031     rewriter.replaceOp(op, res);
1032     return success();
1033   }
1034 };
1035 
1036 /// Returns the strides if the memory underlying `memRefType` has a contiguous
1037 /// static layout.
1038 static llvm::Optional<SmallVector<int64_t, 4>>
1039 computeContiguousStrides(MemRefType memRefType) {
1040   int64_t offset;
1041   SmallVector<int64_t, 4> strides;
1042   if (failed(getStridesAndOffset(memRefType, strides, offset)))
1043     return None;
1044   if (!strides.empty() && strides.back() != 1)
1045     return None;
1046   // If no layout or identity layout, this is contiguous by definition.
1047   if (memRefType.getAffineMaps().empty() ||
1048       memRefType.getAffineMaps().front().isIdentity())
1049     return strides;
1050 
1051   // Otherwise, we must determine contiguity form shapes. This can only ever
1052   // work in static cases because MemRefType is underspecified to represent
1053   // contiguous dynamic shapes in other ways than with just empty/identity
1054   // layout.
1055   auto sizes = memRefType.getShape();
1056   for (int index = 0, e = strides.size() - 2; index < e; ++index) {
1057     if (ShapedType::isDynamic(sizes[index + 1]) ||
1058         ShapedType::isDynamicStrideOrOffset(strides[index]) ||
1059         ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
1060       return None;
1061     if (strides[index] != strides[index + 1] * sizes[index + 1])
1062       return None;
1063   }
1064   return strides;
1065 }
1066 
1067 class VectorTypeCastOpConversion
1068     : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
1069 public:
1070   using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;
1071 
1072   LogicalResult
1073   matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
1074                   ConversionPatternRewriter &rewriter) const override {
1075     auto loc = castOp->getLoc();
1076     MemRefType sourceMemRefType =
1077         castOp.getOperand().getType().cast<MemRefType>();
1078     MemRefType targetMemRefType = castOp.getType();
1079 
1080     // Only static shape casts supported atm.
1081     if (!sourceMemRefType.hasStaticShape() ||
1082         !targetMemRefType.hasStaticShape())
1083       return failure();
1084 
1085     auto llvmSourceDescriptorTy =
1086         operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
1087     if (!llvmSourceDescriptorTy)
1088       return failure();
1089     MemRefDescriptor sourceMemRef(operands[0]);
1090 
1091     auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
1092                                       .dyn_cast_or_null<LLVM::LLVMStructType>();
1093     if (!llvmTargetDescriptorTy)
1094       return failure();
1095 
1096     // Only contiguous source buffers supported atm.
1097     auto sourceStrides = computeContiguousStrides(sourceMemRefType);
1098     if (!sourceStrides)
1099       return failure();
1100     auto targetStrides = computeContiguousStrides(targetMemRefType);
1101     if (!targetStrides)
1102       return failure();
1103     // Only support static strides for now, regardless of contiguity.
1104     if (llvm::any_of(*targetStrides, [](int64_t stride) {
1105           return ShapedType::isDynamicStrideOrOffset(stride);
1106         }))
1107       return failure();
1108 
1109     auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
1110 
1111     // Create descriptor.
1112     auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
1113     Type llvmTargetElementTy = desc.getElementPtrType();
1114     // Set allocated ptr.
1115     Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
1116     allocated =
1117         rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
1118     desc.setAllocatedPtr(rewriter, loc, allocated);
1119     // Set aligned ptr.
1120     Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
1121     ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
1122     desc.setAlignedPtr(rewriter, loc, ptr);
1123     // Fill offset 0.
1124     auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
1125     auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
1126     desc.setOffset(rewriter, loc, zero);
1127 
1128     // Fill size and stride descriptors in memref.
1129     for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
1130       int64_t index = indexedSize.index();
1131       auto sizeAttr =
1132           rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
1133       auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
1134       desc.setSize(rewriter, loc, index, size);
1135       auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
1136                                                 (*targetStrides)[index]);
1137       auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
1138       desc.setStride(rewriter, loc, index, stride);
1139     }
1140 
1141     rewriter.replaceOp(castOp, {desc});
1142     return success();
1143   }
1144 };
1145 
1146 /// Conversion pattern that converts a 1-D vector transfer read/write op in a
1147 /// sequence of:
1148 /// 1. Get the source/dst address as an LLVM vector pointer.
1149 /// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
1150 /// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
1151 /// 4. Create a mask where offsetVector is compared against memref upper bound.
1152 /// 5. Rewrite op as a masked read or write.
1153 template <typename ConcreteOp>
1154 class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
1155 public:
1156   explicit VectorTransferConversion(LLVMTypeConverter &typeConv,
1157                                     bool enableIndexOpt)
1158       : ConvertOpToLLVMPattern<ConcreteOp>(typeConv),
1159         enableIndexOptimizations(enableIndexOpt) {}
1160 
1161   LogicalResult
1162   matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
1163                   ConversionPatternRewriter &rewriter) const override {
1164     auto adaptor = getTransferOpAdapter(xferOp, operands);
1165 
1166     if (xferOp.getVectorType().getRank() > 1 ||
1167         llvm::size(xferOp.indices()) == 0)
1168       return failure();
1169     if (xferOp.permutation_map() !=
1170         AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
1171                                        xferOp.getVectorType().getRank(),
1172                                        xferOp->getContext()))
1173       return failure();
1174     auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
1175     if (!memRefType)
1176       return failure();
1177     // Only contiguous source tensors supported atm.
1178     auto strides = computeContiguousStrides(memRefType);
1179     if (!strides)
1180       return failure();
1181 
1182     auto toLLVMTy = [&](Type t) {
1183       return this->getTypeConverter()->convertType(t);
1184     };
1185 
1186     Location loc = xferOp->getLoc();
1187 
1188     if (auto memrefVectorElementType =
1189             memRefType.getElementType().template dyn_cast<VectorType>()) {
1190       // Memref has vector element type.
1191       if (memrefVectorElementType.getElementType() !=
1192           xferOp.getVectorType().getElementType())
1193         return failure();
1194 #ifndef NDEBUG
1195       // Check that memref vector type is a suffix of 'vectorType.
1196       unsigned memrefVecEltRank = memrefVectorElementType.getRank();
1197       unsigned resultVecRank = xferOp.getVectorType().getRank();
1198       assert(memrefVecEltRank <= resultVecRank);
1199       // TODO: Move this to isSuffix in Vector/Utils.h.
1200       unsigned rankOffset = resultVecRank - memrefVecEltRank;
1201       auto memrefVecEltShape = memrefVectorElementType.getShape();
1202       auto resultVecShape = xferOp.getVectorType().getShape();
1203       for (unsigned i = 0; i < memrefVecEltRank; ++i)
1204         assert(memrefVecEltShape[i] != resultVecShape[rankOffset + i] &&
1205                "memref vector element shape should match suffix of vector "
1206                "result shape.");
1207 #endif // ifndef NDEBUG
1208     }
1209 
1210     // 1. Get the source/dst address as an LLVM vector pointer.
1211     VectorType vtp = xferOp.getVectorType();
1212     Value dataPtr = this->getStridedElementPtr(
1213         loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
1214     Value vectorDataPtr =
1215         castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));
1216 
1217     if (!xferOp.isMaskedDim(0))
1218       return replaceTransferOpWithLoadOrStore(rewriter,
1219                                               *this->getTypeConverter(), loc,
1220                                               xferOp, operands, vectorDataPtr);
1221 
1222     // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
1223     // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
1224     // 4. Let dim the memref dimension, compute the vector comparison mask:
1225     //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
1226     //
1227     // TODO: when the leaf transfer rank is k > 1, we need the last `k`
1228     //       dimensions here.
1229     unsigned vecWidth = vtp.getNumElements();
1230     unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
1231     Value off = xferOp.indices()[lastIndex];
1232     Value dim = rewriter.create<DimOp>(loc, xferOp.source(), lastIndex);
1233     Value mask = buildVectorComparison(
1234         rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);
1235 
1236     // 5. Rewrite as a masked read / write.
1237     return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(), loc,
1238                                        xferOp, operands, vectorDataPtr, mask);
1239   }
1240 
1241 private:
1242   const bool enableIndexOptimizations;
1243 };
1244 
1245 class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
1246 public:
1247   using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;
1248 
1249   // Proof-of-concept lowering implementation that relies on a small
1250   // runtime support library, which only needs to provide a few
1251   // printing methods (single value for all data types, opening/closing
1252   // bracket, comma, newline). The lowering fully unrolls a vector
1253   // in terms of these elementary printing operations. The advantage
1254   // of this approach is that the library can remain unaware of all
1255   // low-level implementation details of vectors while still supporting
1256   // output of any shaped and dimensioned vector. Due to full unrolling,
1257   // this approach is less suited for very large vectors though.
1258   //
1259   // TODO: rely solely on libc in future? something else?
1260   //
1261   LogicalResult
1262   matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
1263                   ConversionPatternRewriter &rewriter) const override {
1264     auto adaptor = vector::PrintOpAdaptor(operands);
1265     Type printType = printOp.getPrintType();
1266 
1267     if (typeConverter->convertType(printType) == nullptr)
1268       return failure();
1269 
1270     // Make sure element type has runtime support.
1271     PrintConversion conversion = PrintConversion::None;
1272     VectorType vectorType = printType.dyn_cast<VectorType>();
1273     Type eltType = vectorType ? vectorType.getElementType() : printType;
1274     Operation *printer;
1275     if (eltType.isF32()) {
1276       printer = getPrintFloat(printOp);
1277     } else if (eltType.isF64()) {
1278       printer = getPrintDouble(printOp);
1279     } else if (eltType.isIndex()) {
1280       printer = getPrintU64(printOp);
1281     } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
1282       // Integers need a zero or sign extension on the operand
1283       // (depending on the source type) as well as a signed or
1284       // unsigned print method. Up to 64-bit is supported.
1285       unsigned width = intTy.getWidth();
1286       if (intTy.isUnsigned()) {
1287         if (width <= 64) {
1288           if (width < 64)
1289             conversion = PrintConversion::ZeroExt64;
1290           printer = getPrintU64(printOp);
1291         } else {
1292           return failure();
1293         }
1294       } else {
1295         assert(intTy.isSignless() || intTy.isSigned());
1296         if (width <= 64) {
1297           // Note that we *always* zero extend booleans (1-bit integers),
1298           // so that true/false is printed as 1/0 rather than -1/0.
1299           if (width == 1)
1300             conversion = PrintConversion::ZeroExt64;
1301           else if (width < 64)
1302             conversion = PrintConversion::SignExt64;
1303           printer = getPrintI64(printOp);
1304         } else {
1305           return failure();
1306         }
1307       }
1308     } else {
1309       return failure();
1310     }
1311 
1312     // Unroll vector into elementary print calls.
1313     int64_t rank = vectorType ? vectorType.getRank() : 0;
1314     emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
1315               conversion);
1316     emitCall(rewriter, printOp->getLoc(), getPrintNewline(printOp));
1317     rewriter.eraseOp(printOp);
1318     return success();
1319   }
1320 
1321 private:
1322   enum class PrintConversion {
1323     // clang-format off
1324     None,
1325     ZeroExt64,
1326     SignExt64
1327     // clang-format on
1328   };
1329 
1330   void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
1331                  Value value, VectorType vectorType, Operation *printer,
1332                  int64_t rank, PrintConversion conversion) const {
1333     Location loc = op->getLoc();
1334     if (rank == 0) {
1335       switch (conversion) {
1336       case PrintConversion::ZeroExt64:
1337         value = rewriter.create<ZeroExtendIOp>(
1338             loc, value, IntegerType::get(rewriter.getContext(), 64));
1339         break;
1340       case PrintConversion::SignExt64:
1341         value = rewriter.create<SignExtendIOp>(
1342             loc, value, IntegerType::get(rewriter.getContext(), 64));
1343         break;
1344       case PrintConversion::None:
1345         break;
1346       }
1347       emitCall(rewriter, loc, printer, value);
1348       return;
1349     }
1350 
1351     emitCall(rewriter, loc, getPrintOpen(op));
1352     Operation *printComma = getPrintComma(op);
1353     int64_t dim = vectorType.getDimSize(0);
1354     for (int64_t d = 0; d < dim; ++d) {
1355       auto reducedType =
1356           rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
1357       auto llvmType = typeConverter->convertType(
1358           rank > 1 ? reducedType : vectorType.getElementType());
1359       Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
1360                                    llvmType, rank, d);
1361       emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
1362                 conversion);
1363       if (d != dim - 1)
1364         emitCall(rewriter, loc, printComma);
1365     }
1366     emitCall(rewriter, loc, getPrintClose(op));
1367   }
1368 
1369   // Helper to emit a call.
1370   static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
1371                        Operation *ref, ValueRange params = ValueRange()) {
1372     rewriter.create<LLVM::CallOp>(loc, TypeRange(),
1373                                   rewriter.getSymbolRefAttr(ref), params);
1374   }
1375 
1376   // Helper for printer method declaration (first hit) and lookup.
1377   static Operation *getPrint(Operation *op, StringRef name,
1378                              ArrayRef<Type> params) {
1379     auto module = op->getParentOfType<ModuleOp>();
1380     auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
1381     if (func)
1382       return func;
1383     OpBuilder moduleBuilder(module.getBodyRegion());
1384     return moduleBuilder.create<LLVM::LLVMFuncOp>(
1385         op->getLoc(), name,
1386         LLVM::LLVMFunctionType::get(LLVM::LLVMVoidType::get(op->getContext()),
1387                                     params));
1388   }
1389 
1390   // Helpers for method names.
1391   Operation *getPrintI64(Operation *op) const {
1392     return getPrint(op, "printI64", IntegerType::get(op->getContext(), 64));
1393   }
1394   Operation *getPrintU64(Operation *op) const {
1395     return getPrint(op, "printU64", IntegerType::get(op->getContext(), 64));
1396   }
1397   Operation *getPrintFloat(Operation *op) const {
1398     return getPrint(op, "printF32", Float32Type::get(op->getContext()));
1399   }
1400   Operation *getPrintDouble(Operation *op) const {
1401     return getPrint(op, "printF64", Float64Type::get(op->getContext()));
1402   }
1403   Operation *getPrintOpen(Operation *op) const {
1404     return getPrint(op, "printOpen", {});
1405   }
1406   Operation *getPrintClose(Operation *op) const {
1407     return getPrint(op, "printClose", {});
1408   }
1409   Operation *getPrintComma(Operation *op) const {
1410     return getPrint(op, "printComma", {});
1411   }
1412   Operation *getPrintNewline(Operation *op) const {
1413     return getPrint(op, "printNewline", {});
1414   }
1415 };
1416 
1417 /// Progressive lowering of ExtractStridedSliceOp to either:
1418 ///   1. express single offset extract as a direct shuffle.
1419 ///   2. extract + lower rank strided_slice + insert for the n-D case.
1420 class VectorExtractStridedSliceOpConversion
1421     : public OpRewritePattern<ExtractStridedSliceOp> {
1422 public:
1423   VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
1424       : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
1425     // This pattern creates recursive ExtractStridedSliceOp, but the recursion
1426     // is bounded as the rank is strictly decreasing.
1427     setHasBoundedRewriteRecursion();
1428   }
1429 
1430   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1431                                 PatternRewriter &rewriter) const override {
1432     auto dstType = op.getType();
1433 
1434     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1435 
1436     int64_t offset =
1437         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1438     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1439     int64_t stride =
1440         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1441 
1442     auto loc = op.getLoc();
1443     auto elemType = dstType.getElementType();
1444     assert(elemType.isSignlessIntOrIndexOrFloat());
1445 
1446     // Single offset can be more efficiently shuffled.
1447     if (op.offsets().getValue().size() == 1) {
1448       SmallVector<int64_t, 4> offsets;
1449       offsets.reserve(size);
1450       for (int64_t off = offset, e = offset + size * stride; off < e;
1451            off += stride)
1452         offsets.push_back(off);
1453       rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
1454                                              op.vector(),
1455                                              rewriter.getI64ArrayAttr(offsets));
1456       return success();
1457     }
1458 
1459     // Extract/insert on a lower ranked extract strided slice op.
1460     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1461                                              rewriter.getZeroAttr(elemType));
1462     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1463     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1464          off += stride, ++idx) {
1465       Value one = extractOne(rewriter, loc, op.vector(), off);
1466       Value extracted = rewriter.create<ExtractStridedSliceOp>(
1467           loc, one, getI64SubArray(op.offsets(), /* dropFront=*/1),
1468           getI64SubArray(op.sizes(), /* dropFront=*/1),
1469           getI64SubArray(op.strides(), /* dropFront=*/1));
1470       res = insertOne(rewriter, loc, extracted, res, idx);
1471     }
1472     rewriter.replaceOp(op, res);
1473     return success();
1474   }
1475 };
1476 
1477 } // namespace
1478 
1479 /// Populate the given list with patterns that convert from Vector to LLVM.
1480 void mlir::populateVectorToLLVMConversionPatterns(
1481     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1482     bool reassociateFPReductions, bool enableIndexOptimizations) {
1483   MLIRContext *ctx = converter.getDialect()->getContext();
1484   // clang-format off
1485   patterns.insert<VectorFMAOpNDRewritePattern,
1486                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1487                   VectorInsertStridedSliceOpSameRankRewritePattern,
1488                   VectorExtractStridedSliceOpConversion>(ctx);
1489   patterns.insert<VectorReductionOpConversion>(
1490       converter, reassociateFPReductions);
1491   patterns.insert<VectorCreateMaskOpConversion,
1492                   VectorTransferConversion<TransferReadOp>,
1493                   VectorTransferConversion<TransferWriteOp>>(
1494       converter, enableIndexOptimizations);
1495   patterns
1496       .insert<VectorShuffleOpConversion,
1497               VectorExtractElementOpConversion,
1498               VectorExtractOpConversion,
1499               VectorFMAOp1DConversion,
1500               VectorInsertElementOpConversion,
1501               VectorInsertOpConversion,
1502               VectorPrintOpConversion,
1503               VectorTypeCastOpConversion,
1504               VectorMaskedLoadOpConversion,
1505               VectorMaskedStoreOpConversion,
1506               VectorGatherOpConversion,
1507               VectorScatterOpConversion,
1508               VectorExpandLoadOpConversion,
1509               VectorCompressStoreOpConversion>(converter);
1510   // clang-format on
1511 }
1512 
1513 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1514     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1515   patterns.insert<VectorMatmulOpConversion>(converter);
1516   patterns.insert<VectorFlatTransposeOpConversion>(converter);
1517 }
1518