1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Analysis/LoopAnalysis.h"
16 #include "mlir/Analysis/NestedMatcher.h"
17 #include "mlir/Analysis/Utils.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Vector/VectorOps.h"
21 #include "mlir/Dialect/Vector/VectorUtils.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
23 #include "mlir/Support/LLVM.h"
24 
25 using namespace mlir;
26 using namespace vector;
27 
28 ///
29 /// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode into the vector
/// types a single, compact representation of information that is expected to
/// reduce the impact of the phase ordering problem.
33 ///
34 /// Vector granularity:
35 /// ===================
36 /// This pass is designed to perform vectorization at a super-vector
37 /// granularity. A super-vector is loosely defined as a vector type that is a
38 /// multiple of a "good" vector size so the HW can efficiently implement a set
39 /// of high-level primitives. Multiple is understood along any dimension; e.g.
40 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
41 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
42 /// efficiently implement a set of high-level primitives" is not necessarily an
43 /// integer multiple of actual hardware registers. We leave details of this
44 /// distinction unspecified for now.
45 ///
/// Some may prefer the term "tile of HW vectors". In this case, one
/// should note that super-vectors implement an "always full tile" abstraction.
/// They guarantee no partial-tile separation is necessary by relying on a
/// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
/// transposition if necessary. In the general case this will require a scoped
/// allocation in some notional local memory.
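///
/// For illustration only, a simplified sketch (names and shapes are made up,
/// and the padding value of vector.transfer_read is elided as in the other
/// examples of this comment) of a transfer that also performs a layout
/// transposition through its permutation map:
/// ```mlir
///   %v = vector.transfer_read %A[%i, %j]
///          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///        : memref<?x?xf32>, vector<32x256xf32>
/// ```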
53 ///
/// Whatever the mental model one prefers for this abstraction, the key point
/// is that we burn into the vector types a single, compact representation of
/// information that is expected to reduce the impact of the phase ordering
/// problem. Indeed, a vector type conveys information that:
///   1. the associated loops have dependency semantics that do not prevent
///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2., are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality.
/// In other words, super-vectorization provides a level of separation of
/// concerns by way of opacity to subsequent passes. This has the effect of
70 /// encapsulating and propagating vectorization constraints down the list of
71 /// passes until we are ready to lower further.
72 ///
/// For a particular target, a notion of minimal n-D vector size will be
/// specified and vectorization targets a multiple of it. In the following
75 /// paragraph, let "k ." represent "a multiple of", to be understood as a
76 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
77 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
78 ///
79 /// Some non-exhaustive notable super-vector sizes of interest include:
80 ///   - CPU: vector<k . HW_vector_size>,
81 ///          vector<k' . core_count x k . HW_vector_size>,
82 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
83 ///   - GPU: vector<k . warp_size>,
84 ///          vector<k . warp_size x float2>,
85 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
87 ///
88 /// Loops and operations are emitted that operate on those super-vector shapes.
89 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
90 /// passes are expected to be (gradually) more target-specific.
91 ///
92 /// At a high level, a vectorized load in a loop will resemble:
93 /// ```mlir
94 ///   affine.for %i = ? to ? step ? {
95 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
96 ///   }
97 /// ```
98 /// It is the responsibility of the implementation of vector.transfer_read to
99 /// materialize vector registers from the original scalar memrefs. A later (more
100 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
102 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
103 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
104 ///      thus opening opportunities for unrolling and pipelining. This is an
105 ///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call, achieving full separation of concerns. This is an instance of a
///      library call; or
109 ///   3. a mix of both, e.g. based on a model.
110 /// In the future, these operations will expose a contract to constrain the
111 /// search on vectorization patterns and sizes.
112 ///
113 /// Occurrence of super-vectorization in the compiler flow:
114 /// =======================================================
115 /// This is an active area of investigation. We start with 2 remarks to position
116 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
117 /// and LLVM SLP Vectorizer.
118 ///
119 /// LLVM VPLAN:
120 /// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN:
/// for instance, in a traditional polyhedral compilation flow (e.g. the PPCG
/// project, which uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping, and register optimizations), and before unrolling.
/// When vectorization is applied at this *late* level in a typical
128 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
129 /// super-vectorization is expected to match (or subsume) the type of patterns
130 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
131 /// is higher level and our implementation should be significantly simpler. Also
/// note that in this mode, recursive patterns are probably a bit of overkill,
133 /// although it is reasonable to expect that mixing a bit of outer loop and
134 /// inner loop vectorization + unrolling will provide interesting choices to
135 /// MLIR.
136 ///
137 /// LLVM SLP Vectorizer:
138 /// --------------------
139 /// Super-vectorization however is not meant to be usable in a similar fashion
140 /// to the SLP vectorizer. The main difference lies in the information that
141 /// both vectorizers use: super-vectorization examines contiguity of memory
142 /// references along fastest varying dimensions and loops with recursive nested
143 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
144 /// the other hand, performs flat pattern matching inside a single unrolled loop
145 /// body and stitches together pieces of load and store operations into full
146 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
147 /// innermost loop, control-flow dependent patterns that super-vectorization may
148 /// not be able to capture easily. In other words, super-vectorization does not
149 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
150 ///
151 /// Ongoing investigations:
152 /// -----------------------
153 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
155 /// opportunities in the context of the traditional polyhedral compiler flow
156 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
157 /// we expect to experiment with super-vectorization:
158 /// 1. Right after language lowering to MLIR: this is the earliest time where
159 ///    super-vectorization is expected to be applied. At this level, all the
160 ///    language/user/library-level annotations are available and can be fully
161 ///    exploited. Examples include loop-type annotations (such as parallel,
162 ///    reduction, scan, dependence distance vector, vectorizable) as well as
///    memory access annotations (such as guaranteed non-aliasing writes,
///    indirect accesses that are permutations by construction, or the fact
///    that a particular operation is prescribed atomic by the user). At this
166 ///    level, anything that enriches what dependence analysis can do should be
167 ///    aggressively exploited. At this level we are close to having explicit
168 ///    vector types in the language, except we do not impose that burden on the
169 ///    programmer/library: we derive information from scalar code + annotations.
170 /// 2. After dependence analysis and before polyhedral scheduling: the
171 ///    information that supports vectorization does not need to be supplied by a
172 ///    higher level of abstraction. Traditional dependence analysis is available
173 ///    in MLIR and will be used to drive vectorization and cost models.
174 ///
175 /// Let's pause here and remark that applying super-vectorization as described
176 /// in 1. and 2. presents clear opportunities and risks:
177 ///   - the opportunity is that vectorization is burned in the type system and
178 ///   is protected from the adverse effect of loop scheduling, tiling, loop
179 ///   interchange and all passes downstream. Provided that subsequent passes are
180 ///   able to operate on vector types; the vector shapes, associated loop
181 ///   iterator properties, alignment, and contiguity of fastest varying
182 ///   dimensions are preserved until we lower the super-vector types. We expect
183 ///   this to significantly rein in on the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is true wherever vectorization
///   is applied anyway) and b. that imposing vectorization constraints
187 ///   too early may be overall detrimental to loop fusion, tiling and other
188 ///   transformations because the dependence distances are coarsened when
189 ///   operating on elemental vector types. For this reason, the pattern
190 ///   profitability analysis should include a component that also captures the
191 ///   maximal amount of fusion available under a particular pattern. This is
192 ///   still at the stage of rough ideas but in this context, search is our
193 ///   friend as the Tensor Comprehensions and auto-TVM contributions
194 ///   demonstrated previously.
/// The bottom line is that we do not yet have good answers to the above, but
/// we aim at making it easy to answer such questions.
197 ///
198 /// Back to our listing, the last places where early super-vectorization makes
199 /// sense are:
200 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
201 ///    to improve locality, parallelism and be configurable (e.g. max-fuse,
202 ///    smart-fuse etc). They can also have adverse effects on contiguity
203 ///    properties that are required for vectorization but the vector.transfer
204 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
205 ///    these properties.
206 /// 4. right after polyhedral-style scheduling+tiling;
207 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
208 ///    probably the most promising places because applying tiling achieves a
209 ///    separation of concerns that allows rescheduling to worry less about
210 ///    locality and more about parallelism and distribution (e.g. min-fuse).
211 ///
212 /// At these levels the risk-reward looks different: on one hand we probably
213 /// lost a good deal of language/user/library-level annotation; on the other
214 /// hand we gained parallelism and locality through scheduling and tiling.
215 /// However we probably want to ensure tiling is compatible with the
216 /// full-tile-only abstraction used in super-vectorization or suffer the
217 /// consequences. It is too early to place bets on what will win but we expect
218 /// super-vectorization to be the right abstraction to allow exploring at all
219 /// these levels. And again, search is our friend.
220 ///
221 /// Lastly, we mention it again here:
222 /// 6. as a MLIR-based alternative to VPLAN.
223 ///
224 /// Lowering, unrolling, pipelining:
225 /// ================================
226 /// TODO: point to the proper places.
227 ///
228 /// Algorithm:
229 /// ==========
230 /// The algorithm proceeds in a few steps:
231 ///  1. defining super-vectorization patterns and matching them on the tree of
///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
///     loops that have a. conformable loop annotations attached (e.g. parallel,
///     reduction, vectorizable, ...) as well as b. all contiguous load/store
///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
238 ///  2. analyzing those patterns for profitability (TODO: and
239 ///     interference);
240 ///  3. then, for each pattern in order:
241 ///    a. applying iterative rewriting of the loops and all their nested
242 ///       operations in topological order. Rewriting is implemented by
243 ///       coarsening the loops and converting operations and operands to their
244 ///       vector forms. Processing operations in topological order is relatively
245 ///       simple due to the structured nature of the control-flow
246 ///       representation. This order ensures that all the operands of a given
247 ///       operation have been vectorized before the operation itself in a single
248 ///       traversal, except for operands defined outside of the loop nest. The
///       algorithm can convert the following operations to their vector form
///       (see the sketch after this list):
250 ///         * Affine load and store operations are converted to opaque vector
251 ///           transfer read and write operations.
252 ///         * Scalar constant operations/operands are converted to vector
253 ///           constant operations (splat).
254 ///         * Uniform operands (only operands defined outside of the loop nest,
255 ///           for now) are broadcasted to a vector.
256 ///           TODO: Support more uniform cases.
257 ///         * Affine for operations with 'iter_args' are vectorized by
258 ///           vectorizing their 'iter_args' operands and results.
259 ///           TODO: Support more complex loops with divergent lbs and/or ubs.
260 ///         * The remaining operations in the loop nest are vectorized by
261 ///           widening their scalar types to vector types.
262 ///    b. if everything under the root AffineForOp in the current pattern
263 ///       is vectorized properly, we commit that loop to the IR and remove the
264 ///       scalar loop. Otherwise, we discard the vectorized loop and keep the
265 ///       original scalar loop.
///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
///       re-verify that the pattern is still vectorizable. This is expected to
///       make cost models more difficult to write and is subject to improvement
///       in the future.
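///
/// As a simplified, illustrative sketch of step 3.a (assuming a 1-D
/// vector<128xf32> strategy; names are hypothetical and the transfer padding
/// value is elided), a scalar body such as:
/// ```mlir
///   affine.for %i = 0 to %N {
///     %a = affine.load %A[%i] : memref<?xf32>
///     %s = addf %a, %cst : f32   // %cst is a uniform defined above the loop.
///     affine.store %s, %B[%i] : memref<?xf32>
///   }
/// ```
/// is rewritten into:
/// ```mlir
///   %vcst = vector.broadcast %cst : f32 to vector<128xf32>
///   affine.for %i = 0 to %N step 128 {
///     %va = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///     %vs = addf %va, %vcst : vector<128xf32>
///     vector.transfer_write %vs, %B[%i] : vector<128xf32>, memref<?xf32>
///   }
/// ```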
272 ///
273 /// Choice of loop transformation to support the algorithm:
274 /// =======================================================
275 /// The choice of loop transformation to apply for coarsening vectorized loops
276 /// is still subject to exploratory tradeoffs. In particular, say we want to
277 /// vectorize by a factor 128, we want to transform the following input:
278 /// ```mlir
279 ///   affine.for %i = %M to %N {
280 ///     %a = affine.load %A[%i] : memref<?xf32>
281 ///   }
282 /// ```
283 ///
/// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion, etc.), say after stripmining (and potentially unrolling
/// in the case of LLVM's SLP vectorizer):
287 /// ```mlir
288 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
289 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
290 ///       %a = affine.load %A[%ii] : memref<?xf32>
291 ///     }
292 ///   }
293 /// ```
294 ///
295 /// Instead, we seek to vectorize early and freeze vector types before
296 /// scheduling, so we want to generate a pattern that resembles:
297 /// ```mlir
298 ///   affine.for %i = ? to ? step ? {
299 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
300 ///   }
301 /// ```
302 ///
/// Two alternatives for materializing this coarsening come to mind:
/// i. simply dividing the lower / upper bounds by 128 creates issues
///    when representing expressions such as ii + 1 because now we only
///    have access to original values that have been divided. Additional
///    information is needed to specify accesses at below-128 granularity;
/// ii. another alternative is to coarsen the loop step, but this may have
///     consequences on dependence analysis and fusability of loops: fusable
///     loops probably need to have the same step (because we don't want to
///     stripmine/unroll to enable fusion).
311 /// As a consequence, we choose to represent the coarsening using the loop
312 /// step for now and reevaluate in the future. Note that we can renormalize
313 /// loop steps later if/when we have evidence that they are problematic.
314 ///
315 /// For the simple strawman example above, vectorizing for a 1-D vector
316 /// abstraction of size 128 returns code similar to:
317 /// ```mlir
318 ///   affine.for %i = %M to %N step 128 {
319 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
320 ///   }
321 /// ```
322 ///
323 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
324 /// ========================================================================
325 ///   1. lowering to concrete vector types for various HW;
326 ///   2. reduction support;
327 ///   3. non-effecting padding during vector.transfer_read and filter during
328 ///      vector.transfer_write;
///   4. misalignment support in vector.transfer_read / vector.transfer_write
330 ///      (hopefully without read-modify-writes);
331 ///   5. control-flow support;
332 ///   6. cost-models, heuristics and search;
333 ///   7. Op implementation, extensions and implication on memref views;
334 ///   8. many TODOs left around.
335 ///
336 /// Examples:
337 /// =========
338 /// Consider the following Function:
339 /// ```mlir
340 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
341 ///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
342 ///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
343 ///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
344 ///   %f1 = constant 1.0 : f32
345 ///   %f2 = constant 2.0 : f32
346 ///   affine.for %i0 = 0 to %M {
347 ///     affine.for %i1 = 0 to %N {
348 ///       // non-scoped %f1
349 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
350 ///     }
351 ///   }
352 ///   affine.for %i2 = 0 to %M {
353 ///     affine.for %i3 = 0 to %N {
354 ///       // non-scoped %f2
355 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
356 ///     }
357 ///   }
358 ///   affine.for %i4 = 0 to %M {
359 ///     affine.for %i5 = 0 to %N {
360 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
361 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
362 ///       %s5 = addf %a5, %b5 : f32
363 ///       // non-scoped %f1
364 ///       %s6 = addf %s5, %f1 : f32
365 ///       // non-scoped %f2
366 ///       %s7 = addf %s5, %f2 : f32
367 ///       // diamond dependency.
368 ///       %s8 = addf %s7, %s6 : f32
369 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
370 ///     }
371 ///   }
372 ///   %c7 = constant 7 : index
373 ///   %c42 = constant 42 : index
374 ///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
375 ///   return %res : f32
376 /// }
377 /// ```
378 ///
379 /// The -affine-vectorize pass with the following arguments:
380 /// ```
381 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
382 /// ```
383 ///
384 /// produces this standard innermost-loop vectorized code:
385 /// ```mlir
386 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
387 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
388 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
389 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
390 ///   %cst = constant 1.0 : f32
391 ///   %cst_0 = constant 2.0 : f32
392 ///   affine.for %i0 = 0 to %arg0 {
393 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<256xf32>
396 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
397 ///                vector<256xf32>, memref<?x?xf32>
398 ///     }
399 ///   }
400 ///   affine.for %i2 = 0 to %arg0 {
401 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<256xf32>
404 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
405 ///                vector<256xf32>, memref<?x?xf32>
406 ///     }
407 ///   }
408 ///   affine.for %i4 = 0 to %arg0 {
409 ///     affine.for %i5 = 0 to %arg1 step 256 {
410 ///       %3 = vector.transfer_read %0[%i4, %i5] :
411 ///            memref<?x?xf32>, vector<256xf32>
412 ///       %4 = vector.transfer_read %1[%i4, %i5] :
413 ///            memref<?x?xf32>, vector<256xf32>
414 ///       %5 = addf %3, %4 : vector<256xf32>
///       %cst_3 = constant dense<1.0> : vector<256xf32>
417 ///       %6 = addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = constant dense<2.0> : vector<256xf32>
420 ///       %7 = addf %5, %cst_4 : vector<256xf32>
421 ///       %8 = addf %7, %6 : vector<256xf32>
422 ///       vector.transfer_write %8, %2[%i4, %i5] :
423 ///                vector<256xf32>, memref<?x?xf32>
424 ///     }
425 ///   }
426 ///   %c7 = constant 7 : index
427 ///   %c42 = constant 42 : index
428 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
429 ///   return %9 : f32
430 /// }
431 /// ```
432 ///
433 /// The -affine-vectorize pass with the following arguments:
434 /// ```
435 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
436 /// ```
437 ///
438 /// produces this more interesting mixed outer-innermost-loop vectorized code:
439 /// ```mlir
440 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
441 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
442 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
443 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
444 ///   %cst = constant 1.0 : f32
445 ///   %cst_0 = constant 2.0 : f32
446 ///   affine.for %i0 = 0 to %arg0 step 32 {
447 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<32x256xf32>
450 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
451 ///                vector<32x256xf32>, memref<?x?xf32>
452 ///     }
453 ///   }
454 ///   affine.for %i2 = 0 to %arg0 step 32 {
455 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<32x256xf32>
458 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
459 ///                vector<32x256xf32>, memref<?x?xf32>
460 ///     }
461 ///   }
462 ///   affine.for %i4 = 0 to %arg0 step 32 {
463 ///     affine.for %i5 = 0 to %arg1 step 256 {
464 ///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
466 ///       %4 = vector.transfer_read %1[%i4, %i5] :
467 ///                memref<?x?xf32>, vector<32x256xf32>
468 ///       %5 = addf %3, %4 : vector<32x256xf32>
///       %cst_3 = constant dense<1.0> : vector<32x256xf32>
471 ///       %6 = addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = constant dense<2.0> : vector<32x256xf32>
474 ///       %7 = addf %5, %cst_4 : vector<32x256xf32>
475 ///       %8 = addf %7, %6 : vector<32x256xf32>
476 ///       vector.transfer_write %8, %2[%i4, %i5] :
477 ///                vector<32x256xf32>, memref<?x?xf32>
478 ///     }
479 ///   }
480 ///   %c7 = constant 7 : index
481 ///   %c42 = constant 42 : index
482 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
483 ///   return %9 : f32
484 /// }
485 /// ```
486 ///
487 /// Of course, much more intricate n-D imperfectly-nested patterns can be
488 /// vectorized too and specified in a fully declarative fashion.
489 
490 #define DEBUG_TYPE "early-vect"
491 
492 using llvm::dbgs;
493 
494 /// Forward declaration.
495 static FilterFunctionType
496 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
497                              int fastestVaryingMemRefDimension);
498 
499 /// Creates a vectorization pattern from the command line arguments.
500 /// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// None, which conservatively results in no vectorization.
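///
/// For example (illustrative): with 'vectorRank' == 2 and
/// 'fastestVaryingPattern' == {1, 0}, the returned pattern matches 2-deep
/// nests of vectorizable affine.for ops whose load/store accesses are
/// invariant or vary along memref dimension 1 for the outer loop and along
/// memref dimension 0 for the inner loop.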
503 static Optional<NestedPattern>
504 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
505             ArrayRef<int64_t> fastestVaryingPattern) {
506   using matcher::For;
507   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
508   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
509   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
510   switch (vectorRank) {
511   case 1:
512     return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
513   case 2:
514     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
515                For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
516   case 3:
517     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
518                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
519                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
520   default: {
521     return llvm::None;
522   }
523   }
524 }
525 
526 static NestedPattern &vectorTransferPattern() {
527   static auto pattern = matcher::Op([](Operation &op) {
528     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
529   });
530   return pattern;
531 }
532 
533 namespace {
534 
535 /// Base state for the vectorize pass.
536 /// Command line arguments are preempted by non-empty pass arguments.
537 struct Vectorize : public AffineVectorizeBase<Vectorize> {
538   Vectorize() = default;
539   Vectorize(ArrayRef<int64_t> virtualVectorSize);
540   void runOnFunction() override;
541 };
542 
543 } // end anonymous namespace
544 
545 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
546   vectorSizes = virtualVectorSize;
547 }
548 
549 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
550                                       unsigned patternDepth,
551                                       VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth must be greater than depthInPattern");
  if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
    // This loop is too far up the pattern to be assigned a vector dimension;
    // don't vectorize it.
    return;
  }
558   strategy->loopToVectorDim[loop] =
559       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
560 }
561 
562 /// Implements a simple strawman strategy for vectorization.
563 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
564 /// greedily assigns the fastest varying dimension ** of the vector ** to the
565 /// innermost loop in the pattern.
566 /// When coupled with a pattern that looks for the fastest varying dimension in
567 /// load/store MemRefs, this creates a generic vectorization strategy that works
568 /// for any loop in a hierarchy (outermost, innermost or intermediate).
569 ///
570 /// TODO: In the future we should additionally increase the power of the
571 /// profitability analysis along 3 directions:
572 ///   1. account for loop extents (both static and parametric + annotations);
573 ///   2. account for data layout permutations;
574 ///   3. account for impact of vectorization on maximal loop fusion.
575 /// Then we can quantify the above to build a cost model and search over
576 /// strategies.
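///
/// For example (illustrative): with 'vectorSizes' == {32, 256} and a 2-deep
/// matched pattern, the outermost matched loop is assigned vector dimension 0
/// (size 32) and the innermost matched loop vector dimension 1 (size 256).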
577 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
578                                           unsigned depthInPattern,
579                                           unsigned patternDepth,
580                                           VectorizationStrategy *strategy) {
581   for (auto m : matches) {
582     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
583                                     patternDepth, strategy))) {
584       return failure();
585     }
586     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
587                               patternDepth, strategy);
588   }
589   return success();
590 }
591 
592 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
593 
594 namespace {
595 
596 struct VectorizationState {
597 
598   VectorizationState(MLIRContext *context) : builder(context) {}
599 
600   /// Registers the vector replacement of a scalar operation and its result
601   /// values. Both operations must have the same number of results.
602   ///
603   /// This utility is used to register the replacement for the vast majority of
604   /// the vectorized operations.
605   ///
606   /// Example:
607   ///   * 'replaced': %0 = addf %1, %2 : f32
608   ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
609   void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
610 
611   /// Registers the vector replacement of a scalar value. The replacement
612   /// operation should have a single result, which replaces the scalar value.
613   ///
614   /// This utility is used to register the vector replacement of block arguments
615   /// and operation results which are not directly vectorized (i.e., their
616   /// scalar version still exists after vectorization), like uniforms.
617   ///
618   /// Example:
619   ///   * 'replaced': block argument or operation outside of the vectorized
620   ///     loop.
621   ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
622   void registerValueVectorReplacement(Value replaced, Operation *replacement);
623 
624   /// Registers the vector replacement of a block argument (e.g., iter_args).
625   ///
626   /// Example:
627   ///   * 'replaced': 'iter_arg' block argument.
628   ///   * 'replacement': vectorized 'iter_arg' block argument.
629   void registerBlockArgVectorReplacement(BlockArgument replaced,
630                                          BlockArgument replacement);
631 
632   /// Registers the scalar replacement of a scalar value. 'replacement' must be
633   /// scalar. Both values must be block arguments. Operation results should be
  /// replaced using the 'registerOp*' utilities.
635   ///
636   /// This utility is used to register the replacement of block arguments
637   /// that are within the loop to be vectorized and will continue being scalar
638   /// within the vector loop.
639   ///
640   /// Example:
641   ///   * 'replaced': induction variable of a loop to be vectorized.
642   ///   * 'replacement': new induction variable in the new vector loop.
643   void registerValueScalarReplacement(BlockArgument replaced,
644                                       BlockArgument replacement);
645 
646   /// Returns in 'replacedVals' the scalar replacement for values in
647   /// 'inputVals'.
648   void getScalarValueReplacementsFor(ValueRange inputVals,
649                                      SmallVectorImpl<Value> &replacedVals);
650 
651   /// Erases the scalar loop nest after its successful vectorization.
652   void finishVectorizationPattern(AffineForOp rootLoop);
653 
654   // Used to build and insert all the new operations created. The insertion
655   // point is preserved and updated along the vectorization process.
656   OpBuilder builder;
657 
658   // Maps input scalar operations to their vector counterparts.
659   DenseMap<Operation *, Operation *> opVectorReplacement;
660   // Maps input scalar values to their vector counterparts.
661   BlockAndValueMapping valueVectorReplacement;
662   // Maps input scalar values to their new scalar counterparts in the vector
663   // loop nest.
664   BlockAndValueMapping valueScalarReplacement;
665 
666   // Maps the newly created vector loops to their vector dimension.
667   DenseMap<Operation *, unsigned> vecLoopToVecDim;
668 
669   // The strategy drives which loop to vectorize by which amount.
670   const VectorizationStrategy *strategy;
671 
672 private:
673   /// Internal implementation to map input scalar values to new vector or scalar
674   /// values.
675   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
676   void registerValueScalarReplacementImpl(Value replaced, Value replacement);
677 };
678 
} // end anonymous namespace
680 
681 /// Registers the vector replacement of a scalar operation and its result
682 /// values. Both operations must have the same number of results.
683 ///
684 /// This utility is used to register the replacement for the vast majority of
685 /// the vectorized operations.
686 ///
687 /// Example:
688 ///   * 'replaced': %0 = addf %1, %2 : f32
689 ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
690 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
691                                                      Operation *replacement) {
692   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
693   LLVM_DEBUG(dbgs() << *replaced << "\n");
694   LLVM_DEBUG(dbgs() << "into\n");
695   LLVM_DEBUG(dbgs() << *replacement << "\n");
696 
697   assert(replaced->getNumResults() == replacement->getNumResults() &&
698          "Unexpected replaced and replacement results");
699   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
700   opVectorReplacement[replaced] = replacement;
701 
702   for (auto resultTuple :
703        llvm::zip(replaced->getResults(), replacement->getResults()))
704     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
705                                        std::get<1>(resultTuple));
706 }
707 
708 /// Registers the vector replacement of a scalar value. The replacement
709 /// operation should have a single result, which replaces the scalar value.
710 ///
711 /// This utility is used to register the vector replacement of block arguments
712 /// and operation results which are not directly vectorized (i.e., their
713 /// scalar version still exists after vectorization), like uniforms.
714 ///
715 /// Example:
716 ///   * 'replaced': block argument or operation outside of the vectorized loop.
717 ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
718 void VectorizationState::registerValueVectorReplacement(
719     Value replaced, Operation *replacement) {
720   assert(replacement->getNumResults() == 1 &&
721          "Expected single-result replacement");
722   if (Operation *defOp = replaced.getDefiningOp())
723     registerOpVectorReplacement(defOp, replacement);
724   else
725     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
726 }
727 
728 /// Registers the vector replacement of a block argument (e.g., iter_args).
729 ///
730 /// Example:
731 ///   * 'replaced': 'iter_arg' block argument.
732 ///   * 'replacement': vectorized 'iter_arg' block argument.
733 void VectorizationState::registerBlockArgVectorReplacement(
734     BlockArgument replaced, BlockArgument replacement) {
735   registerValueVectorReplacementImpl(replaced, replacement);
736 }
737 
738 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
739                                                             Value replacement) {
740   assert(!valueVectorReplacement.contains(replaced) &&
741          "Vector replacement already registered");
742   assert(replacement.getType().isa<VectorType>() &&
743          "Expected vector type in vector replacement");
744   valueVectorReplacement.map(replaced, replacement);
745 }
746 
747 /// Registers the scalar replacement of a scalar value. 'replacement' must be
748 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
750 ///
751 /// This utility is used to register the replacement of block arguments
752 /// that are within the loop to be vectorized and will continue being scalar
753 /// within the vector loop.
754 ///
755 /// Example:
756 ///   * 'replaced': induction variable of a loop to be vectorized.
757 ///   * 'replacement': new induction variable in the new vector loop.
758 void VectorizationState::registerValueScalarReplacement(
759     BlockArgument replaced, BlockArgument replacement) {
760   registerValueScalarReplacementImpl(replaced, replacement);
761 }
762 
763 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
764                                                             Value replacement) {
765   assert(!valueScalarReplacement.contains(replaced) &&
766          "Scalar value replacement already registered");
767   assert(!replacement.getType().isa<VectorType>() &&
768          "Expected scalar type in scalar replacement");
769   valueScalarReplacement.map(replaced, replacement);
770 }
771 
772 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
773 void VectorizationState::getScalarValueReplacementsFor(
774     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
775   for (Value inputVal : inputVals)
776     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
777 }
778 
779 /// Erases a loop nest, including all its nested operations.
780 static void eraseLoopNest(AffineForOp forOp) {
781   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
782   forOp.erase();
783 }
784 
785 /// Erases the scalar loop nest after its successful vectorization.
786 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
787   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
788   eraseLoopNest(rootLoop);
789 }
790 
791 // Apply 'map' with 'mapOperands' returning resulting values in 'results'.
792 static void computeMemoryOpIndices(Operation *op, AffineMap map,
793                                    ValueRange mapOperands,
794                                    VectorizationState &state,
795                                    SmallVectorImpl<Value> &results) {
796   for (auto resultExpr : map.getResults()) {
797     auto singleResMap =
798         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
799     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
800                                                     mapOperands);
801     results.push_back(afOp);
802   }
803 }
804 
805 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
806 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
808 static FilterFunctionType
809 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
810                              int fastestVaryingMemRefDimension) {
811   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
812     auto loop = cast<AffineForOp>(forOp);
813     auto parallelIt = parallelLoops.find(loop);
814     if (parallelIt == parallelLoops.end())
815       return false;
816     int memRefDim = -1;
817     auto vectorizableBody =
818         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
819     if (!vectorizableBody)
820       return false;
821     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
822            memRefDim == fastestVaryingMemRefDimension;
823   };
824 }
825 
826 /// Returns the vector type resulting from applying the provided vectorization
827 /// strategy on the scalar type.
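///
/// For example (illustrative): 'scalarTy' == f32 with vector sizes {32, 256}
/// yields vector<32x256xf32>.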
828 static VectorType getVectorType(Type scalarTy,
829                                 const VectorizationStrategy *strategy) {
830   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
831   return VectorType::get(strategy->vectorSizes, scalarTy);
832 }
833 
/// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
/// nullptr otherwise.
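///
/// Example (illustrative, assuming a 1-D vector<128xf32> strategy):
///   * 'constOp':     %cst = constant 1.0 : f32
///   * replacement:   %cst_v = constant dense<1.0> : vector<128xf32>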
837 static ConstantOp vectorizeConstant(ConstantOp constOp,
838                                     VectorizationState &state) {
839   Type scalarTy = constOp.getType();
840   if (!VectorType::isValidElementType(scalarTy))
841     return nullptr;
842 
843   auto vecTy = getVectorType(scalarTy, state.strategy);
844   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
845   auto newConstOp = state.builder.create<ConstantOp>(constOp.getLoc(), vecAttr);
846 
847   // Register vector replacement for future uses in the scope.
848   state.registerOpVectorReplacement(constOp, newConstOp);
849   return newConstOp;
850 }
851 
852 /// Returns true if the provided value is vector uniform given the vectorization
853 /// strategy.
854 // TODO: For now, only values that are invariants to all the loops in the
855 // vectorization strategy are considered vector uniforms.
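//
// For example (illustrative): a function argument, or a value defined above
// the outermost loop of the strategy, is uniform; a value defined within any
// of the strategy's loops is not.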
856 static bool isUniformDefinition(Value value,
857                                 const VectorizationStrategy *strategy) {
858   for (auto loopToDim : strategy->loopToVectorDim) {
859     auto loop = cast<AffineForOp>(loopToDim.first);
860     if (!loop.isDefinedOutsideOfLoop(value))
861       return false;
862   }
863   return true;
864 }
865 
866 /// Generates a broadcast op for the provided uniform value using the
867 /// vectorization strategy in 'state'.
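///
/// Example (illustrative, assuming a 1-D vector<128xf32> strategy):
///   * 'uniformVal':  %u = ... : f32  (defined outside the loops to vectorize)
///   * replacement:   %0 = vector.broadcast %u : f32 to vector<128xf32>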
868 static Operation *vectorizeUniform(Value uniformVal,
869                                    VectorizationState &state) {
870   OpBuilder::InsertionGuard guard(state.builder);
871   state.builder.setInsertionPointAfterValue(uniformVal);
872 
873   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
874   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
875                                                    vectorTy, uniformVal);
876   state.registerValueVectorReplacement(uniformVal, bcastOp);
877   return bcastOp;
878 }
879 
880 /// Tries to vectorize a given `operand` by applying the following logic:
881 /// 1. if the defining operation has been already vectorized, `operand` is
882 ///    already in the proper vector form;
883 /// 2. if the `operand` is a constant, returns the vectorized form of the
884 ///    constant;
/// 3. if the `operand` is uniform, returns a vector broadcast of the operand;
886 /// 4. otherwise, the vectorization of `operand` is not supported.
887 /// Newly created vector operations are registered in `state` as replacement
888 /// for their scalar counterparts.
889 /// In particular this logic captures some of the use cases where definitions
890 /// that are not scoped under the current pattern are needed to vectorize.
891 /// One such example is top level function constants that need to be splatted.
892 ///
893 /// Returns an operand that has been vectorized to match `state`'s strategy if
894 /// vectorization is possible with the above logic. Returns nullptr otherwise.
895 ///
896 /// TODO: handle more complex cases.
897 static Value vectorizeOperand(Value operand, VectorizationState &state) {
898   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
899   // If this value is already vectorized, we are done.
900   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
901     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
902     return vecRepl;
903   }
904 
  // A vector operand that is not in the replacement map should never reach
906   // this point. Reaching this point could mean that the code was already
907   // vectorized and we shouldn't try to vectorize already vectorized code.
908   assert(!operand.getType().isa<VectorType>() &&
909          "Vector op not found in replacement map");
910 
911   // Vectorize constant.
912   if (auto constOp = operand.getDefiningOp<ConstantOp>()) {
913     ConstantOp vecConstant = vectorizeConstant(constOp, state);
914     LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
915     return vecConstant.getResult();
916   }
917 
918   // Vectorize uniform values.
919   if (isUniformDefinition(operand, state.strategy)) {
920     Operation *vecUniform = vectorizeUniform(operand, state);
921     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
922     return vecUniform->getResult(0);
923   }
924 
925   // Check for unsupported block argument scenarios. A supported block argument
926   // should have been vectorized already.
927   if (!operand.getDefiningOp())
928     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
929   else
930     // Generic unsupported case.
931     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
932 
933   return nullptr;
934 }
935 
936 /// Vectorizes an affine load with the vectorization strategy in 'state' by
937 /// generating a 'vector.transfer_read' op with the proper permutation map
938 /// inferred from the indices of the load. The new 'vector.transfer_read' is
939 /// registered as replacement of the scalar load. Returns the newly created
/// 'vector.transfer_read' if vectorization was successful. Returns nullptr
/// otherwise.
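///
/// Example (illustrative, assuming a 2-D vector<32x256xf32> strategy where %i
/// and %j are induction variables of the new vector loops; the permutation map
/// and padding value are elided for brevity):
///   * 'loadOp':      %a = affine.load %A[%i, %j] : memref<?x?xf32>
///   * replacement:   %v = vector.transfer_read %A[%i, %j]
///                         : memref<?x?xf32>, vector<32x256xf32>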
942 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
943                                       VectorizationState &state) {
944   MemRefType memRefType = loadOp.getMemRefType();
945   Type elementType = memRefType.getElementType();
946   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
947 
948   // Replace map operands with operands from the vector loop nest.
949   SmallVector<Value, 8> mapOperands;
950   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
951 
952   // Compute indices for the transfer op. AffineApplyOp's may be generated.
953   SmallVector<Value, 8> indices;
954   indices.reserve(memRefType.getRank());
955   if (loadOp.getAffineMap() !=
956       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
957     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
958                            indices);
959   else
960     indices.append(mapOperands.begin(), mapOperands.end());
961 
962   // Compute permutation map using the information of new vector loops.
963   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
964                                            indices, state.vecLoopToVecDim);
965   if (!permutationMap) {
966     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
967     return nullptr;
968   }
969   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
970   LLVM_DEBUG(permutationMap.print(dbgs()));
971 
972   auto transfer = state.builder.create<vector::TransferReadOp>(
973       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
974 
975   // Register replacement for future uses in the scope.
976   state.registerOpVectorReplacement(loadOp, transfer);
977   return transfer;
978 }
979 
980 /// Vectorizes an affine store with the vectorization strategy in 'state' by
981 /// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
/// 'vector.transfer_write' if vectorization was successful. Returns nullptr
/// otherwise.
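///
/// Example (illustrative, assuming a 2-D vector<32x256xf32> strategy where %i
/// and %j are induction variables of the new vector loops and %v is the
/// vectorized value to store; the permutation map is elided for brevity):
///   * 'storeOp':     affine.store %s, %C[%i, %j] : memref<?x?xf32>
///   * replacement:   vector.transfer_write %v, %C[%i, %j]
///                         : vector<32x256xf32>, memref<?x?xf32>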
986 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
987                                        VectorizationState &state) {
988   MemRefType memRefType = storeOp.getMemRefType();
989   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
990   if (!vectorValue)
991     return nullptr;
992 
993   // Replace map operands with operands from the vector loop nest.
994   SmallVector<Value, 8> mapOperands;
995   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
996 
997   // Compute indices for the transfer op. AffineApplyOp's may be generated.
998   SmallVector<Value, 8> indices;
999   indices.reserve(memRefType.getRank());
1000   if (storeOp.getAffineMap() !=
1001       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1002     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1003                            indices);
1004   else
1005     indices.append(mapOperands.begin(), mapOperands.end());
1006 
1007   // Compute permutation map using the information of new vector loops.
1008   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1009                                            indices, state.vecLoopToVecDim);
1010   if (!permutationMap)
1011     return nullptr;
1012   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1013   LLVM_DEBUG(permutationMap.print(dbgs()));
1014 
1015   auto transfer = state.builder.create<vector::TransferWriteOp>(
1016       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1017       permutationMap);
1018   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1019 
1020   // Register replacement for future uses in the scope.
1021   state.registerOpVectorReplacement(storeOp, transfer);
1022   return transfer;
1023 }
1024 
1025 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1026 /// created and registered as replacement for the scalar loop. The builder's
1027 /// insertion point is set to the new loop's body so that subsequent vectorized
1028 /// operations are inserted into the new loop. If the loop is a vector
1029 /// dimension, the step of the newly created loop will reflect the vectorization
/// factor used to vectorize that dimension.
1031 // TODO: Add support for 'iter_args'. Related operands and results will be
1032 // vectorized at this point.
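//
// Example (illustrative): with a vectorization factor of 256 for this loop's
// assigned vector dimension,
//   * 'forOp':        affine.for %i = 0 to %N { ... }
//   * replacement:    affine.for %i = 0 to %N step 256 { ... }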
1033 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1034                                        VectorizationState &state) {
1035   const VectorizationStrategy &strategy = *state.strategy;
1036   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1037   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1038 
1039   // We only support 'iter_args' when the loop is not one of the vector
1040   // dimensions.
1041   // TODO: Support vector dimension loops. They require special handling:
1042   // generate horizontal reduction, last-value extraction, etc.
1043   if (forOp.getNumIterOperands() > 0 && isLoopVecDim)
1044     return nullptr;
1045 
1046   // If we are vectorizing a vector dimension, compute a new step for the new
1047   // vectorized loop using the vectorization factor for the vector dimension.
1048   // Otherwise, propagate the step of the scalar loop.
1049   unsigned newStep;
1050   if (isLoopVecDim) {
1051     unsigned vectorDim = loopToVecDimIt->second;
1052     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1053     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1054     newStep = forOp.getStep() * forOpVecFactor;
1055   } else {
1056     newStep = forOp.getStep();
1057   }
1058 
1059   // Vectorize 'iter_args'.
1060   SmallVector<Value, 8> vecIterOperands;
1061   for (auto operand : forOp.getIterOperands())
1062     vecIterOperands.push_back(vectorizeOperand(operand, state));
1063 
1064   auto vecForOp = state.builder.create<AffineForOp>(
1065       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1066       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1067       vecIterOperands,
1068       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1069         // Make sure we don't create a default terminator in the loop body as
1070         // the proper terminator will be added during vectorization.
1071         return;
1072       });
1073 
1074   // Register loop-related replacements:
1075   //   1) The new vectorized loop is registered as vector replacement of the
1076   //      scalar loop.
1077   //      TODO: Support reductions along the vector dimension.
1078   //   2) The new iv of the vectorized loop is registered as scalar replacement
1079   //      since a scalar copy of the iv will prevail in the vectorized loop.
1080   //      TODO: A vector replacement will also be added in the future when
1081   //      vectorization of linear ops is supported.
1082   //   3) The new 'iter_args' region arguments are registered as vector
1083   //      replacements since they have been vectorized.
1084   state.registerOpVectorReplacement(forOp, vecForOp);
1085   state.registerValueScalarReplacement(forOp.getInductionVar(),
1086                                        vecForOp.getInductionVar());
1087   for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1089     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1090                                             std::get<1>(iterTuple));
1091 
1092   if (isLoopVecDim)
1093     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1094 
1095   // Change insertion point so that upcoming vectorized instructions are
1096   // inserted into the vectorized loop's body.
1097   state.builder.setInsertionPointToStart(vecForOp.getBody());
1098   return vecForOp;
1099 }
1100 
/// Vectorizes an arbitrary operation by plain widening. We apply generic type
/// widening to all its results and retrieve the vector counterparts of all its
/// operands.
1104 static Operation *widenOp(Operation *op, VectorizationState &state) {
1105   SmallVector<Type, 8> vectorTypes;
1106   for (Value result : op->getResults())
1107     vectorTypes.push_back(
1108         VectorType::get(state.strategy->vectorSizes, result.getType()));
1109 
1110   SmallVector<Value, 8> vectorOperands;
1111   for (Value operand : op->getOperands()) {
1112     Value vecOperand = vectorizeOperand(operand, state);
1113     if (!vecOperand) {
1114       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1115       return nullptr;
1116     }
1117     vectorOperands.push_back(vecOperand);
1118   }
1119 
1120   // Create a clone of the op with the proper operands and return types.
1121   // TODO: The following assumes there is always an op with a fixed
1122   // name that works both in scalar mode and vector mode.
1123   // TODO: Is it worth considering an Operation.clone operation which
1124   // changes the type so we can promote an Operation with less boilerplate?
1125   OperationState vecOpState(op->getLoc(), op->getName().getStringRef(),
1126                             vectorOperands, vectorTypes, op->getAttrs(),
1127                             /*successors=*/{}, /*regions=*/{});
1128   Operation *vecOp = state.builder.createOperation(vecOpState);
1129   state.registerOpVectorReplacement(op, vecOp);
1130   return vecOp;
1131 }
1132 
/// Vectorizes a yield operation by widening its types. The builder's insertion
/// point is set after the vectorized parent op so that vectorization continues
/// with the operations that follow it.
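///
/// For example (illustrative only), under a 1-D strategy of vector size 128, a
/// terminator such as:
///
///   affine.yield %acc : f32
///
/// becomes:
///
///   affine.yield %vacc : vector<128xf32>
///
/// where %vacc is the vector replacement of %acc.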
1136 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1137                                          VectorizationState &state) {
1138   Operation *newYieldOp = widenOp(yieldOp, state);
1139   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1140   state.builder.setInsertionPointAfter(newParentOp);
1141   return newYieldOp;
1142 }
1143 
/// Encodes Operation-specific behavior for vectorization. In general we
/// assume that all operands of an op must be vectorized, but this is not
/// always true. In the future, it would be nice to have a trait that
/// describes how a particular operation vectorizes. For now we implement the
/// case distinction here. Returns a vectorized form of the operation or
/// nullptr if vectorization fails.
1150 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1151 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1152 // do one-off logic here; ideally it would be TableGen'd.
1153 static Operation *vectorizeOneOperation(Operation *op,
1154                                         VectorizationState &state) {
1155   // Sanity checks.
1156   assert(!isa<vector::TransferReadOp>(op) &&
1157          "vector.transfer_read cannot be further vectorized");
1158   assert(!isa<vector::TransferWriteOp>(op) &&
1159          "vector.transfer_write cannot be further vectorized");
1160 
1161   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1162     return vectorizeAffineLoad(loadOp, state);
1163   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1164     return vectorizeAffineStore(storeOp, state);
1165   if (auto forOp = dyn_cast<AffineForOp>(op))
1166     return vectorizeAffineForOp(forOp, state);
1167   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1168     return vectorizeAffineYieldOp(yieldOp, state);
1169   if (auto constant = dyn_cast<ConstantOp>(op))
1170     return vectorizeConstant(constant, state);
1171 
1172   // Other ops with regions are not supported.
1173   if (op->getNumRegions() != 0)
1174     return nullptr;
1175 
1176   return widenOp(op, state);
1177 }
1178 
1179 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1180 /// vector container that preserves the relative nesting level of each loop with
1181 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1182 /// will be assigned to the loop in the current 'match'.
1183 static void
1184 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1185                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
1186   // Add a new empty level to the output if it doesn't exist already.
1187   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1188   if (currentLevel == loops.size())
1189     loops.push_back(SmallVector<AffineForOp, 2>());
1190 
1191   // Add current match and recursively visit its children.
1192   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1193   for (auto childMatch : match.getMatchedChildren()) {
1194     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1195   }
1196 }
1197 
1198 /// Converts all the nested loops in 'match' to a 2D vector container that
1199 /// preserves the relative nesting level of each loop with respect to the others
/// in 'match'. This means that every loop in 'loops[i]' will have an ancestor
/// loop in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a descendant
/// loop in 'loops[i+1]'.
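///
/// For example (illustrative), a match rooted at loop %i0 with a nested match
/// on %i1, which in turn has nested matches on %i2 and %i3, produces:
///
///   loops = {{%i0}, {%i1}, {%i2, %i3}}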
1203 static void
1204 getMatchedAffineLoops(NestedMatch match,
1205                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1207 }
1208 
1209 /// Internal implementation to vectorize affine loops from a single loop nest
1210 /// using an n-D vectorization strategy.
1211 static LogicalResult
1212 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1213                   const VectorizationStrategy &strategy) {
1214   assert(loops[0].size() == 1 && "Expected single root loop");
1215   AffineForOp rootLoop = loops[0][0];
1216   VectorizationState state(rootLoop.getContext());
1217   state.builder.setInsertionPointAfter(rootLoop);
1218   state.strategy = &strategy;
1219 
1220   // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
  // As a consequence, we must check that each root pattern is still
  // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1225   // TODO: implement a non-greedy profitability analysis that keeps only
1226   // non-intersecting patterns.
1227   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1228     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1229     return failure();
1230   }
1231 
1232   //////////////////////////////////////////////////////////////////////////////
1233   // Vectorize the scalar loop nest following a topological order. A new vector
1234   // loop nest with the vectorized operations is created along the process. If
1235   // vectorization succeeds, the scalar loop nest is erased. If vectorization
1236   // fails, the vector loop nest is erased and the scalar loop nest is not
1237   // modified.
1238   //////////////////////////////////////////////////////////////////////////////
1239 
1240   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1241     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1242     Operation *vectorOp = vectorizeOneOperation(op, state);
1243     if (!vectorOp)
1244       return WalkResult::interrupt();
1245 
1246     return WalkResult::advance();
1247   });
1248 
1249   if (opVecResult.wasInterrupted()) {
1250     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1251                       << rootLoop << "\n");
1252     // Erase vector loop nest if it was created.
1253     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1254     if (vecRootLoopIt != state.opVectorReplacement.end())
1255       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1256 
1257     return failure();
1258   }
1259 
1260   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1261          "Expected vector replacement for loop nest");
1262   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1263   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1264                     << *state.opVectorReplacement[rootLoop]);
1265 
1266   // Finish this vectorization pattern.
1267   state.finishVectorizationPattern(rootLoop);
1268   return success();
1269 }
1270 
/// Extracts the matched loops and vectorizes them following a topological
/// order. A new vector loop nest is created if vectorization succeeds, and the
/// original scalar loop nest is then erased. If vectorization fails, the
/// original loop nest is left unmodified.
1274 static LogicalResult vectorizeRootMatch(NestedMatch m,
1275                                         const VectorizationStrategy &strategy) {
1276   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1277   getMatchedAffineLoops(m, loopsToVectorize);
1278   return vectorizeLoopNest(loopsToVectorize, strategy);
1279 }
1280 
1281 /// Traverses all the loop matches and classifies them into intersection
/// buckets. Two matches intersect if either of them encloses the other. A
1283 /// match intersects with a bucket if the match intersects with the root
1284 /// (outermost) loop in that bucket.
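///
/// For example (illustrative), given matches rooted at %i0, %i2 and %j0, where
/// %i0 encloses %i2 and %j0 belongs to a separate loop nest, the matches on
/// %i0 and %i2 are placed in the same bucket (with %i0 as the bucket root)
/// while the match on %j0 gets a bucket of its own.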
1285 static void computeIntersectionBuckets(
1286     ArrayRef<NestedMatch> matches,
1287     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1288   assert(intersectionBuckets.empty() && "Expected empty output");
1289   // Keeps track of the root (outermost) loop of each bucket.
1290   SmallVector<AffineForOp, 8> bucketRoots;
1291 
1292   for (const NestedMatch &match : matches) {
1293     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1294     bool intersects = false;
1295     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1296       AffineForOp bucketRoot = bucketRoots[i];
1297       // Add match to the bucket if the bucket root encloses the match root.
1298       if (bucketRoot->isAncestor(matchRoot)) {
1299         intersectionBuckets[i].push_back(match);
1300         intersects = true;
1301         break;
1302       }
1303       // Add match to the bucket if the match root encloses the bucket root. The
1304       // match root becomes the new bucket root.
1305       if (matchRoot->isAncestor(bucketRoot)) {
1306         bucketRoots[i] = matchRoot;
1307         intersectionBuckets[i].push_back(match);
1308         intersects = true;
1309         break;
1310       }
1311     }
1312 
1313     // Match doesn't intersect with any existing bucket. Create a new bucket for
1314     // it.
1315     if (!intersects) {
1316       bucketRoots.push_back(matchRoot);
1317       intersectionBuckets.push_back(SmallVector<NestedMatch, 8>());
1318       intersectionBuckets.back().push_back(match);
1319     }
1320   }
1321 }
1322 
1323 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1324 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1325 /// factor is applied inner-to-outer to the loops of each loop nest.
1326 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1327 /// vectorization order.
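///
/// For example, with vectorSizes = {32, 256} on a two-loop match, the
/// innermost matched loop is vectorized by a factor of 256 and its enclosing
/// matched loop by a factor of 32 (a sketch of the default inner-to-outer
/// assignment).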
1328 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1329                            ArrayRef<int64_t> vectorSizes,
1330                            ArrayRef<int64_t> fastestVaryingPattern) {
1331   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1332   Optional<NestedPattern> pattern =
1333       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1334   if (!pattern.hasValue()) {
1335     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1336     return;
1337   }
1338 
1339   LLVM_DEBUG(dbgs() << "\n******************************************");
1340   LLVM_DEBUG(dbgs() << "\n******************************************");
1341   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1342   LLVM_DEBUG(dbgs() << *parentOp << "\n");
1343 
1344   unsigned patternDepth = pattern->getDepth();
1345 
1346   // Compute all the pattern matches and classify them into buckets of
1347   // intersecting matches.
1348   SmallVector<NestedMatch, 32> allMatches;
1349   pattern->match(parentOp, &allMatches);
1350   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1351   computeIntersectionBuckets(allMatches, intersectionBuckets);
1352 
1353   // Iterate over all buckets and vectorize the matches eagerly. We can only
1354   // vectorize one match from each bucket since all the matches within a bucket
1355   // intersect.
1356   for (auto &intersectingMatches : intersectionBuckets) {
1357     for (NestedMatch &match : intersectingMatches) {
1358       VectorizationStrategy strategy;
1359       // TODO: depending on profitability, elect to reduce the vector size.
1360       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1361       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1362                                       patternDepth, &strategy))) {
1363         continue;
1364       }
1365       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1366                                 &strategy);
1367       // Vectorize match. Skip the rest of intersecting matches in the bucket if
1368       // vectorization succeeded.
1369       // TODO: if pattern does not apply, report it; alter the cost/benefit.
1370       // TODO: some diagnostics if failure to vectorize occurs.
1371       if (succeeded(vectorizeRootMatch(match, strategy)))
1372         break;
1373     }
1374   }
1375 
1376   LLVM_DEBUG(dbgs() << "\n");
1377 }
1378 
/// Applies vectorization to the current function by searching over a set of
/// predetermined patterns.
1389 void Vectorize::runOnFunction() {
1390   FuncOp f = getFunction();
1391   if (!fastestVaryingPattern.empty() &&
1392       fastestVaryingPattern.size() != vectorSizes.size()) {
1393     f.emitRemark("Fastest varying pattern specified with different size than "
1394                  "the vector size.");
1395     return signalPassFailure();
1396   }
1397 
1398   DenseSet<Operation *> parallelLoops;
1399   f.walk([&parallelLoops](AffineForOp loop) {
1400     if (isLoopParallel(loop))
1401       parallelLoops.insert(loop);
1402   });
1403 
1404   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1405   NestedPatternContext mlContext;
1406   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern);
1407 }
1408 
1409 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1410 /// SuperVectorizer:
1411 ///   * There must be at least one loop.
1412 ///   * There must be a single root loop (nesting level 0).
///   * Each loop at a given nesting level must be nested in a loop from the
///     previous nesting level.
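///
/// For example (illustrative), loops = {{%i0}, {%i1, %i2}} passes verification
/// when %i1 and %i2 are both nested inside %i0 and neither is nested inside
/// the other, whereas a first level with two loops, e.g. loops = {{%i0, %i1}},
/// is rejected because it has more than one root.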
1415 static LogicalResult
1416 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1417   // Expected at least one loop.
1418   if (loops.empty())
1419     return failure();
1420 
1421   // Expected only one root loop.
1422   if (loops[0].size() != 1)
1423     return failure();
1424 
1425   // Traverse loops outer-to-inner to check some invariants.
1426   for (int i = 1, end = loops.size(); i < end; ++i) {
1427     for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
1430       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1431             return maybeParent->isProperAncestor(loop);
1432           }))
1433         return failure();
1434 
      // Check that each loop at this level is not nested in another loop from
      // this level.
1437       for (AffineForOp sibling : loops[i]) {
1438         if (sibling->isProperAncestor(loop))
1439           return failure();
1440       }
1441     }
1442   }
1443 
1444   return success();
1445 }
1446 
1447 namespace mlir {
1448 
1449 /// External utility to vectorize affine loops in 'loops' using the n-D
1450 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1451 /// factor is applied inner-to-outer to the loops of each loop nest.
1452 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1453 /// vectorization order.
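///
/// A minimal usage sketch (with hypothetical 'func' and 'candidates'
/// variables), mirroring what the pass driver in this file does: collect the
/// parallel loops of a function and vectorize them by a factor of 256 with the
/// default vectorization order:
///
///   DenseSet<Operation *> candidates;
///   func.walk([&](AffineForOp loop) {
///     if (isLoopParallel(loop))
///       candidates.insert(loop);
///   });
///   vectorizeAffineLoops(func, candidates, /*vectorSizes=*/{256},
///                        /*fastestVaryingPattern=*/{});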
1454 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1455                           ArrayRef<int64_t> vectorSizes,
1456                           ArrayRef<int64_t> fastestVaryingPattern) {
1457   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1458   NestedPatternContext mlContext;
1459   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern);
1460 }
1461 
1462 /// External utility to vectorize affine loops from a single loop nest using an
1463 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1464 /// Loops are provided in a 2D vector container. The first dimension represents
1465 /// the nesting level relative to the loops to be vectorized. The second
1466 /// dimension contains the loops. This means that:
///   a) every loop in 'loops[i]' must have an ancestor loop in 'loops[i-1]',
///   b) a loop in 'loops[i]' may or may not have a descendant loop in
///      'loops[i+1]'.
1469 ///
1470 /// For example, for the following loop nest:
1471 ///
1472 ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1473 ///               %out0: memref<64x128x512xf32>,
1474 ///               %out1: memref<64x128x128xf32>) {
1475 ///     affine.for %i0 = 0 to 64 {
1476 ///       affine.for %i1 = 0 to 128 {
1477 ///         affine.for %i2 = 0 to 512 {
1478 ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1479 ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1480 ///         }
1481 ///         affine.for %i3 = 0 to 128 {
1482 ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1483 ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1484 ///         }
1485 ///       }
1486 ///     }
1487 ///     return
1488 ///   }
1489 ///
1490 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1491 /// innermost loops;
1492 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1493 /// loops;
1494 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1495 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1496 /// loops = {{%i1}}, to vectorize only the middle loop.
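///
/// A minimal usage sketch for the last case above (hedged; it assumes the
/// VectorizationStrategy fields 'vectorSizes' and 'loopToVectorDim' described
/// in its definition, and 'i1' is a hypothetical AffineForOp variable holding
/// %i1), vectorizing %i1 by a factor of 128:
///
///   std::vector<SmallVector<AffineForOp, 2>> loops = {{i1}};
///   VectorizationStrategy strategy;
///   strategy.vectorSizes.push_back(128);
///   strategy.loopToVectorDim[i1] = 0; // vectorize %i1 along vector dim 0.
///   if (failed(vectorizeAffineLoopNest(loops, strategy))) {
///     // Vectorization failed; the original nest is left untouched.
///   }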
1497 LogicalResult
1498 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1499                         const VectorizationStrategy &strategy) {
1500   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1501   NestedPatternContext mlContext;
1502   if (failed(verifyLoopNesting(loops)))
1503     return failure();
1504   return vectorizeLoopNest(loops, strategy);
1505 }
1506 
1507 std::unique_ptr<OperationPass<FuncOp>>
1508 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1509   return std::make_unique<Vectorize>(virtualVectorSize);
1510 }
1511 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1512   return std::make_unique<Vectorize>();
1513 }
1514 
1515 } // namespace mlir
1516