1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Analysis/LoopAnalysis.h"
16 #include "mlir/Analysis/NestedMatcher.h"
17 #include "mlir/Analysis/Utils.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Vector/VectorOps.h"
21 #include "mlir/Dialect/Vector/VectorUtils.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "llvm/Support/Debug.h"
25 
26 using namespace mlir;
27 using namespace vector;
28 
29 ///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which provide a single,
/// compact representation in the vector types of information that is expected
/// to reduce the impact of the phase ordering problem.
34 ///
35 /// Vector granularity:
36 /// ===================
37 /// This pass is designed to perform vectorization at a super-vector
38 /// granularity. A super-vector is loosely defined as a vector type that is a
39 /// multiple of a "good" vector size so the HW can efficiently implement a set
40 /// of high-level primitives. Multiple is understood along any dimension; e.g.
41 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
42 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
43 /// efficiently implement a set of high-level primitives" is not necessarily an
44 /// integer multiple of actual hardware registers. We leave details of this
45 /// distinction unspecified for now.
46 ///
/// Some may prefer the terminology of a "tile of HW vectors". In this case,
/// one should note that super-vectors implement an "always full tile"
/// abstraction. They guarantee no partial-tile separation is necessary by
/// relying on a high-level copy-reshape abstraction that we call
/// vector.transfer. This copy-reshape operation is also responsible for
/// performing layout transposition if necessary. In the general case this
/// will require a scoped allocation in some notional local memory.
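///
/// For illustration, here is an informal sketch (using the simplified notation
/// of the other snippets in this file, eliding the padding value) of a
/// transfer that also transposes its 2-D slice via a permutation_map:
/// ```mlir
///   %v = vector.transfer_read %A[%i, %j]
///          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>} :
///        memref<?x?xf32>, vector<32x256xf32>
/// ```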
54 ///
55 /// Whatever the mental model one prefers to use for this abstraction, the key
56 /// point is that we burn into a single, compact, representation in the vector
57 /// types, information that is expected to reduce the impact of the phase
58 /// ordering problem. Indeed, a vector type conveys information that:
///   1. the associated loops have dependency semantics that do not prevent
///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization hampering transformations can be
///      applied to them anymore;
///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality.
69 /// In other words, super-vectorization provides a level of separation of
70 /// concern by way of opacity to subsequent passes. This has the effect of
71 /// encapsulating and propagating vectorization constraints down the list of
72 /// passes until we are ready to lower further.
73 ///
/// For a particular target, a notion of minimal n-D vector size will be
/// specified and vectorization targets a multiple of it. In the following
76 /// paragraph, let "k ." represent "a multiple of", to be understood as a
77 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
78 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
79 ///
80 /// Some non-exhaustive notable super-vector sizes of interest include:
81 ///   - CPU: vector<k . HW_vector_size>,
82 ///          vector<k' . core_count x k . HW_vector_size>,
83 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
84 ///   - GPU: vector<k . warp_size>,
85 ///          vector<k . warp_size x float2>,
86 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
88 ///
89 /// Loops and operations are emitted that operate on those super-vector shapes.
90 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
91 /// passes are expected to be (gradually) more target-specific.
92 ///
93 /// At a high level, a vectorized load in a loop will resemble:
94 /// ```mlir
95 ///   affine.for %i = ? to ? step ? {
96 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
97 ///   }
98 /// ```
99 /// It is the responsibility of the implementation of vector.transfer_read to
100 /// materialize vector registers from the original scalar memrefs. A later (more
101 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
103 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
104 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
105 ///      thus opening opportunities for unrolling and pipelining. This is an
106 ///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call; achieving full separation of concerns. This is an instance of a
///      library call; or
110 ///   3. a mix of both, e.g. based on a model.
111 /// In the future, these operations will expose a contract to constrain the
112 /// search on vectorization patterns and sizes.
113 ///
114 /// Occurrence of super-vectorization in the compiler flow:
115 /// =======================================================
116 /// This is an active area of investigation. We start with 2 remarks to position
117 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
118 /// and LLVM SLP Vectorizer.
119 ///
120 /// LLVM VPLAN:
121 /// -----------
122 /// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN.
/// For instance, this is the case in a traditional polyhedral compilation flow
/// (e.g. the PPCG project uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping and register optimizations), applied before unrolling.
/// When vectorization is applied at this *late* level in a typical
129 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
130 /// super-vectorization is expected to match (or subsume) the type of patterns
131 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
132 /// is higher level and our implementation should be significantly simpler. Also
/// note that in this mode, recursive patterns are probably a bit of overkill,
/// although it is reasonable to expect that mixing a bit of outer loop and
135 /// inner loop vectorization + unrolling will provide interesting choices to
136 /// MLIR.
137 ///
138 /// LLVM SLP Vectorizer:
139 /// --------------------
/// Super-vectorization, however, is not meant to be used in a similar fashion
/// to the SLP vectorizer. The main difference lies in the information that
142 /// both vectorizers use: super-vectorization examines contiguity of memory
143 /// references along fastest varying dimensions and loops with recursive nested
144 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
145 /// the other hand, performs flat pattern matching inside a single unrolled loop
146 /// body and stitches together pieces of load and store operations into full
147 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
148 /// innermost loop, control-flow dependent patterns that super-vectorization may
149 /// not be able to capture easily. In other words, super-vectorization does not
150 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
151 ///
152 /// Ongoing investigations:
153 /// -----------------------
154 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
156 /// opportunities in the context of the traditional polyhedral compiler flow
157 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
158 /// we expect to experiment with super-vectorization:
159 /// 1. Right after language lowering to MLIR: this is the earliest time where
160 ///    super-vectorization is expected to be applied. At this level, all the
161 ///    language/user/library-level annotations are available and can be fully
162 ///    exploited. Examples include loop-type annotations (such as parallel,
163 ///    reduction, scan, dependence distance vector, vectorizable) as well as
164 ///    memory access annotations (such as non-aliasing writes guaranteed,
///    indirect accesses that are permutations by construction), or the fact
166 ///    that a particular operation is prescribed atomic by the user. At this
167 ///    level, anything that enriches what dependence analysis can do should be
168 ///    aggressively exploited. At this level we are close to having explicit
169 ///    vector types in the language, except we do not impose that burden on the
170 ///    programmer/library: we derive information from scalar code + annotations.
171 /// 2. After dependence analysis and before polyhedral scheduling: the
172 ///    information that supports vectorization does not need to be supplied by a
173 ///    higher level of abstraction. Traditional dependence analysis is available
174 ///    in MLIR and will be used to drive vectorization and cost models.
175 ///
176 /// Let's pause here and remark that applying super-vectorization as described
177 /// in 1. and 2. presents clear opportunities and risks:
///   - the opportunity is that vectorization is burned into the type system
///   and is protected from the adverse effects of loop scheduling, tiling,
///   loop interchange and all passes downstream. Provided that subsequent
///   passes are able to operate on vector types, the vector shapes, associated
///   loop iterator properties, alignment, and contiguity of fastest varying
///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true, wherever
///   vectorization is applied) and b. that imposing vectorization constraints
///   too early may be overall detrimental to loop fusion, tiling and other
///   transformations because the dependence distances are coarsened when
///   operating on elemental vector types. For this reason, the pattern
///   profitability analysis should include a component that also captures the
///   maximal amount of fusion available under a particular pattern. This is
///   still at the stage of rough ideas but in this context, search is our
///   friend, as the Tensor Comprehensions and auto-TVM contributions
///   demonstrated previously.
/// The bottom line is that we do not yet have good answers for the above, but
/// we aim at making it easy to answer such questions.
198 ///
199 /// Back to our listing, the last places where early super-vectorization makes
200 /// sense are:
201 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
///    to improve locality and parallelism, and to be configurable (e.g.
///    max-fuse, smart-fuse, etc.). They can also have adverse effects on
///    contiguity
204 ///    properties that are required for vectorization but the vector.transfer
205 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
206 ///    these properties.
207 /// 4. right after polyhedral-style scheduling+tiling;
208 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
209 ///    probably the most promising places because applying tiling achieves a
210 ///    separation of concerns that allows rescheduling to worry less about
211 ///    locality and more about parallelism and distribution (e.g. min-fuse).
212 ///
213 /// At these levels the risk-reward looks different: on one hand we probably
214 /// lost a good deal of language/user/library-level annotation; on the other
215 /// hand we gained parallelism and locality through scheduling and tiling.
216 /// However we probably want to ensure tiling is compatible with the
217 /// full-tile-only abstraction used in super-vectorization or suffer the
218 /// consequences. It is too early to place bets on what will win but we expect
219 /// super-vectorization to be the right abstraction to allow exploring at all
220 /// these levels. And again, search is our friend.
221 ///
222 /// Lastly, we mention it again here:
/// 6. as an MLIR-based alternative to VPLAN.
224 ///
225 /// Lowering, unrolling, pipelining:
226 /// ================================
227 /// TODO: point to the proper places.
228 ///
229 /// Algorithm:
230 /// ==========
231 /// The algorithm proceeds in a few steps:
232 ///  1. defining super-vectorization patterns and matching them on the tree of
233 ///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
235 ///     loops that have a. conformable loop annotations attached (e.g. parallel,
236 ///     reduction, vectorizable, ...) as well as b. all contiguous load/store
237 ///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
239 ///  2. analyzing those patterns for profitability (TODO: and
240 ///     interference);
241 ///  3. then, for each pattern in order:
242 ///    a. applying iterative rewriting of the loops and all their nested
243 ///       operations in topological order. Rewriting is implemented by
244 ///       coarsening the loops and converting operations and operands to their
245 ///       vector forms. Processing operations in topological order is relatively
246 ///       simple due to the structured nature of the control-flow
247 ///       representation. This order ensures that all the operands of a given
248 ///       operation have been vectorized before the operation itself in a single
249 ///       traversal, except for operands defined outside of the loop nest. The
250 ///       algorithm can convert the following operations to their vector form:
251 ///         * Affine load and store operations are converted to opaque vector
252 ///           transfer read and write operations.
253 ///         * Scalar constant operations/operands are converted to vector
254 ///           constant operations (splat).
255 ///         * Uniform operands (only operands defined outside of the loop nest,
256 ///           for now) are broadcasted to a vector.
257 ///           TODO: Support more uniform cases.
258 ///         * Affine for operations with 'iter_args' are vectorized by
259 ///           vectorizing their 'iter_args' operands and results.
260 ///           TODO: Support more complex loops with divergent lbs and/or ubs.
261 ///         * The remaining operations in the loop nest are vectorized by
262 ///           widening their scalar types to vector types.
263 ///    b. if everything under the root AffineForOp in the current pattern
264 ///       is vectorized properly, we commit that loop to the IR and remove the
265 ///       scalar loop. Otherwise, we discard the vectorized loop and keep the
266 ///       original scalar loop.
267 ///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
270 ///       re-verify that the pattern is still vectorizable. This is expected to
271 ///       make cost models more difficult to write and is subject to improvement
272 ///       in the future.
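///
/// For instance, the uniform-operand and widening cases above amount to the
/// following rewrite (informal sketch assuming a 1-D strategy of size 128,
/// where %a has already been vectorized into %a_vec, e.g. by a transfer read,
/// and %u is defined outside the loop nest):
/// ```mlir
///   %s = addf %a, %u : f32
/// ```
/// becomes
/// ```mlir
///   %u_vec = vector.broadcast %u : f32 to vector<128xf32>
///   %s_vec = addf %a_vec, %u_vec : vector<128xf32>
/// ```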
273 ///
274 /// Choice of loop transformation to support the algorithm:
275 /// =======================================================
276 /// The choice of loop transformation to apply for coarsening vectorized loops
277 /// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor of 128; we want to transform the following input:
279 /// ```mlir
280 ///   affine.for %i = %M to %N {
281 ///     %a = affine.load %A[%i] : memref<?xf32>
282 ///   }
283 /// ```
284 ///
285 /// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion, etc.), say after stripmining (and potentially unrolling in
287 /// the case of LLVM's SLP vectorizer):
288 /// ```mlir
289 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
290 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
291 ///       %a = affine.load %A[%ii] : memref<?xf32>
292 ///     }
293 ///   }
294 /// ```
295 ///
296 /// Instead, we seek to vectorize early and freeze vector types before
297 /// scheduling, so we want to generate a pattern that resembles:
298 /// ```mlir
299 ///   affine.for %i = ? to ? step ? {
300 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
301 ///   }
302 /// ```
303 ///
/// There are several alternatives for representing this coarsening by 128:
/// i. simply dividing the lower / upper bounds by 128 creates issues
305 ///    when representing expressions such as ii + 1 because now we only
306 ///    have access to original values that have been divided. Additional
307 ///    information is needed to specify accesses at below-128 granularity;
308 /// ii. another alternative is to coarsen the loop step but this may have
309 ///    consequences on dependence analysis and fusability of loops: fusable
310 ///    loops probably need to have the same step (because we don't want to
311 ///    stripmine/unroll to enable fusion).
312 /// As a consequence, we choose to represent the coarsening using the loop
313 /// step for now and reevaluate in the future. Note that we can renormalize
314 /// loop steps later if/when we have evidence that they are problematic.
315 ///
316 /// For the simple strawman example above, vectorizing for a 1-D vector
317 /// abstraction of size 128 returns code similar to:
318 /// ```mlir
319 ///   affine.for %i = %M to %N step 128 {
320 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
321 ///   }
322 /// ```
323 ///
324 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
325 /// ========================================================================
326 ///   1. lowering to concrete vector types for various HW;
327 ///   2. reduction support;
328 ///   3. non-effecting padding during vector.transfer_read and filter during
329 ///      vector.transfer_write;
///   4. misalignment support in vector.transfer_read / vector.transfer_write
331 ///      (hopefully without read-modify-writes);
332 ///   5. control-flow support;
333 ///   6. cost-models, heuristics and search;
334 ///   7. Op implementation, extensions and implication on memref views;
335 ///   8. many TODOs left around.
336 ///
337 /// Examples:
338 /// =========
339 /// Consider the following Function:
340 /// ```mlir
341 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
342 ///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
343 ///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
344 ///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
345 ///   %f1 = constant 1.0 : f32
346 ///   %f2 = constant 2.0 : f32
347 ///   affine.for %i0 = 0 to %M {
348 ///     affine.for %i1 = 0 to %N {
349 ///       // non-scoped %f1
350 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
351 ///     }
352 ///   }
353 ///   affine.for %i2 = 0 to %M {
354 ///     affine.for %i3 = 0 to %N {
355 ///       // non-scoped %f2
356 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
357 ///     }
358 ///   }
359 ///   affine.for %i4 = 0 to %M {
360 ///     affine.for %i5 = 0 to %N {
361 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
362 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
363 ///       %s5 = addf %a5, %b5 : f32
364 ///       // non-scoped %f1
365 ///       %s6 = addf %s5, %f1 : f32
366 ///       // non-scoped %f2
367 ///       %s7 = addf %s5, %f2 : f32
368 ///       // diamond dependency.
369 ///       %s8 = addf %s7, %s6 : f32
370 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
371 ///     }
372 ///   }
373 ///   %c7 = constant 7 : index
374 ///   %c42 = constant 42 : index
375 ///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
376 ///   return %res : f32
377 /// }
378 /// ```
379 ///
380 /// The -affine-vectorize pass with the following arguments:
381 /// ```
382 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
383 /// ```
384 ///
385 /// produces this standard innermost-loop vectorized code:
386 /// ```mlir
387 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
388 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
389 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
390 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
391 ///   %cst = constant 1.0 : f32
392 ///   %cst_0 = constant 2.0 : f32
393 ///   affine.for %i0 = 0 to %arg0 {
394 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<256xf32>
397 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
398 ///                vector<256xf32>, memref<?x?xf32>
399 ///     }
400 ///   }
401 ///   affine.for %i2 = 0 to %arg0 {
402 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<256xf32>
405 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
406 ///                vector<256xf32>, memref<?x?xf32>
407 ///     }
408 ///   }
409 ///   affine.for %i4 = 0 to %arg0 {
410 ///     affine.for %i5 = 0 to %arg1 step 256 {
411 ///       %3 = vector.transfer_read %0[%i4, %i5] :
412 ///            memref<?x?xf32>, vector<256xf32>
413 ///       %4 = vector.transfer_read %1[%i4, %i5] :
414 ///            memref<?x?xf32>, vector<256xf32>
415 ///       %5 = addf %3, %4 : vector<256xf32>
///       %cst_3 = constant dense<1.0> : vector<256xf32>
418 ///       %6 = addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = constant dense<2.0> : vector<256xf32>
421 ///       %7 = addf %5, %cst_4 : vector<256xf32>
422 ///       %8 = addf %7, %6 : vector<256xf32>
423 ///       vector.transfer_write %8, %2[%i4, %i5] :
424 ///                vector<256xf32>, memref<?x?xf32>
425 ///     }
426 ///   }
427 ///   %c7 = constant 7 : index
428 ///   %c42 = constant 42 : index
429 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
430 ///   return %9 : f32
431 /// }
432 /// ```
433 ///
434 /// The -affine-vectorize pass with the following arguments:
435 /// ```
436 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
437 /// ```
438 ///
439 /// produces this more interesting mixed outer-innermost-loop vectorized code:
440 /// ```mlir
441 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
442 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
443 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
444 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
445 ///   %cst = constant 1.0 : f32
446 ///   %cst_0 = constant 2.0 : f32
447 ///   affine.for %i0 = 0 to %arg0 step 32 {
448 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<32x256xf32>
451 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
452 ///                vector<32x256xf32>, memref<?x?xf32>
453 ///     }
454 ///   }
455 ///   affine.for %i2 = 0 to %arg0 step 32 {
456 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<32x256xf32>
459 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
460 ///                vector<32x256xf32>, memref<?x?xf32>
461 ///     }
462 ///   }
463 ///   affine.for %i4 = 0 to %arg0 step 32 {
464 ///     affine.for %i5 = 0 to %arg1 step 256 {
465 ///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
467 ///       %4 = vector.transfer_read %1[%i4, %i5] :
468 ///                memref<?x?xf32>, vector<32x256xf32>
469 ///       %5 = addf %3, %4 : vector<32x256xf32>
///       %cst_3 = constant dense<1.0> : vector<32x256xf32>
472 ///       %6 = addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = constant dense<2.0> : vector<32x256xf32>
475 ///       %7 = addf %5, %cst_4 : vector<32x256xf32>
476 ///       %8 = addf %7, %6 : vector<32x256xf32>
477 ///       vector.transfer_write %8, %2[%i4, %i5] :
478 ///                vector<32x256xf32>, memref<?x?xf32>
479 ///     }
480 ///   }
481 ///   %c7 = constant 7 : index
482 ///   %c42 = constant 42 : index
483 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
484 ///   return %9 : f32
485 /// }
486 /// ```
487 ///
488 /// Of course, much more intricate n-D imperfectly-nested patterns can be
489 /// vectorized too and specified in a fully declarative fashion.
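///
/// Such patterns are built declaratively from nested NestedPattern matchers;
/// see makePattern below, where a 2-D pattern, for instance, is expressed by
/// nesting one For matcher inside another, each filtering on loop
/// vectorizability along a requested fastest varying memref dimension.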
490 
491 #define DEBUG_TYPE "early-vect"
492 
493 using llvm::dbgs;
494 
495 /// Forward declaration.
496 static FilterFunctionType
497 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
498                              int fastestVaryingMemRefDimension);
499 
500 /// Creates a vectorization pattern from the command line arguments.
501 /// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// llvm::None, which conservatively results in no vectorization.
504 static Optional<NestedPattern>
505 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
506             ArrayRef<int64_t> fastestVaryingPattern) {
507   using matcher::For;
508   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
509   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
510   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
511   switch (vectorRank) {
512   case 1:
513     return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
514   case 2:
515     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
516                For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
517   case 3:
518     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
519                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
520                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
521   default: {
522     return llvm::None;
523   }
524   }
525 }
526 
527 static NestedPattern &vectorTransferPattern() {
528   static auto pattern = matcher::Op([](Operation &op) {
529     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
530   });
531   return pattern;
532 }
533 
534 namespace {
535 
536 /// Base state for the vectorize pass.
537 /// Command line arguments are preempted by non-empty pass arguments.
538 struct Vectorize : public AffineVectorizeBase<Vectorize> {
539   Vectorize() = default;
540   Vectorize(ArrayRef<int64_t> virtualVectorSize);
541   void runOnFunction() override;
542 };
543 
544 } // end anonymous namespace
545 
546 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
547   vectorSizes = virtualVectorSize;
548 }
549 
///// TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////

static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
551                                       unsigned patternDepth,
552                                       VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "expected patternDepth > depthInPattern");
555   if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
556     // Don't vectorize this loop
557     return;
558   }
559   strategy->loopToVectorDim[loop] =
560       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
561 }
562 
563 /// Implements a simple strawman strategy for vectorization.
564 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
565 /// greedily assigns the fastest varying dimension ** of the vector ** to the
566 /// innermost loop in the pattern.
567 /// When coupled with a pattern that looks for the fastest varying dimension in
568 /// load/store MemRefs, this creates a generic vectorization strategy that works
569 /// for any loop in a hierarchy (outermost, innermost or intermediate).
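/// For example, with a 2-D pattern and vector sizes {32, 256}, the innermost
/// matched loop is assigned the fastest varying vector dimension 1 (of size
/// 256) and its parent loop vector dimension 0 (of size 32); if the matched
/// pattern is deeper than the number of vector dimensions, the extra outermost
/// loops are left scalar (see vectorizeLoopIfProfitable above).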
570 ///
571 /// TODO: In the future we should additionally increase the power of the
572 /// profitability analysis along 3 directions:
573 ///   1. account for loop extents (both static and parametric + annotations);
574 ///   2. account for data layout permutations;
575 ///   3. account for impact of vectorization on maximal loop fusion.
576 /// Then we can quantify the above to build a cost model and search over
577 /// strategies.
578 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
579                                           unsigned depthInPattern,
580                                           unsigned patternDepth,
581                                           VectorizationStrategy *strategy) {
582   for (auto m : matches) {
583     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
584                                     patternDepth, strategy))) {
585       return failure();
586     }
587     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
588                               patternDepth, strategy);
589   }
590   return success();
591 }
592 
593 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
594 
595 namespace {
596 
597 struct VectorizationState {
598 
599   VectorizationState(MLIRContext *context) : builder(context) {}
600 
601   /// Registers the vector replacement of a scalar operation and its result
602   /// values. Both operations must have the same number of results.
603   ///
604   /// This utility is used to register the replacement for the vast majority of
605   /// the vectorized operations.
606   ///
607   /// Example:
608   ///   * 'replaced': %0 = addf %1, %2 : f32
609   ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
610   void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
611 
612   /// Registers the vector replacement of a scalar value. The replacement
613   /// operation should have a single result, which replaces the scalar value.
614   ///
615   /// This utility is used to register the vector replacement of block arguments
616   /// and operation results which are not directly vectorized (i.e., their
617   /// scalar version still exists after vectorization), like uniforms.
618   ///
619   /// Example:
620   ///   * 'replaced': block argument or operation outside of the vectorized
621   ///     loop.
622   ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
623   void registerValueVectorReplacement(Value replaced, Operation *replacement);
624 
625   /// Registers the vector replacement of a block argument (e.g., iter_args).
626   ///
627   /// Example:
628   ///   * 'replaced': 'iter_arg' block argument.
629   ///   * 'replacement': vectorized 'iter_arg' block argument.
630   void registerBlockArgVectorReplacement(BlockArgument replaced,
631                                          BlockArgument replacement);
632 
633   /// Registers the scalar replacement of a scalar value. 'replacement' must be
634   /// scalar. Both values must be block arguments. Operation results should be
  /// replaced using the 'registerOp*' utilities.
636   ///
637   /// This utility is used to register the replacement of block arguments
638   /// that are within the loop to be vectorized and will continue being scalar
639   /// within the vector loop.
640   ///
641   /// Example:
642   ///   * 'replaced': induction variable of a loop to be vectorized.
643   ///   * 'replacement': new induction variable in the new vector loop.
644   void registerValueScalarReplacement(BlockArgument replaced,
645                                       BlockArgument replacement);
646 
647   /// Returns in 'replacedVals' the scalar replacement for values in
648   /// 'inputVals'.
649   void getScalarValueReplacementsFor(ValueRange inputVals,
650                                      SmallVectorImpl<Value> &replacedVals);
651 
652   /// Erases the scalar loop nest after its successful vectorization.
653   void finishVectorizationPattern(AffineForOp rootLoop);
654 
655   // Used to build and insert all the new operations created. The insertion
656   // point is preserved and updated along the vectorization process.
657   OpBuilder builder;
658 
659   // Maps input scalar operations to their vector counterparts.
660   DenseMap<Operation *, Operation *> opVectorReplacement;
661   // Maps input scalar values to their vector counterparts.
662   BlockAndValueMapping valueVectorReplacement;
663   // Maps input scalar values to their new scalar counterparts in the vector
664   // loop nest.
665   BlockAndValueMapping valueScalarReplacement;
666 
667   // Maps the newly created vector loops to their vector dimension.
668   DenseMap<Operation *, unsigned> vecLoopToVecDim;
669 
670   // The strategy drives which loop to vectorize by which amount.
671   const VectorizationStrategy *strategy;
672 
673 private:
674   /// Internal implementation to map input scalar values to new vector or scalar
675   /// values.
676   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
677   void registerValueScalarReplacementImpl(Value replaced, Value replacement);
678 };
679 
} // end anonymous namespace
681 
682 /// Registers the vector replacement of a scalar operation and its result
683 /// values. Both operations must have the same number of results.
684 ///
685 /// This utility is used to register the replacement for the vast majority of
686 /// the vectorized operations.
687 ///
688 /// Example:
689 ///   * 'replaced': %0 = addf %1, %2 : f32
690 ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
691 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
692                                                      Operation *replacement) {
693   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
694   LLVM_DEBUG(dbgs() << *replaced << "\n");
695   LLVM_DEBUG(dbgs() << "into\n");
696   LLVM_DEBUG(dbgs() << *replacement << "\n");
697 
698   assert(replaced->getNumResults() == replacement->getNumResults() &&
699          "Unexpected replaced and replacement results");
700   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
701   opVectorReplacement[replaced] = replacement;
702 
703   for (auto resultTuple :
704        llvm::zip(replaced->getResults(), replacement->getResults()))
705     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
706                                        std::get<1>(resultTuple));
707 }
708 
709 /// Registers the vector replacement of a scalar value. The replacement
710 /// operation should have a single result, which replaces the scalar value.
711 ///
712 /// This utility is used to register the vector replacement of block arguments
713 /// and operation results which are not directly vectorized (i.e., their
714 /// scalar version still exists after vectorization), like uniforms.
715 ///
716 /// Example:
717 ///   * 'replaced': block argument or operation outside of the vectorized loop.
718 ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
719 void VectorizationState::registerValueVectorReplacement(
720     Value replaced, Operation *replacement) {
721   assert(replacement->getNumResults() == 1 &&
722          "Expected single-result replacement");
723   if (Operation *defOp = replaced.getDefiningOp())
724     registerOpVectorReplacement(defOp, replacement);
725   else
726     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
727 }
728 
729 /// Registers the vector replacement of a block argument (e.g., iter_args).
730 ///
731 /// Example:
732 ///   * 'replaced': 'iter_arg' block argument.
733 ///   * 'replacement': vectorized 'iter_arg' block argument.
734 void VectorizationState::registerBlockArgVectorReplacement(
735     BlockArgument replaced, BlockArgument replacement) {
736   registerValueVectorReplacementImpl(replaced, replacement);
737 }
738 
739 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
740                                                             Value replacement) {
741   assert(!valueVectorReplacement.contains(replaced) &&
742          "Vector replacement already registered");
743   assert(replacement.getType().isa<VectorType>() &&
744          "Expected vector type in vector replacement");
745   valueVectorReplacement.map(replaced, replacement);
746 }
747 
748 /// Registers the scalar replacement of a scalar value. 'replacement' must be
749 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
751 ///
752 /// This utility is used to register the replacement of block arguments
753 /// that are within the loop to be vectorized and will continue being scalar
754 /// within the vector loop.
755 ///
756 /// Example:
757 ///   * 'replaced': induction variable of a loop to be vectorized.
758 ///   * 'replacement': new induction variable in the new vector loop.
759 void VectorizationState::registerValueScalarReplacement(
760     BlockArgument replaced, BlockArgument replacement) {
761   registerValueScalarReplacementImpl(replaced, replacement);
762 }
763 
764 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
765                                                             Value replacement) {
766   assert(!valueScalarReplacement.contains(replaced) &&
767          "Scalar value replacement already registered");
768   assert(!replacement.getType().isa<VectorType>() &&
769          "Expected scalar type in scalar replacement");
770   valueScalarReplacement.map(replaced, replacement);
771 }
772 
773 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
774 void VectorizationState::getScalarValueReplacementsFor(
775     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
776   for (Value inputVal : inputVals)
777     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
778 }
779 
780 /// Erases a loop nest, including all its nested operations.
781 static void eraseLoopNest(AffineForOp forOp) {
782   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
783   forOp.erase();
784 }
785 
786 /// Erases the scalar loop nest after its successful vectorization.
787 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
788   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
789   eraseLoopNest(rootLoop);
790 }
791 
// Applies 'map' to 'mapOperands', creating one AffineApplyOp per result
// expression of 'map', and appends the resulting values to 'results'.
793 static void computeMemoryOpIndices(Operation *op, AffineMap map,
794                                    ValueRange mapOperands,
795                                    VectorizationState &state,
796                                    SmallVectorImpl<Value> &results) {
797   for (auto resultExpr : map.getResults()) {
798     auto singleResMap =
799         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
800     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
801                                                     mapOperands);
802     results.push_back(afOp);
803   }
804 }
805 
806 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
807 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
809 static FilterFunctionType
810 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
811                              int fastestVaryingMemRefDimension) {
812   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
813     auto loop = cast<AffineForOp>(forOp);
814     auto parallelIt = parallelLoops.find(loop);
815     if (parallelIt == parallelLoops.end())
816       return false;
817     int memRefDim = -1;
818     auto vectorizableBody =
819         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
820     if (!vectorizableBody)
821       return false;
822     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
823            memRefDim == fastestVaryingMemRefDimension;
824   };
825 }
826 
827 /// Returns the vector type resulting from applying the provided vectorization
828 /// strategy on the scalar type.
829 static VectorType getVectorType(Type scalarTy,
830                                 const VectorizationStrategy *strategy) {
831   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
832   return VectorType::get(strategy->vectorSizes, scalarTy);
833 }
834 
835 /// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
/// nullptr otherwise.
838 static ConstantOp vectorizeConstant(ConstantOp constOp,
839                                     VectorizationState &state) {
840   Type scalarTy = constOp.getType();
841   if (!VectorType::isValidElementType(scalarTy))
842     return nullptr;
843 
844   auto vecTy = getVectorType(scalarTy, state.strategy);
845   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
846   auto newConstOp = state.builder.create<ConstantOp>(constOp.getLoc(), vecAttr);
847 
848   // Register vector replacement for future uses in the scope.
849   state.registerOpVectorReplacement(constOp, newConstOp);
850   return newConstOp;
851 }
852 
853 /// Returns true if the provided value is vector uniform given the vectorization
854 /// strategy.
855 // TODO: For now, only values that are invariants to all the loops in the
856 // vectorization strategy are considered vector uniforms.
857 static bool isUniformDefinition(Value value,
858                                 const VectorizationStrategy *strategy) {
859   for (auto loopToDim : strategy->loopToVectorDim) {
860     auto loop = cast<AffineForOp>(loopToDim.first);
861     if (!loop.isDefinedOutsideOfLoop(value))
862       return false;
863   }
864   return true;
865 }
866 
867 /// Generates a broadcast op for the provided uniform value using the
868 /// vectorization strategy in 'state'.
869 static Operation *vectorizeUniform(Value uniformVal,
870                                    VectorizationState &state) {
871   OpBuilder::InsertionGuard guard(state.builder);
872   state.builder.setInsertionPointAfterValue(uniformVal);
873 
874   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
875   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
876                                                    vectorTy, uniformVal);
877   state.registerValueVectorReplacement(uniformVal, bcastOp);
878   return bcastOp;
879 }
880 
881 /// Tries to vectorize a given `operand` by applying the following logic:
882 /// 1. if the defining operation has been already vectorized, `operand` is
883 ///    already in the proper vector form;
884 /// 2. if the `operand` is a constant, returns the vectorized form of the
885 ///    constant;
/// 3. if the `operand` is uniform, returns a vector broadcast of it;
887 /// 4. otherwise, the vectorization of `operand` is not supported.
888 /// Newly created vector operations are registered in `state` as replacement
889 /// for their scalar counterparts.
890 /// In particular this logic captures some of the use cases where definitions
891 /// that are not scoped under the current pattern are needed to vectorize.
892 /// One such example is top level function constants that need to be splatted.
893 ///
894 /// Returns an operand that has been vectorized to match `state`'s strategy if
895 /// vectorization is possible with the above logic. Returns nullptr otherwise.
896 ///
897 /// TODO: handle more complex cases.
898 static Value vectorizeOperand(Value operand, VectorizationState &state) {
899   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
900   // If this value is already vectorized, we are done.
901   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
902     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
903     return vecRepl;
904   }
905 
  // A vector operand that is not in the replacement map should never reach
907   // this point. Reaching this point could mean that the code was already
908   // vectorized and we shouldn't try to vectorize already vectorized code.
909   assert(!operand.getType().isa<VectorType>() &&
910          "Vector op not found in replacement map");
911 
912   // Vectorize constant.
  if (auto constOp = operand.getDefiningOp<ConstantOp>()) {
    ConstantOp vecConstant = vectorizeConstant(constOp, state);
    // Bail out if the scalar type is not a valid vector element type.
    if (!vecConstant)
      return nullptr;
    LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
    return vecConstant.getResult();
  }
918 
919   // Vectorize uniform values.
920   if (isUniformDefinition(operand, state.strategy)) {
921     Operation *vecUniform = vectorizeUniform(operand, state);
922     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
923     return vecUniform->getResult(0);
924   }
925 
926   // Check for unsupported block argument scenarios. A supported block argument
927   // should have been vectorized already.
928   if (!operand.getDefiningOp())
929     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
930   else
931     // Generic unsupported case.
932     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
933 
934   return nullptr;
935 }
936 
937 /// Vectorizes an affine load with the vectorization strategy in 'state' by
938 /// generating a 'vector.transfer_read' op with the proper permutation map
939 /// inferred from the indices of the load. The new 'vector.transfer_read' is
940 /// registered as replacement of the scalar load. Returns the newly created
941 /// 'vector.transfer_read' if vectorization was successful. Returns nullptr,
942 /// otherwise.
943 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
944                                       VectorizationState &state) {
945   MemRefType memRefType = loadOp.getMemRefType();
946   Type elementType = memRefType.getElementType();
947   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
948 
949   // Replace map operands with operands from the vector loop nest.
950   SmallVector<Value, 8> mapOperands;
951   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
952 
953   // Compute indices for the transfer op. AffineApplyOp's may be generated.
954   SmallVector<Value, 8> indices;
955   indices.reserve(memRefType.getRank());
956   if (loadOp.getAffineMap() !=
957       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
958     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
959                            indices);
960   else
961     indices.append(mapOperands.begin(), mapOperands.end());
962 
963   // Compute permutation map using the information of new vector loops.
964   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
965                                            indices, state.vecLoopToVecDim);
966   if (!permutationMap) {
967     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
968     return nullptr;
969   }
970   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
971   LLVM_DEBUG(permutationMap.print(dbgs()));
972 
973   auto transfer = state.builder.create<vector::TransferReadOp>(
974       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
975 
976   // Register replacement for future uses in the scope.
977   state.registerOpVectorReplacement(loadOp, transfer);
978   return transfer;
979 }
980 
981 /// Vectorizes an affine store with the vectorization strategy in 'state' by
982 /// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
985 /// 'vector.transfer_write' if vectorization was successful. Returns nullptr,
986 /// otherwise.
987 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
988                                        VectorizationState &state) {
989   MemRefType memRefType = storeOp.getMemRefType();
990   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
991   if (!vectorValue)
992     return nullptr;
993 
994   // Replace map operands with operands from the vector loop nest.
995   SmallVector<Value, 8> mapOperands;
996   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
997 
998   // Compute indices for the transfer op. AffineApplyOp's may be generated.
999   SmallVector<Value, 8> indices;
1000   indices.reserve(memRefType.getRank());
1001   if (storeOp.getAffineMap() !=
1002       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1003     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1004                            indices);
1005   else
1006     indices.append(mapOperands.begin(), mapOperands.end());
1007 
1008   // Compute permutation map using the information of new vector loops.
1009   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1010                                            indices, state.vecLoopToVecDim);
1011   if (!permutationMap)
1012     return nullptr;
1013   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1014   LLVM_DEBUG(permutationMap.print(dbgs()));
1015 
1016   auto transfer = state.builder.create<vector::TransferWriteOp>(
1017       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1018       permutationMap);
1019   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1020 
1021   // Register replacement for future uses in the scope.
1022   state.registerOpVectorReplacement(storeOp, transfer);
1023   return transfer;
1024 }
1025 
1026 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1027 /// created and registered as replacement for the scalar loop. The builder's
1028 /// insertion point is set to the new loop's body so that subsequent vectorized
1029 /// operations are inserted into the new loop. If the loop is a vector
1030 /// dimension, the step of the newly created loop will reflect the vectorization
/// factor used to vectorize that dimension.
1032 // TODO: Add support for 'iter_args'. Related operands and results will be
1033 // vectorized at this point.
1034 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1035                                        VectorizationState &state) {
1036   const VectorizationStrategy &strategy = *state.strategy;
1037   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1038   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1039 
1040   // We only support 'iter_args' when the loop is not one of the vector
1041   // dimensions.
1042   // TODO: Support vector dimension loops. They require special handling:
1043   // generate horizontal reduction, last-value extraction, etc.
1044   if (forOp.getNumIterOperands() > 0 && isLoopVecDim)
1045     return nullptr;
1046 
1047   // If we are vectorizing a vector dimension, compute a new step for the new
1048   // vectorized loop using the vectorization factor for the vector dimension.
1049   // Otherwise, propagate the step of the scalar loop.
1050   unsigned newStep;
1051   if (isLoopVecDim) {
1052     unsigned vectorDim = loopToVecDimIt->second;
1053     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1054     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1055     newStep = forOp.getStep() * forOpVecFactor;
1056   } else {
1057     newStep = forOp.getStep();
1058   }
1059 
1060   // Vectorize 'iter_args'.
1061   SmallVector<Value, 8> vecIterOperands;
1062   for (auto operand : forOp.getIterOperands())
1063     vecIterOperands.push_back(vectorizeOperand(operand, state));
1064 
1065   auto vecForOp = state.builder.create<AffineForOp>(
1066       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1067       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1068       vecIterOperands,
1069       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1070         // Make sure we don't create a default terminator in the loop body as
1071         // the proper terminator will be added during vectorization.
1072         return;
1073       });
1074 
1075   // Register loop-related replacements:
1076   //   1) The new vectorized loop is registered as vector replacement of the
1077   //      scalar loop.
1078   //      TODO: Support reductions along the vector dimension.
1079   //   2) The new iv of the vectorized loop is registered as scalar replacement
1080   //      since a scalar copy of the iv will prevail in the vectorized loop.
1081   //      TODO: A vector replacement will also be added in the future when
1082   //      vectorization of linear ops is supported.
1083   //   3) The new 'iter_args' region arguments are registered as vector
1084   //      replacements since they have been vectorized.
1085   state.registerOpVectorReplacement(forOp, vecForOp);
1086   state.registerValueScalarReplacement(forOp.getInductionVar(),
1087                                        vecForOp.getInductionVar());
1088   for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1090     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1091                                             std::get<1>(iterTuple));
1092 
1093   if (isLoopVecDim)
1094     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1095 
1096   // Change insertion point so that upcoming vectorized instructions are
1097   // inserted into the vectorized loop's body.
1098   state.builder.setInsertionPointToStart(vecForOp.getBody());
1099   return vecForOp;
1100 }
1101 
/// Vectorizes an arbitrary operation by plain widening. We apply generic type
1103 /// widening of all its results and retrieve the vector counterparts for all its
1104 /// operands.
1105 static Operation *widenOp(Operation *op, VectorizationState &state) {
1106   SmallVector<Type, 8> vectorTypes;
1107   for (Value result : op->getResults())
1108     vectorTypes.push_back(
1109         VectorType::get(state.strategy->vectorSizes, result.getType()));
1110 
1111   SmallVector<Value, 8> vectorOperands;
1112   for (Value operand : op->getOperands()) {
1113     Value vecOperand = vectorizeOperand(operand, state);
1114     if (!vecOperand) {
1115       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1116       return nullptr;
1117     }
1118     vectorOperands.push_back(vecOperand);
1119   }
1120 
1121   // Create a clone of the op with the proper operands and return types.
1122   // TODO: The following assumes there is always an op with a fixed
1123   // name that works both in scalar mode and vector mode.
1124   // TODO: Is it worth considering an Operation.clone operation which
1125   // changes the type so we can promote an Operation with less boilerplate?
1126   OperationState vecOpState(op->getLoc(), op->getName().getStringRef(),
1127                             vectorOperands, vectorTypes, op->getAttrs(),
1128                             /*successors=*/{}, /*regions=*/{});
1129   Operation *vecOp = state.builder.createOperation(vecOpState);
1130   state.registerOpVectorReplacement(op, vecOp);
1131   return vecOp;
1132 }
1133 
1134 /// Vectorizes a yield operation by widening its types. The builder's insertion
1135 /// point is set after the vectorized parent op to continue vectorizing the
1136 /// operations after the parent op.
1137 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1138                                          VectorizationState &state) {
1139   Operation *newYieldOp = widenOp(yieldOp, state);
1140   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1141   state.builder.setInsertionPointAfter(newParentOp);
1142   return newYieldOp;
1143 }
1144 
1145 /// Encodes Operation-specific behavior for vectorization. In general we
1146 /// assume that all operands of an op must be vectorized but this is not
1147 /// always true. In the future, it would be nice to have a trait that
1148 /// describes how a particular operation vectorizes. For now we implement the
1149 /// case distinction here. Returns a vectorized form of an operation or
1150 /// nullptr if vectorization fails.
1151 // TODO: consider adding a trait to Op to describe how it gets vectorized.
// Some ops may not be vectorizable or may require tricky logic that we cannot
// keep handling as one-off cases here; ideally this would be TableGen'd.
1154 static Operation *vectorizeOneOperation(Operation *op,
1155                                         VectorizationState &state) {
1156   // Sanity checks.
1157   assert(!isa<vector::TransferReadOp>(op) &&
1158          "vector.transfer_read cannot be further vectorized");
1159   assert(!isa<vector::TransferWriteOp>(op) &&
1160          "vector.transfer_write cannot be further vectorized");
1161 
1162   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1163     return vectorizeAffineLoad(loadOp, state);
1164   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1165     return vectorizeAffineStore(storeOp, state);
1166   if (auto forOp = dyn_cast<AffineForOp>(op))
1167     return vectorizeAffineForOp(forOp, state);
1168   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1169     return vectorizeAffineYieldOp(yieldOp, state);
1170   if (auto constant = dyn_cast<ConstantOp>(op))
1171     return vectorizeConstant(constant, state);
1172 
1173   // Other ops with regions are not supported.
1174   if (op->getNumRegions() != 0)
1175     return nullptr;
1176 
1177   return widenOp(op, state);
1178 }
1179 
1180 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1181 /// vector container that preserves the relative nesting level of each loop with
1182 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1183 /// will be assigned to the loop in the current 'match'.
1184 static void
1185 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1186                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
1187   // Add a new empty level to the output if it doesn't exist already.
1188   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1189   if (currentLevel == loops.size())
1190     loops.push_back(SmallVector<AffineForOp, 2>());
1191 
1192   // Add current match and recursively visit its children.
1193   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1194   for (auto childMatch : match.getMatchedChildren()) {
1195     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1196   }
1197 }
1198 
1199 /// Converts all the nested loops in 'match' to a 2D vector container that
1200 /// preserves the relative nesting level of each loop with respect to the others
1201 /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
1202 /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
1203 /// 'loops[i+1]'.
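///
/// For example (an illustrative sketch with hypothetical loop names), a match
/// rooted at %i0 with children rooted at %i1 and %i2, where %i2 in turn has a
/// child rooted at %i3, is converted to:
///
///   loops = {{%i0}, {%i1, %i2}, {%i3}}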
1204 static void
1205 getMatchedAffineLoops(NestedMatch match,
1206                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1208 }
1209 
1210 /// Internal implementation to vectorize affine loops from a single loop nest
1211 /// using an n-D vectorization strategy.
1212 static LogicalResult
1213 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1214                   const VectorizationStrategy &strategy) {
1215   assert(loops[0].size() == 1 && "Expected single root loop");
1216   AffineForOp rootLoop = loops[0][0];
1217   VectorizationState state(rootLoop.getContext());
1218   state.builder.setInsertionPointAfter(rootLoop);
1219   state.strategy = &strategy;
1220 
1221   // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
1224   // As a consequence we must check that each root pattern is still
1225   // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1226   // TODO: implement a non-greedy profitability analysis that keeps only
1227   // non-intersecting patterns.
1228   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1229     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1230     return failure();
1231   }
1232 
1233   //////////////////////////////////////////////////////////////////////////////
1234   // Vectorize the scalar loop nest following a topological order. A new vector
1235   // loop nest with the vectorized operations is created along the process. If
1236   // vectorization succeeds, the scalar loop nest is erased. If vectorization
1237   // fails, the vector loop nest is erased and the scalar loop nest is not
1238   // modified.
1239   //////////////////////////////////////////////////////////////////////////////
1240 
1241   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1242     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1243     Operation *vectorOp = vectorizeOneOperation(op, state);
1244     if (!vectorOp)
1245       return WalkResult::interrupt();
1246 
1247     return WalkResult::advance();
1248   });
1249 
1250   if (opVecResult.wasInterrupted()) {
1251     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1252                       << rootLoop << "\n");
1253     // Erase vector loop nest if it was created.
1254     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1255     if (vecRootLoopIt != state.opVectorReplacement.end())
1256       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1257 
1258     return failure();
1259   }
1260 
1261   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1262          "Expected vector replacement for loop nest");
1263   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1264   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1265                     << *state.opVectorReplacement[rootLoop]);
1266 
1267   // Finish this vectorization pattern.
1268   state.finishVectorizationPattern(rootLoop);
1269   return success();
1270 }
1271 
1272 /// Extracts the matched loops and vectorizes them following a topological
1273 /// order. A new vector loop nest will be created if vectorization succeeds. The
1274 /// original loop nest won't be modified in any case.
1275 static LogicalResult vectorizeRootMatch(NestedMatch m,
1276                                         const VectorizationStrategy &strategy) {
1277   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1278   getMatchedAffineLoops(m, loopsToVectorize);
1279   return vectorizeLoopNest(loopsToVectorize, strategy);
1280 }
1281 
1282 /// Traverses all the loop matches and classifies them into intersection
1283 /// buckets. Two matches intersect if any of them encloses the other one. A
1284 /// match intersects with a bucket if the match intersects with the root
1285 /// (outermost) loop in that bucket.
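///
/// For example (an illustrative sketch with hypothetical loop names), given
/// matches rooted at:
///
///   affine.for %i0 ... {          // match A
///     affine.for %i1 ... { ... }  // match B
///   }
///   affine.for %j0 ... { ... }    // match C
///
/// A and B end up in the same bucket (rooted at %i0) because %i0 encloses %i1,
/// while C starts a bucket of its own.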
1286 static void computeIntersectionBuckets(
1287     ArrayRef<NestedMatch> matches,
1288     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1289   assert(intersectionBuckets.empty() && "Expected empty output");
1290   // Keeps track of the root (outermost) loop of each bucket.
1291   SmallVector<AffineForOp, 8> bucketRoots;
1292 
1293   for (const NestedMatch &match : matches) {
1294     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1295     bool intersects = false;
1296     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1297       AffineForOp bucketRoot = bucketRoots[i];
1298       // Add match to the bucket if the bucket root encloses the match root.
1299       if (bucketRoot->isAncestor(matchRoot)) {
1300         intersectionBuckets[i].push_back(match);
1301         intersects = true;
1302         break;
1303       }
1304       // Add match to the bucket if the match root encloses the bucket root. The
1305       // match root becomes the new bucket root.
1306       if (matchRoot->isAncestor(bucketRoot)) {
1307         bucketRoots[i] = matchRoot;
1308         intersectionBuckets[i].push_back(match);
1309         intersects = true;
1310         break;
1311       }
1312     }
1313 
1314     // Match doesn't intersect with any existing bucket. Create a new bucket for
1315     // it.
1316     if (!intersects) {
1317       bucketRoots.push_back(matchRoot);
1318       intersectionBuckets.push_back(SmallVector<NestedMatch, 8>());
1319       intersectionBuckets.back().push_back(match);
1320     }
1321   }
1322 }
1323 
1324 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1325 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1326 /// factor is applied inner-to-outer to the loops of each loop nest.
1327 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1328 /// vectorization order.
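///
/// For instance (an illustrative sketch), with vectorSizes = {32, 8} and an
/// empty 'fastestVaryingPattern', the innermost matched loop is vectorized by
/// a factor of 8 and its parent by a factor of 32, so f32 operations in the
/// loop body are widened to vector<32x8xf32>.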
1329 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1330                            ArrayRef<int64_t> vectorSizes,
1331                            ArrayRef<int64_t> fastestVaryingPattern) {
1332   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1333   Optional<NestedPattern> pattern =
1334       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1335   if (!pattern.hasValue()) {
1336     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1337     return;
1338   }
1339 
1340   LLVM_DEBUG(dbgs() << "\n******************************************");
1341   LLVM_DEBUG(dbgs() << "\n******************************************");
1342   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1343   LLVM_DEBUG(dbgs() << *parentOp << "\n");
1344 
1345   unsigned patternDepth = pattern->getDepth();
1346 
1347   // Compute all the pattern matches and classify them into buckets of
1348   // intersecting matches.
1349   SmallVector<NestedMatch, 32> allMatches;
1350   pattern->match(parentOp, &allMatches);
1351   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1352   computeIntersectionBuckets(allMatches, intersectionBuckets);
1353 
1354   // Iterate over all buckets and vectorize the matches eagerly. We can only
1355   // vectorize one match from each bucket since all the matches within a bucket
1356   // intersect.
1357   for (auto &intersectingMatches : intersectionBuckets) {
1358     for (NestedMatch &match : intersectingMatches) {
1359       VectorizationStrategy strategy;
1360       // TODO: depending on profitability, elect to reduce the vector size.
1361       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1362       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1363                                       patternDepth, &strategy))) {
1364         continue;
1365       }
1366       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1367                                 &strategy);
1368       // Vectorize match. Skip the rest of intersecting matches in the bucket if
1369       // vectorization succeeded.
1370       // TODO: if pattern does not apply, report it; alter the cost/benefit.
1371       // TODO: some diagnostics if failure to vectorize occurs.
1372       if (succeeded(vectorizeRootMatch(match, strategy)))
1373         break;
1374     }
1375   }
1376 
1377   LLVM_DEBUG(dbgs() << "\n");
1378 }
1379 
/// Applies vectorization to the current function by searching over a set of
/// predetermined patterns.
1390 void Vectorize::runOnFunction() {
1391   FuncOp f = getFunction();
1392   if (!fastestVaryingPattern.empty() &&
1393       fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with a different size "
                 "than the vector size.");
1396     return signalPassFailure();
1397   }
1398 
1399   DenseSet<Operation *> parallelLoops;
1400   f.walk([&parallelLoops](AffineForOp loop) {
1401     if (isLoopParallel(loop))
1402       parallelLoops.insert(loop);
1403   });
1404 
1405   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1406   NestedPatternContext mlContext;
1407   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern);
1408 }
1409 
1410 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1411 /// SuperVectorizer:
1412 ///   * There must be at least one loop.
1413 ///   * There must be a single root loop (nesting level 0).
1414 ///   * Each loop at a given nesting level must be nested in a loop from a
1415 ///     previous nesting level.
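///
/// For example (an illustrative sketch with hypothetical loop names), given a
/// nest where %i1 and %i2 are sibling loops nested inside %i0:
///   * loops = {{%i0}, {%i1, %i2}} satisfies the criteria;
///   * loops = {{%i0, %i1}} is rejected because it has two root loops;
///   * loops = {{%i1}, {%i0}} is rejected because %i0 is not nested in %i1.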
1416 static LogicalResult
1417 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1418   // Expected at least one loop.
1419   if (loops.empty())
1420     return failure();
1421 
1422   // Expected only one root loop.
1423   if (loops[0].size() != 1)
1424     return failure();
1425 
1426   // Traverse loops outer-to-inner to check some invariants.
1427   for (int i = 1, end = loops.size(); i < end; ++i) {
1428     for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
1431       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1432             return maybeParent->isProperAncestor(loop);
1433           }))
1434         return failure();
1435 
      // Check that each loop at this level is not nested in another loop from
      // this level.
1438       for (AffineForOp sibling : loops[i]) {
1439         if (sibling->isProperAncestor(loop))
1440           return failure();
1441       }
1442     }
1443   }
1444 
1445   return success();
1446 }
1447 
1448 namespace mlir {
1449 
1450 /// External utility to vectorize affine loops in 'loops' using the n-D
1451 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1452 /// factor is applied inner-to-outer to the loops of each loop nest.
1453 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1454 /// vectorization order.
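///
/// For example, a minimal sketch (assuming vectorSizes = {128}; the exact IR
/// produced by the pass may differ): a parallel loop such as
///
///   affine.for %i = 0 to 1024 {
///     %ld = affine.load %in[%i] : memref<1024xf32>
///     affine.store %ld, %out[%i] : memref<1024xf32>
///   }
///
/// is expected to be rewritten roughly as
///
///   affine.for %i = 0 to 1024 step 128 {
///     %v = vector.transfer_read %in[%i], %cst
///         : memref<1024xf32>, vector<128xf32>
///     vector.transfer_write %v, %out[%i]
///         : vector<128xf32>, memref<1024xf32>
///   }
///
/// where %cst is a scalar f32 padding value for the read.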
1455 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1456                           ArrayRef<int64_t> vectorSizes,
1457                           ArrayRef<int64_t> fastestVaryingPattern) {
1458   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1459   NestedPatternContext mlContext;
1460   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern);
1461 }
1462 
1463 /// External utility to vectorize affine loops from a single loop nest using an
1464 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1465 /// Loops are provided in a 2D vector container. The first dimension represents
1466 /// the nesting level relative to the loops to be vectorized. The second
1467 /// dimension contains the loops. This means that:
1468 ///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
1469 ///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
1470 ///
1471 /// For example, for the following loop nest:
1472 ///
1473 ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1474 ///               %out0: memref<64x128x512xf32>,
1475 ///               %out1: memref<64x128x128xf32>) {
1476 ///     affine.for %i0 = 0 to 64 {
1477 ///       affine.for %i1 = 0 to 128 {
1478 ///         affine.for %i2 = 0 to 512 {
1479 ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1480 ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1481 ///         }
1482 ///         affine.for %i3 = 0 to 128 {
1483 ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1484 ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1485 ///         }
1486 ///       }
1487 ///     }
1488 ///     return
1489 ///   }
1490 ///
1491 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1492 /// innermost loops;
1493 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1494 /// loops;
1495 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1496 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1497 /// loops = {{%i1}}, to vectorize only the middle loop.
1498 LogicalResult
1499 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1500                         const VectorizationStrategy &strategy) {
1501   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1502   NestedPatternContext mlContext;
1503   if (failed(verifyLoopNesting(loops)))
1504     return failure();
1505   return vectorizeLoopNest(loops, strategy);
1506 }
1507 
1508 std::unique_ptr<OperationPass<FuncOp>>
1509 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1510   return std::make_unique<Vectorize>(virtualVectorSize);
1511 }
1512 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1513   return std::make_unique<Vectorize>();
1514 }
1515 
1516 } // namespace mlir
1517