1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Analysis/AffineAnalysis.h"
16 #include "mlir/Analysis/LoopAnalysis.h"
17 #include "mlir/Analysis/NestedMatcher.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Vector/VectorOps.h"
21 #include "mlir/Dialect/Vector/VectorUtils.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
23 #include "mlir/Support/LLVM.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/Support/Debug.h"
26 
27 using namespace mlir;
28 using namespace vector;
29 
30 ///
31 /// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which provide a single,
/// compact representation in the vector types of information that is expected
/// to reduce the impact of the phase ordering problem.
35 ///
36 /// Vector granularity:
37 /// ===================
38 /// This pass is designed to perform vectorization at a super-vector
39 /// granularity. A super-vector is loosely defined as a vector type that is a
40 /// multiple of a "good" vector size so the HW can efficiently implement a set
41 /// of high-level primitives. Multiple is understood along any dimension; e.g.
42 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
43 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
44 /// efficiently implement a set of high-level primitives" is not necessarily an
45 /// integer multiple of actual hardware registers. We leave details of this
46 /// distinction unspecified for now.
47 ///
/// Some may prefer the terminology of a "tile of HW vectors". In this case, one
49 /// should note that super-vectors implement an "always full tile" abstraction.
50 /// They guarantee no partial-tile separation is necessary by relying on a
51 /// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
53 /// transposition if necessary. In the general case this will require a scoped
54 /// allocation in some notional local memory.
55 ///
56 /// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn, into a single compact representation in the vector
/// types, information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
60 ///   1. the associated loops have dependency semantics that do not prevent
61 ///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
68 ///   4. the underlying memrefs are accessed in some notional contiguous way
69 ///      that allows loading into vectors with some amount of spatial locality;
70 /// In other words, super-vectorization provides a level of separation of
71 /// concern by way of opacity to subsequent passes. This has the effect of
72 /// encapsulating and propagating vectorization constraints down the list of
73 /// passes until we are ready to lower further.
74 ///
/// For a particular target, a notion of minimal n-D vector size will be
/// specified and vectorization targets a multiple of that. In the following
77 /// paragraph, let "k ." represent "a multiple of", to be understood as a
78 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
79 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
80 ///
81 /// Some non-exhaustive notable super-vector sizes of interest include:
82 ///   - CPU: vector<k . HW_vector_size>,
83 ///          vector<k' . core_count x k . HW_vector_size>,
84 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
85 ///   - GPU: vector<k . warp_size>,
86 ///          vector<k . warp_size x float2>,
87 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
89 ///
90 /// Loops and operations are emitted that operate on those super-vector shapes.
91 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
92 /// passes are expected to be (gradually) more target-specific.
93 ///
94 /// At a high level, a vectorized load in a loop will resemble:
95 /// ```mlir
96 ///   affine.for %i = ? to ? step ? {
97 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
98 ///   }
99 /// ```
100 /// It is the responsibility of the implementation of vector.transfer_read to
101 /// materialize vector registers from the original scalar memrefs. A later (more
102 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
104 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
105 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
106 ///      thus opening opportunities for unrolling and pipelining. This is an
107 ///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call, achieving full separation of concerns. This is an instance of a
///      library call; or
111 ///   3. a mix of both, e.g. based on a model.
112 /// In the future, these operations will expose a contract to constrain the
113 /// search on vectorization patterns and sizes.
114 ///
115 /// Occurrence of super-vectorization in the compiler flow:
116 /// =======================================================
117 /// This is an active area of investigation. We start with 2 remarks to position
118 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
119 /// and LLVM SLP Vectorizer.
120 ///
121 /// LLVM VPLAN:
122 /// -----------
123 /// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN.
/// For instance, consider a traditional polyhedral compilation flow (e.g. the
/// PPCG project uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping, and register optimizations), applied before unrolling.
/// When vectorization is applied at this *late* level in a typical
130 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
131 /// super-vectorization is expected to match (or subsume) the type of patterns
132 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
133 /// is higher level and our implementation should be significantly simpler. Also
134 /// note that in this mode, recursive patterns are probably a bit of an overkill
135 /// although it is reasonable to expect that mixing a bit of outer loop and
136 /// inner loop vectorization + unrolling will provide interesting choices to
137 /// MLIR.
138 ///
139 /// LLVM SLP Vectorizer:
140 /// --------------------
/// Super-vectorization, however, is not meant to be used in the same fashion
/// as the SLP vectorizer. The main difference lies in the information that
143 /// both vectorizers use: super-vectorization examines contiguity of memory
144 /// references along fastest varying dimensions and loops with recursive nested
145 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
146 /// the other hand, performs flat pattern matching inside a single unrolled loop
147 /// body and stitches together pieces of load and store operations into full
148 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
149 /// innermost loop, control-flow dependent patterns that super-vectorization may
150 /// not be able to capture easily. In other words, super-vectorization does not
151 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
152 ///
153 /// Ongoing investigations:
154 /// -----------------------
155 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
157 /// opportunities in the context of the traditional polyhedral compiler flow
158 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
/// where we expect to experiment with super-vectorization:
160 /// 1. Right after language lowering to MLIR: this is the earliest time where
161 ///    super-vectorization is expected to be applied. At this level, all the
162 ///    language/user/library-level annotations are available and can be fully
163 ///    exploited. Examples include loop-type annotations (such as parallel,
164 ///    reduction, scan, dependence distance vector, vectorizable) as well as
165 ///    memory access annotations (such as non-aliasing writes guaranteed,
///    indirect accesses that are permutations by construction), or the fact
///    that a particular operation is prescribed atomic by the user. At this
168 ///    level, anything that enriches what dependence analysis can do should be
169 ///    aggressively exploited. At this level we are close to having explicit
170 ///    vector types in the language, except we do not impose that burden on the
171 ///    programmer/library: we derive information from scalar code + annotations.
172 /// 2. After dependence analysis and before polyhedral scheduling: the
173 ///    information that supports vectorization does not need to be supplied by a
174 ///    higher level of abstraction. Traditional dependence analysis is available
175 ///    in MLIR and will be used to drive vectorization and cost models.
176 ///
177 /// Let's pause here and remark that applying super-vectorization as described
178 /// in 1. and 2. presents clear opportunities and risks:
179 ///   - the opportunity is that vectorization is burned in the type system and
180 ///   is protected from the adverse effect of loop scheduling, tiling, loop
181 ///   interchange and all passes downstream. Provided that subsequent passes are
///   able to operate on vector types, the vector shapes, associated loop
///   iterator properties, alignment, and contiguity of fastest varying
///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true wherever
188 ///   vectorization is applied) and b. that imposing vectorization constraints
189 ///   too early may be overall detrimental to loop fusion, tiling and other
190 ///   transformations because the dependence distances are coarsened when
191 ///   operating on elemental vector types. For this reason, the pattern
192 ///   profitability analysis should include a component that also captures the
193 ///   maximal amount of fusion available under a particular pattern. This is
194 ///   still at the stage of rough ideas but in this context, search is our
195 ///   friend as the Tensor Comprehensions and auto-TVM contributions
196 ///   demonstrated previously.
197 /// Bottom-line is we do not yet have good answers for the above but aim at
198 /// making it easy to answer such questions.
199 ///
200 /// Back to our listing, the last places where early super-vectorization makes
201 /// sense are:
202 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
203 ///    to improve locality, parallelism and be configurable (e.g. max-fuse,
204 ///    smart-fuse etc). They can also have adverse effects on contiguity
205 ///    properties that are required for vectorization but the vector.transfer
206 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
207 ///    these properties.
208 /// 4. right after polyhedral-style scheduling+tiling;
209 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
210 ///    probably the most promising places because applying tiling achieves a
211 ///    separation of concerns that allows rescheduling to worry less about
212 ///    locality and more about parallelism and distribution (e.g. min-fuse).
213 ///
214 /// At these levels the risk-reward looks different: on one hand we probably
215 /// lost a good deal of language/user/library-level annotation; on the other
216 /// hand we gained parallelism and locality through scheduling and tiling.
217 /// However we probably want to ensure tiling is compatible with the
218 /// full-tile-only abstraction used in super-vectorization or suffer the
219 /// consequences. It is too early to place bets on what will win but we expect
220 /// super-vectorization to be the right abstraction to allow exploring at all
221 /// these levels. And again, search is our friend.
222 ///
223 /// Lastly, we mention it again here:
224 /// 6. as a MLIR-based alternative to VPLAN.
225 ///
226 /// Lowering, unrolling, pipelining:
227 /// ================================
228 /// TODO: point to the proper places.
229 ///
230 /// Algorithm:
231 /// ==========
232 /// The algorithm proceeds in a few steps:
233 ///  1. defining super-vectorization patterns and matching them on the tree of
///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
236 ///     loops that have a. conformable loop annotations attached (e.g. parallel,
237 ///     reduction, vectorizable, ...) as well as b. all contiguous load/store
238 ///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
240 ///  2. analyzing those patterns for profitability (TODO: and
241 ///     interference);
242 ///  3. then, for each pattern in order:
243 ///    a. applying iterative rewriting of the loops and all their nested
244 ///       operations in topological order. Rewriting is implemented by
245 ///       coarsening the loops and converting operations and operands to their
246 ///       vector forms. Processing operations in topological order is relatively
247 ///       simple due to the structured nature of the control-flow
248 ///       representation. This order ensures that all the operands of a given
249 ///       operation have been vectorized before the operation itself in a single
250 ///       traversal, except for operands defined outside of the loop nest. The
251 ///       algorithm can convert the following operations to their vector form:
252 ///         * Affine load and store operations are converted to opaque vector
253 ///           transfer read and write operations.
254 ///         * Scalar constant operations/operands are converted to vector
255 ///           constant operations (splat).
256 ///         * Uniform operands (only induction variables of loops not mapped to
257 ///           a vector dimension, or operands defined outside of the loop nest
258 ///           for now) are broadcasted to a vector.
259 ///           TODO: Support more uniform cases.
260 ///         * Affine for operations with 'iter_args' are vectorized by
261 ///           vectorizing their 'iter_args' operands and results.
262 ///           TODO: Support more complex loops with divergent lbs and/or ubs.
263 ///         * The remaining operations in the loop nest are vectorized by
264 ///           widening their scalar types to vector types.
265 ///    b. if everything under the root AffineForOp in the current pattern
266 ///       is vectorized properly, we commit that loop to the IR and remove the
267 ///       scalar loop. Otherwise, we discard the vectorized loop and keep the
268 ///       original scalar loop.
///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
///       re-verify that the pattern is still vectorizable. This is expected to
273 ///       make cost models more difficult to write and is subject to improvement
274 ///       in the future.
275 ///
276 /// Choice of loop transformation to support the algorithm:
277 /// =======================================================
278 /// The choice of loop transformation to apply for coarsening vectorized loops
279 /// is still subject to exploratory tradeoffs. In particular, say we want to
280 /// vectorize by a factor 128, we want to transform the following input:
281 /// ```mlir
282 ///   affine.for %i = %M to %N {
283 ///     %a = affine.load %A[%i] : memref<?xf32>
284 ///   }
285 /// ```
286 ///
287 /// Traditionally, one would vectorize late (after scheduling, tiling,
288 /// memory promotion etc) say after stripmining (and potentially unrolling in
289 /// the case of LLVM's SLP vectorizer):
290 /// ```mlir
291 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
292 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
293 ///       %a = affine.load %A[%ii] : memref<?xf32>
294 ///     }
295 ///   }
296 /// ```
297 ///
298 /// Instead, we seek to vectorize early and freeze vector types before
299 /// scheduling, so we want to generate a pattern that resembles:
300 /// ```mlir
301 ///   affine.for %i = ? to ? step ? {
302 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
303 ///   }
304 /// ```
305 ///
306 /// i. simply dividing the lower / upper bounds by 128 creates issues
307 ///    when representing expressions such as ii + 1 because now we only
308 ///    have access to original values that have been divided. Additional
309 ///    information is needed to specify accesses at below-128 granularity;
310 /// ii. another alternative is to coarsen the loop step but this may have
311 ///    consequences on dependence analysis and fusability of loops: fusable
312 ///    loops probably need to have the same step (because we don't want to
313 ///    stripmine/unroll to enable fusion).
314 /// As a consequence, we choose to represent the coarsening using the loop
315 /// step for now and reevaluate in the future. Note that we can renormalize
316 /// loop steps later if/when we have evidence that they are problematic.
317 ///
318 /// For the simple strawman example above, vectorizing for a 1-D vector
319 /// abstraction of size 128 returns code similar to:
320 /// ```mlir
321 ///   affine.for %i = %M to %N step 128 {
322 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
323 ///   }
324 /// ```
325 ///
326 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
327 /// ========================================================================
328 ///   1. lowering to concrete vector types for various HW;
329 ///   2. reduction support for n-D vectorization and non-unit steps;
330 ///   3. non-effecting padding during vector.transfer_read and filter during
331 ///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
333 ///      (hopefully without read-modify-writes);
334 ///   5. control-flow support;
335 ///   6. cost-models, heuristics and search;
336 ///   7. Op implementation, extensions and implication on memref views;
337 ///   8. many TODOs left around.
338 ///
339 /// Examples:
340 /// =========
341 /// Consider the following Function:
342 /// ```mlir
343 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
344 ///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
345 ///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
346 ///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
347 ///   %f1 = constant 1.0 : f32
348 ///   %f2 = constant 2.0 : f32
349 ///   affine.for %i0 = 0 to %M {
350 ///     affine.for %i1 = 0 to %N {
351 ///       // non-scoped %f1
352 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
353 ///     }
354 ///   }
355 ///   affine.for %i2 = 0 to %M {
356 ///     affine.for %i3 = 0 to %N {
357 ///       // non-scoped %f2
358 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
359 ///     }
360 ///   }
361 ///   affine.for %i4 = 0 to %M {
362 ///     affine.for %i5 = 0 to %N {
363 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
364 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
365 ///       %s5 = addf %a5, %b5 : f32
366 ///       // non-scoped %f1
367 ///       %s6 = addf %s5, %f1 : f32
368 ///       // non-scoped %f2
369 ///       %s7 = addf %s5, %f2 : f32
370 ///       // diamond dependency.
371 ///       %s8 = addf %s7, %s6 : f32
372 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
373 ///     }
374 ///   }
375 ///   %c7 = constant 7 : index
376 ///   %c42 = constant 42 : index
377 ///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
378 ///   return %res : f32
379 /// }
380 /// ```
381 ///
382 /// The -affine-vectorize pass with the following arguments:
383 /// ```
384 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
385 /// ```
386 ///
387 /// produces this standard innermost-loop vectorized code:
388 /// ```mlir
389 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
390 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
391 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
392 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
393 ///   %cst = constant 1.0 : f32
394 ///   %cst_0 = constant 2.0 : f32
395 ///   affine.for %i0 = 0 to %arg0 {
396 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<256xf32>
399 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
400 ///                vector<256xf32>, memref<?x?xf32>
401 ///     }
402 ///   }
403 ///   affine.for %i2 = 0 to %arg0 {
404 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<256xf32>
407 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
408 ///                vector<256xf32>, memref<?x?xf32>
409 ///     }
410 ///   }
411 ///   affine.for %i4 = 0 to %arg0 {
412 ///     affine.for %i5 = 0 to %arg1 step 256 {
413 ///       %3 = vector.transfer_read %0[%i4, %i5] :
414 ///            memref<?x?xf32>, vector<256xf32>
415 ///       %4 = vector.transfer_read %1[%i4, %i5] :
416 ///            memref<?x?xf32>, vector<256xf32>
417 ///       %5 = addf %3, %4 : vector<256xf32>
///       %cst_3 = constant dense<1.0> : vector<256xf32>
420 ///       %6 = addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = constant dense<2.0> : vector<256xf32>
423 ///       %7 = addf %5, %cst_4 : vector<256xf32>
424 ///       %8 = addf %7, %6 : vector<256xf32>
425 ///       vector.transfer_write %8, %2[%i4, %i5] :
426 ///                vector<256xf32>, memref<?x?xf32>
427 ///     }
428 ///   }
429 ///   %c7 = constant 7 : index
430 ///   %c42 = constant 42 : index
431 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
432 ///   return %9 : f32
433 /// }
434 /// ```
435 ///
436 /// The -affine-vectorize pass with the following arguments:
437 /// ```
438 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
439 /// ```
440 ///
441 /// produces this more interesting mixed outer-innermost-loop vectorized code:
442 /// ```mlir
443 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
444 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
445 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
446 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
447 ///   %cst = constant 1.0 : f32
448 ///   %cst_0 = constant 2.0 : f32
449 ///   affine.for %i0 = 0 to %arg0 step 32 {
450 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<32x256xf32>
453 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
454 ///                vector<32x256xf32>, memref<?x?xf32>
455 ///     }
456 ///   }
457 ///   affine.for %i2 = 0 to %arg0 step 32 {
458 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<32x256xf32>
461 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
462 ///                vector<32x256xf32>, memref<?x?xf32>
463 ///     }
464 ///   }
465 ///   affine.for %i4 = 0 to %arg0 step 32 {
466 ///     affine.for %i5 = 0 to %arg1 step 256 {
467 ///       %3 = vector.transfer_read %0[%i4, %i5] :
468 ///                memref<?x?xf32> vector<32x256xf32>
469 ///       %4 = vector.transfer_read %1[%i4, %i5] :
470 ///                memref<?x?xf32>, vector<32x256xf32>
471 ///       %5 = addf %3, %4 : vector<32x256xf32>
///       %cst_3 = constant dense<1.0> : vector<32x256xf32>
474 ///       %6 = addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = constant dense<2.0> : vector<32x256xf32>
477 ///       %7 = addf %5, %cst_4 : vector<32x256xf32>
478 ///       %8 = addf %7, %6 : vector<32x256xf32>
479 ///       vector.transfer_write %8, %2[%i4, %i5] :
480 ///                vector<32x256xf32>, memref<?x?xf32>
481 ///     }
482 ///   }
483 ///   %c7 = constant 7 : index
484 ///   %c42 = constant 42 : index
485 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
486 ///   return %9 : f32
487 /// }
488 /// ```
489 ///
490 /// Of course, much more intricate n-D imperfectly-nested patterns can be
491 /// vectorized too and specified in a fully declarative fashion.
492 ///
493 /// Reduction:
494 /// ==========
495 /// Vectorizing reduction loops along the reduction dimension is supported if:
496 /// - the reduction kind is supported,
497 /// - the vectorization is 1-D, and
/// - the step size of the loop equals one.
499 ///
/// Compared to the non-vector-dimension case, two additional things are done
501 /// during vectorization of such loops:
502 /// - The resulting vector returned from the loop is reduced to a scalar using
///   `vector.reduction`.
504 /// - In some cases a mask is applied to the vector yielded at the end of the
505 ///   loop to prevent garbage values from being written to the accumulator.
506 ///
/// Reduction vectorization is switched off by default; it can be enabled by
508 /// passing a map from loops to reductions to utility functions, or by passing
509 /// `vectorize-reductions=true` to the vectorization pass.
510 ///
511 /// Consider the following example:
512 /// ```mlir
513 /// func @vecred(%in: memref<512xf32>) -> f32 {
514 ///   %cst = constant 0.000000e+00 : f32
515 ///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
516 ///     %ld = affine.load %in[%i] : memref<512xf32>
517 ///     %cos = math.cos %ld : f32
518 ///     %add = addf %part_sum, %cos : f32
519 ///     affine.yield %add : f32
520 ///   }
521 ///   return %sum : f32
522 /// }
523 /// ```
524 ///
525 /// The -affine-vectorize pass with the following arguments:
526 /// ```
527 /// -affine-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
528 ///                    vectorize-reductions=true"
529 /// ```
530 /// produces the following output:
531 /// ```mlir
532 /// #map = affine_map<(d0) -> (-d0 + 500)>
533 /// func @vecred(%arg0: memref<512xf32>) -> f32 {
534 ///   %cst = constant 0.000000e+00 : f32
535 ///   %cst_0 = constant dense<0.000000e+00> : vector<128xf32>
536 ///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
537 ///           -> (vector<128xf32>) {
538 ///     // %2 is the number of iterations left in the original loop.
539 ///     %2 = affine.apply #map(%arg1)
540 ///     %3 = vector.create_mask %2 : vector<128xi1>
541 ///     %cst_1 = constant 0.000000e+00 : f32
542 ///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
543 ///                     memref<512xf32>, vector<128xf32>
544 ///     %5 = math.cos %4 : vector<128xf32>
545 ///     %6 = addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
547 ///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
548 ///     affine.yield %7 : vector<128xf32>
549 ///   }
550 ///   %1 = vector.reduction "add", %0 : vector<128xf32> into f32
551 ///   return %1 : f32
552 /// }
553 /// ```
554 ///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result. The mask is full of
/// ones in every iteration except for the last one, in which it has the form
/// `11...100...0` with 116 ones and 12 zeros: the trip count of 500 is not a
/// multiple of the vector size 128, so the last iteration covers only
/// 500 - 3 * 128 = 116 valid elements.
559 
560 #define DEBUG_TYPE "early-vect"
561 
562 using llvm::dbgs;
563 
564 /// Forward declaration.
565 static FilterFunctionType
566 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
567                              int fastestVaryingMemRefDimension);
568 
569 /// Creates a vectorization pattern from the command line arguments.
570 /// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// llvm::None, which conservatively results in no vectorization.
573 static Optional<NestedPattern>
574 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
575             ArrayRef<int64_t> fastestVaryingPattern) {
576   using matcher::For;
577   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
578   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
579   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
580   switch (vectorRank) {
581   case 1:
582     return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
583   case 2:
584     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
585                For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
586   case 3:
587     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
588                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
589                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
590   default: {
591     return llvm::None;
592   }
593   }
594 }
595 
596 static NestedPattern &vectorTransferPattern() {
597   static auto pattern = matcher::Op([](Operation &op) {
598     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
599   });
600   return pattern;
601 }
602 
603 namespace {
604 
605 /// Base state for the vectorize pass.
606 /// Command line arguments are preempted by non-empty pass arguments.
607 struct Vectorize : public AffineVectorizeBase<Vectorize> {
608   Vectorize() = default;
609   Vectorize(ArrayRef<int64_t> virtualVectorSize);
610   void runOnFunction() override;
611 };
612 
613 } // end anonymous namespace
614 
615 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
616   vectorSizes = virtualVectorSize;
617 }
618 
619 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
620                                       unsigned patternDepth,
621                                       VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth must be greater than depthInPattern");
624   if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
625     // Don't vectorize this loop
626     return;
627   }
628   strategy->loopToVectorDim[loop] =
629       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
630 }
631 
632 /// Implements a simple strawman strategy for vectorization.
633 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
634 /// greedily assigns the fastest varying dimension ** of the vector ** to the
635 /// innermost loop in the pattern.
636 /// When coupled with a pattern that looks for the fastest varying dimension in
637 /// load/store MemRefs, this creates a generic vectorization strategy that works
638 /// for any loop in a hierarchy (outermost, innermost or intermediate).
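///
/// For illustration, assume 2-D vector sizes [32, 256] and a matched pattern
/// of depth 3: the innermost matched loop is assigned vector dimension 1, its
/// parent is assigned vector dimension 0, and the outermost matched loop is
/// left unvectorized because the pattern is deeper than the vector rank.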
639 ///
640 /// TODO: In the future we should additionally increase the power of the
641 /// profitability analysis along 3 directions:
642 ///   1. account for loop extents (both static and parametric + annotations);
643 ///   2. account for data layout permutations;
644 ///   3. account for impact of vectorization on maximal loop fusion.
645 /// Then we can quantify the above to build a cost model and search over
646 /// strategies.
647 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
648                                           unsigned depthInPattern,
649                                           unsigned patternDepth,
650                                           VectorizationStrategy *strategy) {
651   for (auto m : matches) {
652     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
653                                     patternDepth, strategy))) {
654       return failure();
655     }
656     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
657                               patternDepth, strategy);
658   }
659   return success();
660 }
661 
662 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
663 
664 namespace {
665 
666 struct VectorizationState {
667 
668   VectorizationState(MLIRContext *context) : builder(context) {}
669 
670   /// Registers the vector replacement of a scalar operation and its result
671   /// values. Both operations must have the same number of results.
672   ///
673   /// This utility is used to register the replacement for the vast majority of
674   /// the vectorized operations.
675   ///
676   /// Example:
677   ///   * 'replaced': %0 = addf %1, %2 : f32
678   ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
679   void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
680 
681   /// Registers the vector replacement of a scalar value. The replacement
682   /// operation should have a single result, which replaces the scalar value.
683   ///
684   /// This utility is used to register the vector replacement of block arguments
685   /// and operation results which are not directly vectorized (i.e., their
686   /// scalar version still exists after vectorization), like uniforms.
687   ///
688   /// Example:
689   ///   * 'replaced': block argument or operation outside of the vectorized
690   ///     loop.
691   ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
692   void registerValueVectorReplacement(Value replaced, Operation *replacement);
693 
694   /// Registers the vector replacement of a block argument (e.g., iter_args).
695   ///
696   /// Example:
697   ///   * 'replaced': 'iter_arg' block argument.
698   ///   * 'replacement': vectorized 'iter_arg' block argument.
699   void registerBlockArgVectorReplacement(BlockArgument replaced,
700                                          BlockArgument replacement);
701 
702   /// Registers the scalar replacement of a scalar value. 'replacement' must be
703   /// scalar. Both values must be block arguments. Operation results should be
  /// replaced using the 'registerOp*' utilities.
705   ///
706   /// This utility is used to register the replacement of block arguments
707   /// that are within the loop to be vectorized and will continue being scalar
708   /// within the vector loop.
709   ///
710   /// Example:
711   ///   * 'replaced': induction variable of a loop to be vectorized.
712   ///   * 'replacement': new induction variable in the new vector loop.
713   void registerValueScalarReplacement(BlockArgument replaced,
714                                       BlockArgument replacement);
715 
716   /// Registers the scalar replacement of a scalar result returned from a
717   /// reduction loop. 'replacement' must be scalar.
718   ///
719   /// This utility is used to register the replacement for scalar results of
720   /// vectorized reduction loops with iter_args.
721   ///
  /// Example:
723   ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
724   ///   * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
725   void registerLoopResultScalarReplacement(Value replaced, Value replacement);
726 
727   /// Returns in 'replacedVals' the scalar replacement for values in
728   /// 'inputVals'.
729   void getScalarValueReplacementsFor(ValueRange inputVals,
730                                      SmallVectorImpl<Value> &replacedVals);
731 
732   /// Erases the scalar loop nest after its successful vectorization.
733   void finishVectorizationPattern(AffineForOp rootLoop);
734 
735   // Used to build and insert all the new operations created. The insertion
736   // point is preserved and updated along the vectorization process.
737   OpBuilder builder;
738 
739   // Maps input scalar operations to their vector counterparts.
740   DenseMap<Operation *, Operation *> opVectorReplacement;
741   // Maps input scalar values to their vector counterparts.
742   BlockAndValueMapping valueVectorReplacement;
743   // Maps input scalar values to their new scalar counterparts in the vector
744   // loop nest.
745   BlockAndValueMapping valueScalarReplacement;
746   // Maps results of reduction loops to their new scalar counterparts.
747   DenseMap<Value, Value> loopResultScalarReplacement;
748 
749   // Maps the newly created vector loops to their vector dimension.
750   DenseMap<Operation *, unsigned> vecLoopToVecDim;
751 
  // Maps the new vectorized loops to the corresponding vector masks if one is
  // required.
754   DenseMap<Operation *, Value> vecLoopToMask;
755 
756   // The strategy drives which loop to vectorize by which amount.
757   const VectorizationStrategy *strategy;
758 
759 private:
760   /// Internal implementation to map input scalar values to new vector or scalar
761   /// values.
762   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
763   void registerValueScalarReplacementImpl(Value replaced, Value replacement);
764 };
765 
766 } // end namespace
767 
768 /// Registers the vector replacement of a scalar operation and its result
769 /// values. Both operations must have the same number of results.
770 ///
771 /// This utility is used to register the replacement for the vast majority of
772 /// the vectorized operations.
773 ///
774 /// Example:
775 ///   * 'replaced': %0 = addf %1, %2 : f32
776 ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
777 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
778                                                      Operation *replacement) {
779   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
780   LLVM_DEBUG(dbgs() << *replaced << "\n");
781   LLVM_DEBUG(dbgs() << "into\n");
782   LLVM_DEBUG(dbgs() << *replacement << "\n");
783 
784   assert(replaced->getNumResults() == replacement->getNumResults() &&
785          "Unexpected replaced and replacement results");
786   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
787   opVectorReplacement[replaced] = replacement;
788 
789   for (auto resultTuple :
790        llvm::zip(replaced->getResults(), replacement->getResults()))
791     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
792                                        std::get<1>(resultTuple));
793 }
794 
795 /// Registers the vector replacement of a scalar value. The replacement
796 /// operation should have a single result, which replaces the scalar value.
797 ///
798 /// This utility is used to register the vector replacement of block arguments
799 /// and operation results which are not directly vectorized (i.e., their
800 /// scalar version still exists after vectorization), like uniforms.
801 ///
802 /// Example:
803 ///   * 'replaced': block argument or operation outside of the vectorized loop.
804 ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
805 void VectorizationState::registerValueVectorReplacement(
806     Value replaced, Operation *replacement) {
807   assert(replacement->getNumResults() == 1 &&
808          "Expected single-result replacement");
809   if (Operation *defOp = replaced.getDefiningOp())
810     registerOpVectorReplacement(defOp, replacement);
811   else
812     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
813 }
814 
815 /// Registers the vector replacement of a block argument (e.g., iter_args).
816 ///
817 /// Example:
818 ///   * 'replaced': 'iter_arg' block argument.
819 ///   * 'replacement': vectorized 'iter_arg' block argument.
820 void VectorizationState::registerBlockArgVectorReplacement(
821     BlockArgument replaced, BlockArgument replacement) {
822   registerValueVectorReplacementImpl(replaced, replacement);
823 }
824 
825 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
826                                                             Value replacement) {
827   assert(!valueVectorReplacement.contains(replaced) &&
828          "Vector replacement already registered");
829   assert(replacement.getType().isa<VectorType>() &&
830          "Expected vector type in vector replacement");
831   valueVectorReplacement.map(replaced, replacement);
832 }
833 
834 /// Registers the scalar replacement of a scalar value. 'replacement' must be
835 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
837 ///
838 /// This utility is used to register the replacement of block arguments
839 /// that are within the loop to be vectorized and will continue being scalar
840 /// within the vector loop.
841 ///
842 /// Example:
843 ///   * 'replaced': induction variable of a loop to be vectorized.
844 ///   * 'replacement': new induction variable in the new vector loop.
845 void VectorizationState::registerValueScalarReplacement(
846     BlockArgument replaced, BlockArgument replacement) {
847   registerValueScalarReplacementImpl(replaced, replacement);
848 }
849 
850 /// Registers the scalar replacement of a scalar result returned from a
851 /// reduction loop. 'replacement' must be scalar.
852 ///
853 /// This utility is used to register the replacement for scalar results of
854 /// vectorized reduction loops with iter_args.
855 ///
/// Example:
857 ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
858 ///   * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
859 void VectorizationState::registerLoopResultScalarReplacement(
860     Value replaced, Value replacement) {
861   assert(isa<AffineForOp>(replaced.getDefiningOp()));
862   assert(loopResultScalarReplacement.count(replaced) == 0 &&
863          "already registered");
864   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
865                        "with scalar: "
866                     << replacement);
867   loopResultScalarReplacement[replaced] = replacement;
868 }
869 
870 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
871                                                             Value replacement) {
872   assert(!valueScalarReplacement.contains(replaced) &&
873          "Scalar value replacement already registered");
874   assert(!replacement.getType().isa<VectorType>() &&
875          "Expected scalar type in scalar replacement");
876   valueScalarReplacement.map(replaced, replacement);
877 }
878 
879 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
880 void VectorizationState::getScalarValueReplacementsFor(
881     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
882   for (Value inputVal : inputVals)
883     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
884 }
885 
886 /// Erases a loop nest, including all its nested operations.
887 static void eraseLoopNest(AffineForOp forOp) {
888   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
889   forOp.erase();
890 }
891 
892 /// Erases the scalar loop nest after its successful vectorization.
893 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
894   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
895   eraseLoopNest(rootLoop);
896 }
897 
// Apply 'map' with 'mapOperands', returning the resulting values in 'results'.
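// For illustration, for a hypothetical map such as (d0, d1) -> (d0 + 1, d1),
// this emits one single-result affine.apply per result expression (two in this
// case) and appends their values to 'results'.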
899 static void computeMemoryOpIndices(Operation *op, AffineMap map,
900                                    ValueRange mapOperands,
901                                    VectorizationState &state,
902                                    SmallVectorImpl<Value> &results) {
903   for (auto resultExpr : map.getResults()) {
904     auto singleResMap =
905         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
906     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
907                                                     mapOperands);
908     results.push_back(afOp);
909   }
910 }
911 
912 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
913 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
915 static FilterFunctionType
916 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
917                              int fastestVaryingMemRefDimension) {
918   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
919     auto loop = cast<AffineForOp>(forOp);
920     auto parallelIt = parallelLoops.find(loop);
921     if (parallelIt == parallelLoops.end())
922       return false;
923     int memRefDim = -1;
924     auto vectorizableBody =
925         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
926     if (!vectorizableBody)
927       return false;
928     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
929            memRefDim == fastestVaryingMemRefDimension;
930   };
931 }
932 
933 /// Returns the vector type resulting from applying the provided vectorization
934 /// strategy on the scalar type.
935 static VectorType getVectorType(Type scalarTy,
936                                 const VectorizationStrategy *strategy) {
937   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
938   return VectorType::get(strategy->vectorSizes, scalarTy);
939 }
940 
/// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
/// nullptr otherwise.
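///
/// For illustration, assuming a 1-D strategy of size 128, a scalar constant
/// such as:
///   %cst = constant 1.0 : f32
/// would be replaced by a splat vector constant resembling:
///   %cst_v = constant dense<1.0> : vector<128xf32>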
944 static ConstantOp vectorizeConstant(ConstantOp constOp,
945                                     VectorizationState &state) {
946   Type scalarTy = constOp.getType();
947   if (!VectorType::isValidElementType(scalarTy))
948     return nullptr;
949 
950   auto vecTy = getVectorType(scalarTy, state.strategy);
951   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
952 
953   OpBuilder::InsertionGuard guard(state.builder);
954   Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
955   // Find the innermost vectorized ancestor loop to insert the vector constant.
956   while (parentOp && !state.vecLoopToVecDim.count(parentOp))
957     parentOp = parentOp->getParentOp();
958   assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
959          isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
960   auto vecForOp = cast<AffineForOp>(parentOp);
961   state.builder.setInsertionPointToStart(vecForOp.getBody());
962   auto newConstOp = state.builder.create<ConstantOp>(constOp.getLoc(), vecAttr);
963 
964   // Register vector replacement for future uses in the scope.
965   state.registerOpVectorReplacement(constOp, newConstOp);
966   return newConstOp;
967 }
968 
/// Creates a constant vector filled with the neutral element of the given
/// reduction. The scalar type of the vector elements will be taken from
/// `oldOperand`.
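///
/// For illustration, for an "addf" reduction over f32 with a 1-D strategy of
/// size 128, this would create something resembling:
///   %init = constant dense<0.000000e+00> : vector<128xf32>
/// i.e. a vector splat of the neutral element of the reduction.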
972 static ConstantOp createInitialVector(AtomicRMWKind reductionKind,
973                                       Value oldOperand,
974                                       VectorizationState &state) {
975   Type scalarTy = oldOperand.getType();
976   if (!VectorType::isValidElementType(scalarTy))
977     return nullptr;
978 
979   Attribute valueAttr = getIdentityValueAttr(
980       reductionKind, scalarTy, state.builder, oldOperand.getLoc());
981   auto vecTy = getVectorType(scalarTy, state.strategy);
982   auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
983   auto newConstOp =
984       state.builder.create<ConstantOp>(oldOperand.getLoc(), vecAttr);
985 
986   return newConstOp;
987 }
988 
989 /// Creates a mask used to filter out garbage elements in the last iteration
990 /// of unaligned loops. If a mask is not required then `nullptr` is returned.
991 /// The mask will be a vector of booleans representing meaningful vector
992 /// elements in the current iteration. It is filled with ones for each iteration
993 /// except for the last one, where it has the form `11...100...0` with the
994 /// number of ones equal to the number of meaningful elements (i.e. the number
995 /// of iterations that would be left in the original loop).
996 static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
997   assert(state.strategy->vectorSizes.size() == 1 &&
998          "Creating a mask non-1-D vectors is not supported.");
999   assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
1000          "Creating a mask for loops with non-unit original step size is not "
1001          "supported.");
1002 
1003   // Check if we have already created the mask.
1004   if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1005     return mask;
1006 
1007   // If the loop has constant bounds and the original number of iterations is
  // divisible by the vector size then we don't need a mask.
1009   if (vecForOp.hasConstantBounds()) {
1010     int64_t originalTripCount =
1011         vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1012     if (originalTripCount % vecForOp.getStep() == 0)
1013       return nullptr;
1014   }
1015 
1016   OpBuilder::InsertionGuard guard(state.builder);
1017   state.builder.setInsertionPointToStart(vecForOp.getBody());
1018 
1019   // We generate the mask using the `vector.create_mask` operation which accepts
1020   // the number of meaningful elements (i.e. the length of the prefix of 1s).
1021   // To compute the number of meaningful elements we subtract the current value
1022   // of the iteration variable from the upper bound of the loop. Example:
1023   //
1024   //     // 500 is the upper bound of the loop
1025   //     #map = affine_map<(d0) -> (500 - d0)>
1026   //     %elems_left = affine.apply #map(%iv)
1027   //     %mask = vector.create_mask %elems_left : vector<128xi1>
1028 
1029   Location loc = vecForOp.getLoc();
1030 
1031   // First we get the upper bound of the loop using `affine.apply` or
1032   // `affine.min`.
1033   AffineMap ubMap = vecForOp.getUpperBoundMap();
1034   Value ub;
1035   if (ubMap.getNumResults() == 1)
1036     ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1037                                              vecForOp.getUpperBoundOperands());
1038   else
1039     ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1040                                            vecForOp.getUpperBoundOperands());
1041   // Then we compute the number of (original) iterations left in the loop.
1042   AffineExpr subExpr =
1043       state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1044   Value itersLeft =
1045       makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1046                               {ub, vecForOp.getInductionVar()});
1047   // If the affine maps were successfully composed then `ub` is unneeded.
1048   if (ub.use_empty())
1049     ub.getDefiningOp()->erase();
1050   // Finally we create the mask.
1051   Type maskTy = VectorType::get(state.strategy->vectorSizes,
1052                                 state.builder.getIntegerType(1));
1053   Value mask =
1054       state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1055 
1056   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1057                     << itersLeft << "\n"
1058                     << mask << "\n");
1059 
1060   state.vecLoopToMask[vecForOp] = mask;
1061   return mask;
1062 }
1063 
1064 /// Returns true if the provided value is vector uniform given the vectorization
1065 /// strategy.
1066 // TODO: For now, only values that are induction variables of loops not in
1067 // `loopToVectorDim` or invariants to all the loops in the vectorization
1068 // strategy are considered vector uniforms.
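//
// For illustration, in a nest where only the innermost loop is mapped to a
// vector dimension, both the induction variable of the outer (non-vectorized)
// loop and any value defined above the nest are treated as uniform:
//   affine.for %i = 0 to %M {             // not in `loopToVectorDim`
//     affine.for %j = 0 to %N step 128 {  // vectorized
//       ... uses of %i here are uniform and will be broadcast ...
//     }
//   }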
1069 static bool isUniformDefinition(Value value,
1070                                 const VectorizationStrategy *strategy) {
1071   AffineForOp forOp = getForInductionVarOwner(value);
1072   if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
1073     return true;
1074 
1075   for (auto loopToDim : strategy->loopToVectorDim) {
1076     auto loop = cast<AffineForOp>(loopToDim.first);
1077     if (!loop.isDefinedOutsideOfLoop(value))
1078       return false;
1079   }
1080   return true;
1081 }
1082 
1083 /// Generates a broadcast op for the provided uniform value using the
1084 /// vectorization strategy in 'state'.
1085 static Operation *vectorizeUniform(Value uniformVal,
1086                                    VectorizationState &state) {
1087   OpBuilder::InsertionGuard guard(state.builder);
1088   Value uniformScalarRepl =
1089       state.valueScalarReplacement.lookupOrDefault(uniformVal);
1090   state.builder.setInsertionPointAfterValue(uniformScalarRepl);
1091 
1092   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
1093   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
1094                                                    vectorTy, uniformScalarRepl);
1095   state.registerValueVectorReplacement(uniformVal, bcastOp);
1096   return bcastOp;
1097 }
1098 
1099 /// Tries to vectorize a given `operand` by applying the following logic:
1100 /// 1. if the defining operation has been already vectorized, `operand` is
1101 ///    already in the proper vector form;
1102 /// 2. if the `operand` is a constant, returns the vectorized form of the
1103 ///    constant;
1104 /// 3. if the `operand` is uniform, returns a vector broadcast of the `op`;
1105 /// 4. otherwise, the vectorization of `operand` is not supported.
1106 /// Newly created vector operations are registered in `state` as replacement
1107 /// for their scalar counterparts.
1108 /// In particular this logic captures some of the use cases where definitions
1109 /// that are not scoped under the current pattern are needed to vectorize.
1110 /// One such example is top level function constants that need to be splatted.
1111 ///
1112 /// Returns an operand that has been vectorized to match `state`'s strategy if
1113 /// vectorization is possible with the above logic. Returns nullptr otherwise.
1114 ///
1115 /// TODO: handle more complex cases.
1116 static Value vectorizeOperand(Value operand, VectorizationState &state) {
1117   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
1118   // If this value is already vectorized, we are done.
1119   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
1120     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
1121     return vecRepl;
1122   }
1123 
  // A vector operand that is not in the replacement map should never reach
1125   // this point. Reaching this point could mean that the code was already
1126   // vectorized and we shouldn't try to vectorize already vectorized code.
1127   assert(!operand.getType().isa<VectorType>() &&
1128          "Vector op not found in replacement map");
1129 
1130   // Vectorize constant.
1131   if (auto constOp = operand.getDefiningOp<ConstantOp>()) {
1132     ConstantOp vecConstant = vectorizeConstant(constOp, state);
1133     LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
1134     return vecConstant.getResult();
1135   }
1136 
1137   // Vectorize uniform values.
1138   if (isUniformDefinition(operand, state.strategy)) {
1139     Operation *vecUniform = vectorizeUniform(operand, state);
1140     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
1141     return vecUniform->getResult(0);
1142   }
1143 
1144   // Check for unsupported block argument scenarios. A supported block argument
1145   // should have been vectorized already.
1146   if (!operand.getDefiningOp())
1147     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
1148   else
1149     // Generic unsupported case.
1150     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
1151 
1152   return nullptr;
1153 }
1154 
1155 /// Vectorizes an affine load with the vectorization strategy in 'state' by
1156 /// generating a 'vector.transfer_read' op with the proper permutation map
1157 /// inferred from the indices of the load. The new 'vector.transfer_read' is
1158 /// registered as replacement of the scalar load. Returns the newly created
1159 /// 'vector.transfer_read' if vectorization was successful. Returns nullptr
1160 /// otherwise.
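/// As an illustrative sketch (assuming a 1-D strategy of size 128 along %j),
/// a load such as 'affine.load %A[%i, %j]' is expected to be rewritten
/// roughly as:
///
///   %v = vector.transfer_read %A[%i, %j], %pad
///       {permutation_map = affine_map<(d0, d1) -> (d1)>}
///       : memref<?x?xf32>, vector<128xf32>
///
/// where %pad stands for the padding value of the transfer.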
1161 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
1162                                       VectorizationState &state) {
1163   MemRefType memRefType = loadOp.getMemRefType();
1164   Type elementType = memRefType.getElementType();
1165   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1166 
1167   // Replace map operands with operands from the vector loop nest.
1168   SmallVector<Value, 8> mapOperands;
1169   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
1170 
1171   // Compute indices for the transfer op. AffineApplyOps may be generated.
1172   SmallVector<Value, 8> indices;
1173   indices.reserve(memRefType.getRank());
1174   if (loadOp.getAffineMap() !=
1175       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1176     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
1177                            indices);
1178   else
1179     indices.append(mapOperands.begin(), mapOperands.end());
1180 
1181   // Compute permutation map using the information of new vector loops.
1182   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1183                                            indices, state.vecLoopToVecDim);
1184   if (!permutationMap) {
1185     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
1186     return nullptr;
1187   }
1188   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1189   LLVM_DEBUG(permutationMap.print(dbgs()));
1190 
1191   auto transfer = state.builder.create<vector::TransferReadOp>(
1192       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
1193 
1194   // Register replacement for future uses in the scope.
1195   state.registerOpVectorReplacement(loadOp, transfer);
1196   return transfer;
1197 }
1198 
1199 /// Vectorizes an affine store with the vectorization strategy in 'state' by
1200 /// generating a 'vector.transfer_write' op with the proper permutation map
1201 /// inferred from the indices of the store. The new 'vector.transfer_write' is
1202 /// registered as replacement of the scalar store. Returns the newly created
1203 /// 'vector.transfer_write' if vectorization was successful. Returns nullptr
1204 /// otherwise.
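/// As an illustrative sketch (assuming a 1-D strategy of size 128 along %j),
/// a store such as 'affine.store %val, %A[%i, %j]' is expected to be
/// rewritten roughly as:
///
///   vector.transfer_write %vval, %A[%i, %j]
///       {permutation_map = affine_map<(d0, d1) -> (d1)>}
///       : vector<128xf32>, memref<?x?xf32>
///
/// where %vval is the vectorized form of %val.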
1205 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
1206                                        VectorizationState &state) {
1207   MemRefType memRefType = storeOp.getMemRefType();
1208   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1209   if (!vectorValue)
1210     return nullptr;
1211 
1212   // Replace map operands with operands from the vector loop nest.
1213   SmallVector<Value, 8> mapOperands;
1214   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
1215 
1216   // Compute indices for the transfer op. AffineApplyOps may be generated.
1217   SmallVector<Value, 8> indices;
1218   indices.reserve(memRefType.getRank());
1219   if (storeOp.getAffineMap() !=
1220       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1221     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1222                            indices);
1223   else
1224     indices.append(mapOperands.begin(), mapOperands.end());
1225 
1226   // Compute permutation map using the information of new vector loops.
1227   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1228                                            indices, state.vecLoopToVecDim);
1229   if (!permutationMap)
1230     return nullptr;
1231   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1232   LLVM_DEBUG(permutationMap.print(dbgs()));
1233 
1234   auto transfer = state.builder.create<vector::TransferWriteOp>(
1235       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1236       permutationMap);
1237   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1238 
1239   // Register replacement for future uses in the scope.
1240   state.registerOpVectorReplacement(storeOp, transfer);
1241   return transfer;
1242 }
1243 
1244 /// Returns true if `value` is a constant equal to the neutral element of the
1245 /// given vectorizable reduction.
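/// For instance, the neutral element of an 'addf' reduction is 0.0, so an
/// initial value defined as 'constant 0.0 : f32' would make this return true,
/// whereas 'constant 1.0 : f32' would not.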
1246 static bool isNeutralElementConst(AtomicRMWKind reductionKind, Value value,
1247                                   VectorizationState &state) {
1248   Type scalarTy = value.getType();
1249   if (!VectorType::isValidElementType(scalarTy))
1250     return false;
1251   Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1252                                              state.builder, value.getLoc());
1253   if (auto constOp = dyn_cast_or_null<ConstantOp>(value.getDefiningOp()))
1254     return constOp.value() == valueAttr;
1255   return false;
1256 }
1257 
1258 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1259 /// created and registered as replacement for the scalar loop. The builder's
1260 /// insertion point is set to the new loop's body so that subsequent vectorized
1261 /// operations are inserted into the new loop. If the loop is a vector
1262 /// dimension, the step of the newly created loop will reflect the
1263 /// vectorization factor used to vectorize that dimension.
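/// As an illustrative sketch, vectorizing the loop below along its only
/// dimension with a vector size of 128 is expected to turn:
///
///   affine.for %j = 0 to 1024 step 1 { ... }
///
/// into a loop whose step is scaled by the vectorization factor:
///
///   affine.for %j = 0 to 1024 step 128 { ... vectorized body ... }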
1264 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1265                                        VectorizationState &state) {
1266   const VectorizationStrategy &strategy = *state.strategy;
1267   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1268   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1269 
1270   // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1271   if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1272     LLVM_DEBUG(
1273         dbgs()
1274         << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1275         << forOp.getStep() << "\n");
1276     return nullptr;
1277   }
1278 
1279   // If we are vectorizing a vector dimension, compute a new step for the new
1280   // vectorized loop using the vectorization factor for the vector dimension.
1281   // Otherwise, propagate the step of the scalar loop.
1282   unsigned newStep;
1283   if (isLoopVecDim) {
1284     unsigned vectorDim = loopToVecDimIt->second;
1285     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1286     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1287     newStep = forOp.getStep() * forOpVecFactor;
1288   } else {
1289     newStep = forOp.getStep();
1290   }
1291 
1292   // Get information about reduction kinds.
1293   ArrayRef<LoopReduction> reductions;
1294   if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1295     auto it = strategy.reductionLoops.find(forOp);
1296     assert(it != strategy.reductionLoops.end() &&
1297            "Reduction descriptors not found when vectorizing a reduction loop");
1298     reductions = it->second;
1299     assert(reductions.size() == forOp.getNumIterOperands() &&
1300            "The size of reductions array must match the number of iter_args");
1301   }
1302 
1303   // Vectorize 'iter_args'.
1304   SmallVector<Value, 8> vecIterOperands;
1305   if (!isLoopVecDim) {
1306     for (auto operand : forOp.getIterOperands())
1307       vecIterOperands.push_back(vectorizeOperand(operand, state));
1308   } else {
1309     // For reduction loops we need to pass a vector of neutral elements as an
1310     // initial value of the accumulator. We will add the original initial value
1311     // later.
1312     for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
1313       vecIterOperands.push_back(createInitialVector(
1314           std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1315     }
1316   }
1317 
1318   auto vecForOp = state.builder.create<AffineForOp>(
1319       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1320       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1321       vecIterOperands,
1322       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1323         // Make sure we don't create a default terminator in the loop body as
1324         // the proper terminator will be added during vectorization.
1325         return;
1326       });
1327 
1328   // Register loop-related replacements:
1329   //   1) The new vectorized loop is registered as vector replacement of the
1330   //      scalar loop.
1331   //   2) The new iv of the vectorized loop is registered as scalar replacement
1332   //      since a scalar copy of the iv will prevail in the vectorized loop.
1333   //      TODO: A vector replacement will also be added in the future when
1334   //      vectorization of linear ops is supported.
1335   //   3) The new 'iter_args' region arguments are registered as vector
1336   //      replacements since they have been vectorized.
1337   //   4) If the loop performs a reduction along the vector dimension, a
1338   //      `vector.reduction` or similar op is inserted for each resulting value
1339   //      of the loop and its scalar value replaces the corresponding scalar
1340   //      result of the loop.
1341   state.registerOpVectorReplacement(forOp, vecForOp);
1342   state.registerValueScalarReplacement(forOp.getInductionVar(),
1343                                        vecForOp.getInductionVar());
1344   for (auto iterTuple :
1345        llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1346     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1347                                             std::get<1>(iterTuple));
1348 
1349   if (isLoopVecDim) {
1350     for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1351       // First, we reduce the vector returned from the loop into a scalar.
1352       Value reducedRes =
1353           getVectorReductionOp(reductions[i].kind, state.builder,
1354                                vecForOp.getLoc(), vecForOp.getResult(i));
1355       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1356                         << reducedRes);
1357       // Then we combine it with the original (scalar) initial value unless it
1358       // is equal to the neutral element of the reduction.
1359       Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1360       Value finalRes = reducedRes;
1361       if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1362         finalRes = getReductionOp(reductions[i].kind, state.builder,
1363                                   reducedRes.getLoc(), reducedRes, origInit);
1364       state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1365     }
1366   }
1367 
1368   if (isLoopVecDim)
1369     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1370 
1371   // Change insertion point so that upcoming vectorized instructions are
1372   // inserted into the vectorized loop's body.
1373   state.builder.setInsertionPointToStart(vecForOp.getBody());
1374 
1375   // If this is a reduction loop then we may need to create a mask to filter out
1376   // garbage in the last iteration.
1377   if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1378     createMask(vecForOp, state);
1379 
1380   return vecForOp;
1381 }
1382 
1383 /// Vectorizes an arbitrary operation by plain widening. We apply generic type
1384 /// widening to all its results and retrieve the vector counterparts of all its
1385 /// operands.
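/// As an illustrative sketch (assuming a 1-D strategy of size 128), an
/// elementwise op such as:
///
///   %res = addf %a, %b : f32
///
/// is recreated with the same op name on the vectorized operands and widened
/// result types:
///
///   %vres = addf %va, %vb : vector<128xf32>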
1386 static Operation *widenOp(Operation *op, VectorizationState &state) {
1387   SmallVector<Type, 8> vectorTypes;
1388   for (Value result : op->getResults())
1389     vectorTypes.push_back(
1390         VectorType::get(state.strategy->vectorSizes, result.getType()));
1391 
1392   SmallVector<Value, 8> vectorOperands;
1393   for (Value operand : op->getOperands()) {
1394     Value vecOperand = vectorizeOperand(operand, state);
1395     if (!vecOperand) {
1396       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1397       return nullptr;
1398     }
1399     vectorOperands.push_back(vecOperand);
1400   }
1401 
1402   // Create a clone of the op with the proper operands and return types.
1403   // TODO: The following assumes there is always an op with a fixed
1404   // name that works both in scalar mode and vector mode.
1405   // TODO: Is it worth considering an Operation.clone operation which
1406   // changes the type so we can promote an Operation with less boilerplate?
1407   OperationState vecOpState(op->getLoc(), op->getName().getStringRef(),
1408                             vectorOperands, vectorTypes, op->getAttrs(),
1409                             /*successors=*/{}, /*regions=*/{});
1410   Operation *vecOp = state.builder.createOperation(vecOpState);
1411   state.registerOpVectorReplacement(op, vecOp);
1412   return vecOp;
1413 }
1414 
1415 /// Vectorizes a yield operation by widening its types. The builder's insertion
1416 /// point is set after the vectorized parent op to continue vectorizing the
1417 /// operations after the parent op. When vectorizing a reduction loop a mask may
1418 /// be used to prevent adding garbage values to the accumulator.
1419 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1420                                          VectorizationState &state) {
1421   Operation *newYieldOp = widenOp(yieldOp, state);
1422   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1423 
1424   // If there is a mask for this loop then we must prevent garbage values from
1425   // being added to the accumulator by inserting `select` operations, for
1426   // example:
1427   //
1428   //   %res = addf %acc, %val : vector<128xf32>
1429   //   %res_masked = select %mask, %res, %acc : vector<128xi1>, vector<128xf32>
1430   //   affine.yield %res_masked : vector<128xf32>
1431   //
1432   if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1433     state.builder.setInsertionPoint(newYieldOp);
1434     for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
1435       Value result = newYieldOp->getOperand(i);
1436       Value iterArg = cast<AffineForOp>(newParentOp).getRegionIterArgs()[i];
1437       Value maskedResult = state.builder.create<SelectOp>(result.getLoc(), mask,
1438                                                           result, iterArg);
1439       LLVM_DEBUG(
1440           dbgs() << "\n[early-vect]+++++ masking a yielded vector value: "
1441                  << maskedResult);
1442       newYieldOp->setOperand(i, maskedResult);
1443     }
1444   }
1445 
1446   state.builder.setInsertionPointAfter(newParentOp);
1447   return newYieldOp;
1448 }
1449 
1450 /// Encodes Operation-specific behavior for vectorization. In general we
1451 /// assume that all operands of an op must be vectorized but this is not
1452 /// always true. In the future, it would be nice to have a trait that
1453 /// describes how a particular operation vectorizes. For now we implement the
1454 /// case distinction here. Returns a vectorized form of an operation or
1455 /// nullptr if vectorization fails.
1456 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1457 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1458 // do one-off logic here; ideally it would be TableGen'd.
1459 static Operation *vectorizeOneOperation(Operation *op,
1460                                         VectorizationState &state) {
1461   // Sanity checks.
1462   assert(!isa<vector::TransferReadOp>(op) &&
1463          "vector.transfer_read cannot be further vectorized");
1464   assert(!isa<vector::TransferWriteOp>(op) &&
1465          "vector.transfer_write cannot be further vectorized");
1466 
1467   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1468     return vectorizeAffineLoad(loadOp, state);
1469   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1470     return vectorizeAffineStore(storeOp, state);
1471   if (auto forOp = dyn_cast<AffineForOp>(op))
1472     return vectorizeAffineForOp(forOp, state);
1473   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1474     return vectorizeAffineYieldOp(yieldOp, state);
1475   if (auto constant = dyn_cast<ConstantOp>(op))
1476     return vectorizeConstant(constant, state);
1477 
1478   // Other ops with regions are not supported.
1479   if (op->getNumRegions() != 0)
1480     return nullptr;
1481 
1482   return widenOp(op, state);
1483 }
1484 
1485 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1486 /// vector container that preserves the relative nesting level of each loop with
1487 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1488 /// will be assigned to the loop in the current 'match'.
1489 static void
1490 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1491                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
1492   // Add a new empty level to the output if it doesn't exist already.
1493   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1494   if (currentLevel == loops.size())
1495     loops.push_back(SmallVector<AffineForOp, 2>());
1496 
1497   // Add current match and recursively visit its children.
1498   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1499   for (auto childMatch : match.getMatchedChildren()) {
1500     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1501   }
1502 }
1503 
1504 /// Converts all the nested loops in 'match' to a 2D vector container that
1505 /// preserves the relative nesting level of each loop with respect to the others
1506 /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
1507 /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
1508 /// 'loops[i+1]'.
1509 static void
1510 getMatchedAffineLoops(NestedMatch match,
1511                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
1512   getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1513 }
1514 
1515 /// Internal implementation to vectorize affine loops from a single loop nest
1516 /// using an n-D vectorization strategy.
1517 static LogicalResult
1518 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1519                   const VectorizationStrategy &strategy) {
1520   assert(loops[0].size() == 1 && "Expected single root loop");
1521   AffineForOp rootLoop = loops[0][0];
1522   VectorizationState state(rootLoop.getContext());
1523   state.builder.setInsertionPointAfter(rootLoop);
1524   state.strategy = &strategy;
1525 
1526   // Since patterns are recursive, they can very well intersect.
1527   // Since we do not want a fully greedy strategy in general, we decouple
1528   // pattern matching, from profitability analysis, from application.
1529   // As a consequence we must check that each root pattern is still
1530   // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1531   // TODO: implement a non-greedy profitability analysis that keeps only
1532   // non-intersecting patterns.
1533   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1534     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1535     return failure();
1536   }
1537 
1538   //////////////////////////////////////////////////////////////////////////////
1539   // Vectorize the scalar loop nest following a topological order. A new vector
1540   // loop nest with the vectorized operations is created along the process. If
1541   // vectorization succeeds, the scalar loop nest is erased. If vectorization
1542   // fails, the vector loop nest is erased and the scalar loop nest is not
1543   // modified.
1544   //////////////////////////////////////////////////////////////////////////////
1545 
1546   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1547     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1548     Operation *vectorOp = vectorizeOneOperation(op, state);
1549     if (!vectorOp) {
1550       LLVM_DEBUG(
1551           dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
1552                  << *op << "\n");
1553       return WalkResult::interrupt();
1554     }
1555 
1556     return WalkResult::advance();
1557   });
1558 
1559   if (opVecResult.wasInterrupted()) {
1560     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1561                       << rootLoop << "\n");
1562     // Erase vector loop nest if it was created.
1563     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1564     if (vecRootLoopIt != state.opVectorReplacement.end())
1565       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1566 
1567     return failure();
1568   }
1569 
1570   // Replace results of reduction loops with the scalar values computed using
1571   // `vector.reduce` or similar ops.
1572   for (auto resPair : state.loopResultScalarReplacement)
1573     resPair.first.replaceAllUsesWith(resPair.second);
1574 
1575   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1576          "Expected vector replacement for loop nest");
1577   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1578   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1579                     << *state.opVectorReplacement[rootLoop]);
1580 
1581   // Finish this vectorization pattern.
1582   state.finishVectorizationPattern(rootLoop);
1583   return success();
1584 }
1585 
1586 /// Extracts the matched loops and vectorizes them following a topological
1587 /// order. A new vector loop nest will be created if vectorization succeeds. The
1588 /// original loop nest won't be modified in any case.
1589 static LogicalResult vectorizeRootMatch(NestedMatch m,
1590                                         const VectorizationStrategy &strategy) {
1591   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1592   getMatchedAffineLoops(m, loopsToVectorize);
1593   return vectorizeLoopNest(loopsToVectorize, strategy);
1594 }
1595 
1596 /// Traverses all the loop matches and classifies them into intersection
1597 /// buckets. Two matches intersect if either of them encloses the other. A
1598 /// match intersects with a bucket if the match intersects with the root
1599 /// (outermost) loop in that bucket.
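/// For example (an illustrative sketch), given matches rooted at loops %i0,
/// %i1 (nested inside %i0) and %i2 (disjoint from %i0), the matches rooted at
/// %i0 and %i1 would end up in the same bucket, with %i0 as the bucket root,
/// while the match rooted at %i2 would get its own bucket.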
1600 static void computeIntersectionBuckets(
1601     ArrayRef<NestedMatch> matches,
1602     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1603   assert(intersectionBuckets.empty() && "Expected empty output");
1604   // Keeps track of the root (outermost) loop of each bucket.
1605   SmallVector<AffineForOp, 8> bucketRoots;
1606 
1607   for (const NestedMatch &match : matches) {
1608     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1609     bool intersects = false;
1610     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1611       AffineForOp bucketRoot = bucketRoots[i];
1612       // Add match to the bucket if the bucket root encloses the match root.
1613       if (bucketRoot->isAncestor(matchRoot)) {
1614         intersectionBuckets[i].push_back(match);
1615         intersects = true;
1616         break;
1617       }
1618       // Add match to the bucket if the match root encloses the bucket root. The
1619       // match root becomes the new bucket root.
1620       if (matchRoot->isAncestor(bucketRoot)) {
1621         bucketRoots[i] = matchRoot;
1622         intersectionBuckets[i].push_back(match);
1623         intersects = true;
1624         break;
1625       }
1626     }
1627 
1628     // Match doesn't intersect with any existing bucket. Create a new bucket for
1629     // it.
1630     if (!intersects) {
1631       bucketRoots.push_back(matchRoot);
1632       intersectionBuckets.push_back(SmallVector<NestedMatch, 8>());
1633       intersectionBuckets.back().push_back(match);
1634     }
1635   }
1636 }
1637 
1638 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1639 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1640 /// factor is applied inner-to-outer to the loops of each loop nest.
1641 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1642 /// vectorization order. `reductionLoops` can be provided to specify loops which
1643 /// can be vectorized along the reduction dimension.
1644 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1645                            ArrayRef<int64_t> vectorSizes,
1646                            ArrayRef<int64_t> fastestVaryingPattern,
1647                            const ReductionLoopMap &reductionLoops) {
1648   assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
1649          "Vectorizing reductions is supported only for 1-D vectors");
1650 
1651   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1652   Optional<NestedPattern> pattern =
1653       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1654   if (!pattern.hasValue()) {
1655     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1656     return;
1657   }
1658 
1659   LLVM_DEBUG(dbgs() << "\n******************************************");
1660   LLVM_DEBUG(dbgs() << "\n******************************************");
1661   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1662   LLVM_DEBUG(dbgs() << *parentOp << "\n");
1663 
1664   unsigned patternDepth = pattern->getDepth();
1665 
1666   // Compute all the pattern matches and classify them into buckets of
1667   // intersecting matches.
1668   SmallVector<NestedMatch, 32> allMatches;
1669   pattern->match(parentOp, &allMatches);
1670   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1671   computeIntersectionBuckets(allMatches, intersectionBuckets);
1672 
1673   // Iterate over all buckets and vectorize the matches eagerly. We can only
1674   // vectorize one match from each bucket since all the matches within a bucket
1675   // intersect.
1676   for (auto &intersectingMatches : intersectionBuckets) {
1677     for (NestedMatch &match : intersectingMatches) {
1678       VectorizationStrategy strategy;
1679       // TODO: depending on profitability, elect to reduce the vector size.
1680       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1681       strategy.reductionLoops = reductionLoops;
1682       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1683                                       patternDepth, &strategy))) {
1684         continue;
1685       }
1686       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1687                                 &strategy);
1688       // Vectorize match. Skip the rest of intersecting matches in the bucket if
1689       // vectorization succeeded.
1690       // TODO: if pattern does not apply, report it; alter the cost/benefit.
1691       // TODO: some diagnostics if failure to vectorize occurs.
1692       if (succeeded(vectorizeRootMatch(match, strategy)))
1693         break;
1694     }
1695   }
1696 
1697   LLVM_DEBUG(dbgs() << "\n");
1698 }
1699 
1708 /// Applies vectorization to the current function by searching over a bunch of
1709 /// predetermined patterns.
1710 void Vectorize::runOnFunction() {
1711   FuncOp f = getFunction();
1712   if (!fastestVaryingPattern.empty() &&
1713       fastestVaryingPattern.size() != vectorSizes.size()) {
1714     f.emitRemark("Fastest varying pattern specified with a different size "
1715                  "than the vector size.");
1716     return signalPassFailure();
1717   }
1718 
1719   if (vectorizeReductions && vectorSizes.size() != 1) {
1720     f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
1721     return signalPassFailure();
1722   }
1723 
1724   DenseSet<Operation *> parallelLoops;
1725   ReductionLoopMap reductionLoops;
1726 
1727   // If 'vectorize-reduction=true' is provided, we also populate the
1728   // `reductionLoops` map.
1729   if (vectorizeReductions) {
1730     f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
1731       SmallVector<LoopReduction, 2> reductions;
1732       if (isLoopParallel(loop, &reductions)) {
1733         parallelLoops.insert(loop);
1734         // If it's not a reduction loop, adding it to the map is not necessary.
1735         if (!reductions.empty())
1736           reductionLoops[loop] = reductions;
1737       }
1738     });
1739   } else {
1740     f.walk([&parallelLoops](AffineForOp loop) {
1741       if (isLoopParallel(loop))
1742         parallelLoops.insert(loop);
1743     });
1744   }
1745 
1746   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1747   NestedPatternContext mlContext;
1748   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
1749                  reductionLoops);
1750 }
1751 
1752 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1753 /// SuperVectorizer:
1754 ///   * There must be at least one loop.
1755 ///   * There must be a single root loop (nesting level 0).
1756 ///   * Each loop at a given nesting level must be nested in a loop from a
1757 ///     previous nesting level.
1758 static LogicalResult
1759 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1760   // Expected at least one loop.
1761   if (loops.empty())
1762     return failure();
1763 
1764   // Expected only one root loop.
1765   if (loops[0].size() != 1)
1766     return failure();
1767 
1768   // Traverse loops outer-to-inner to check some invariants.
1769   for (int i = 1, end = loops.size(); i < end; ++i) {
1770     for (AffineForOp loop : loops[i]) {
1771       // Check that each loop at this level is nested in one of the loops from
1772       // the previous level.
1773       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1774             return maybeParent->isProperAncestor(loop);
1775           }))
1776         return failure();
1777 
1778       // Check that each loop at this level is not nested in another loop from
1779       // this level.
1780       for (AffineForOp sibling : loops[i]) {
1781         if (sibling->isProperAncestor(loop))
1782           return failure();
1783       }
1784     }
1785   }
1786 
1787   return success();
1788 }
1789 
1790 namespace mlir {
1791 
1792 /// External utility to vectorize affine loops in 'loops' using the n-D
1793 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1794 /// factor is applied inner-to-outer to the loops of each loop nest.
1795 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1796 /// vectorization order.
1797 /// If `reductionLoops` is not empty, the given reduction loops may be
1798 /// vectorized along the reduction dimension.
1799 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
1800 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1801                           ArrayRef<int64_t> vectorSizes,
1802                           ArrayRef<int64_t> fastestVaryingPattern,
1803                           const ReductionLoopMap &reductionLoops) {
1804   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1805   NestedPatternContext mlContext;
1806   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
1807                  reductionLoops);
1808 }
1809 
1810 /// External utility to vectorize affine loops from a single loop nest using an
1811 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1812 /// Loops are provided in a 2D vector container. The first dimension represents
1813 /// the nesting level relative to the loops to be vectorized. The second
1814 /// dimension contains the loops. This means that:
1815 ///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
1816 ///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
1817 ///
1818 /// For example, for the following loop nest:
1819 ///
1820 ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1821 ///               %out0: memref<64x128x512xf32>,
1822 ///               %out1: memref<64x128x128xf32>) {
1823 ///     affine.for %i0 = 0 to 64 {
1824 ///       affine.for %i1 = 0 to 128 {
1825 ///         affine.for %i2 = 0 to 512 {
1826 ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1827 ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1828 ///         }
1829 ///         affine.for %i3 = 0 to 128 {
1830 ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1831 ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1832 ///         }
1833 ///       }
1834 ///     }
1835 ///     return
1836 ///   }
1837 ///
1838 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1839 /// innermost loops;
1840 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1841 /// loops;
1842 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1843 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1844 /// loops = {{%i1}}, to vectorize only the middle loop.
1845 LogicalResult
1846 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1847                         const VectorizationStrategy &strategy) {
1848   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1849   NestedPatternContext mlContext;
1850   if (failed(verifyLoopNesting(loops)))
1851     return failure();
1852   return vectorizeLoopNest(loops, strategy);
1853 }
1854 
1855 std::unique_ptr<OperationPass<FuncOp>>
1856 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1857   return std::make_unique<Vectorize>(virtualVectorSize);
1858 }
1859 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1860   return std::make_unique<Vectorize>();
1861 }
1862 
1863 } // namespace mlir
1864