1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Analysis/AffineAnalysis.h"
16 #include "mlir/Analysis/LoopAnalysis.h"
17 #include "mlir/Analysis/NestedMatcher.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Vector/VectorOps.h"
21 #include "mlir/Dialect/Vector/VectorUtils.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
23 #include "mlir/Support/LLVM.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/Support/Debug.h"
26 
27 using namespace mlir;
28 using namespace vector;
29 
30 ///
31 /// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode in a single,
/// compact representation in the vector types the information that is expected
/// to reduce the impact of the phase ordering problem.
35 ///
36 /// Vector granularity:
37 /// ===================
38 /// This pass is designed to perform vectorization at a super-vector
39 /// granularity. A super-vector is loosely defined as a vector type that is a
40 /// multiple of a "good" vector size so the HW can efficiently implement a set
41 /// of high-level primitives. Multiple is understood along any dimension; e.g.
42 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
43 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
44 /// efficiently implement a set of high-level primitives" is not necessarily an
45 /// integer multiple of actual hardware registers. We leave details of this
46 /// distinction unspecified for now.
47 ///
/// Some may prefer the terminology of a "tile of HW vectors". In this case, one
/// should note that super-vectors implement an "always full tile" abstraction.
/// They guarantee no partial-tile separation is necessary by relying on a
/// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
/// transposition if necessary. In the general case this will require a scoped
/// allocation in some notional local memory.
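///
/// For instance (an illustrative sketch, not the output of this pass), a
/// transposing read from a 2-D memref into a 2-D super-vector could be
/// expressed with such a transfer as:
/// ```mlir
///   %vT = vector.transfer_read %A[%i, %j]
///       {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///       : memref<?x?xf32>, vector<32x256xf32>
/// ```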
55 ///
56 /// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn into a single, compact representation in the vector
/// types the information that is expected to reduce the impact of the phase
59 /// ordering problem. Indeed, a vector type conveys information that:
60 ///   1. the associated loops have dependency semantics that do not prevent
61 ///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that are
63 ///      compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
68 ///   4. the underlying memrefs are accessed in some notional contiguous way
69 ///      that allows loading into vectors with some amount of spatial locality;
70 /// In other words, super-vectorization provides a level of separation of
71 /// concern by way of opacity to subsequent passes. This has the effect of
72 /// encapsulating and propagating vectorization constraints down the list of
73 /// passes until we are ready to lower further.
74 ///
75 /// For a particular target, a notion of minimal n-d vector size will be
76 /// specified and vectorization targets a multiple of those. In the following
77 /// paragraph, let "k ." represent "a multiple of", to be understood as a
78 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
79 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
80 ///
81 /// Some non-exhaustive notable super-vector sizes of interest include:
82 ///   - CPU: vector<k . HW_vector_size>,
83 ///          vector<k' . core_count x k . HW_vector_size>,
84 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
85 ///   - GPU: vector<k . warp_size>,
86 ///          vector<k . warp_size x float2>,
87 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
89 ///
90 /// Loops and operations are emitted that operate on those super-vector shapes.
91 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
92 /// passes are expected to be (gradually) more target-specific.
93 ///
94 /// At a high level, a vectorized load in a loop will resemble:
95 /// ```mlir
96 ///   affine.for %i = ? to ? step ? {
97 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
98 ///   }
99 /// ```
100 /// It is the responsibility of the implementation of vector.transfer_read to
101 /// materialize vector registers from the original scalar memrefs. A later (more
102 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
104 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
105 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
106 ///      thus opening opportunities for unrolling and pipelining. This is an
107 ///      instance of library call "whiteboxing"; or
///   2. later, in a target-specific lowering pass or a hand-written library
///      call, achieving full separation of concerns. This is an instance of a
///      library call; or
111 ///   3. a mix of both, e.g. based on a model.
112 /// In the future, these operations will expose a contract to constrain the
113 /// search on vectorization patterns and sizes.
114 ///
115 /// Occurrence of super-vectorization in the compiler flow:
116 /// =======================================================
117 /// This is an active area of investigation. We start with 2 remarks to position
118 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
119 /// and LLVM SLP Vectorizer.
120 ///
121 /// LLVM VPLAN:
122 /// -----------
123 /// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN.
/// Consider, for instance, a traditional polyhedral compilation flow (e.g. the
/// PPCG project, which uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping, and register optimizations), in which vectorization is
/// applied after those steps and before unrolling. When vectorization is
/// applied at this *late* level in a typical
130 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
131 /// super-vectorization is expected to match (or subsume) the type of patterns
132 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
133 /// is higher level and our implementation should be significantly simpler. Also
/// note that in this mode, recursive patterns are probably a bit of overkill
135 /// although it is reasonable to expect that mixing a bit of outer loop and
136 /// inner loop vectorization + unrolling will provide interesting choices to
137 /// MLIR.
138 ///
139 /// LLVM SLP Vectorizer:
140 /// --------------------
141 /// Super-vectorization however is not meant to be usable in a similar fashion
142 /// to the SLP vectorizer. The main difference lies in the information that
143 /// both vectorizers use: super-vectorization examines contiguity of memory
144 /// references along fastest varying dimensions and loops with recursive nested
145 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
146 /// the other hand, performs flat pattern matching inside a single unrolled loop
147 /// body and stitches together pieces of load and store operations into full
148 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
149 /// innermost loop, control-flow dependent patterns that super-vectorization may
150 /// not be able to capture easily. In other words, super-vectorization does not
151 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
152 ///
153 /// Ongoing investigations:
154 /// -----------------------
155 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
157 /// opportunities in the context of the traditional polyhedral compiler flow
158 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
159 /// we expect to experiment with super-vectorization:
160 /// 1. Right after language lowering to MLIR: this is the earliest time where
161 ///    super-vectorization is expected to be applied. At this level, all the
162 ///    language/user/library-level annotations are available and can be fully
163 ///    exploited. Examples include loop-type annotations (such as parallel,
164 ///    reduction, scan, dependence distance vector, vectorizable) as well as
///    memory access annotations (such as guaranteed non-aliasing writes,
///    indirect accesses that are permutations by construction), or the fact
///    that a particular operation is prescribed atomic by the user. At this
168 ///    level, anything that enriches what dependence analysis can do should be
169 ///    aggressively exploited. At this level we are close to having explicit
170 ///    vector types in the language, except we do not impose that burden on the
171 ///    programmer/library: we derive information from scalar code + annotations.
172 /// 2. After dependence analysis and before polyhedral scheduling: the
173 ///    information that supports vectorization does not need to be supplied by a
174 ///    higher level of abstraction. Traditional dependence analysis is available
175 ///    in MLIR and will be used to drive vectorization and cost models.
176 ///
177 /// Let's pause here and remark that applying super-vectorization as described
178 /// in 1. and 2. presents clear opportunities and risks:
///   - the opportunity is that vectorization is burned into the type system
///   and is protected from the adverse effects of loop scheduling, tiling,
///   loop interchange and all passes downstream. Provided that subsequent
///   passes are able to operate on vector types, the vector shapes, associated
///   loop iterator properties, alignment, and contiguity of fastest varying
///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true wherever
188 ///   vectorization is applied) and b. that imposing vectorization constraints
189 ///   too early may be overall detrimental to loop fusion, tiling and other
190 ///   transformations because the dependence distances are coarsened when
191 ///   operating on elemental vector types. For this reason, the pattern
192 ///   profitability analysis should include a component that also captures the
193 ///   maximal amount of fusion available under a particular pattern. This is
194 ///   still at the stage of rough ideas but in this context, search is our
195 ///   friend as the Tensor Comprehensions and auto-TVM contributions
196 ///   demonstrated previously.
197 /// Bottom-line is we do not yet have good answers for the above but aim at
198 /// making it easy to answer such questions.
199 ///
200 /// Back to our listing, the last places where early super-vectorization makes
201 /// sense are:
202 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
203 ///    to improve locality, parallelism and be configurable (e.g. max-fuse,
204 ///    smart-fuse etc). They can also have adverse effects on contiguity
205 ///    properties that are required for vectorization but the vector.transfer
206 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
207 ///    these properties.
208 /// 4. right after polyhedral-style scheduling+tiling;
209 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
210 ///    probably the most promising places because applying tiling achieves a
211 ///    separation of concerns that allows rescheduling to worry less about
212 ///    locality and more about parallelism and distribution (e.g. min-fuse).
213 ///
214 /// At these levels the risk-reward looks different: on one hand we probably
215 /// lost a good deal of language/user/library-level annotation; on the other
216 /// hand we gained parallelism and locality through scheduling and tiling.
217 /// However we probably want to ensure tiling is compatible with the
218 /// full-tile-only abstraction used in super-vectorization or suffer the
219 /// consequences. It is too early to place bets on what will win but we expect
220 /// super-vectorization to be the right abstraction to allow exploring at all
221 /// these levels. And again, search is our friend.
222 ///
223 /// Lastly, we mention it again here:
/// 6. as an MLIR-based alternative to VPLAN.
225 ///
226 /// Lowering, unrolling, pipelining:
227 /// ================================
228 /// TODO: point to the proper places.
229 ///
230 /// Algorithm:
231 /// ==========
232 /// The algorithm proceeds in a few steps:
233 ///  1. defining super-vectorization patterns and matching them on the tree of
234 ///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
236 ///     loops that have a. conformable loop annotations attached (e.g. parallel,
237 ///     reduction, vectorizable, ...) as well as b. all contiguous load/store
238 ///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
240 ///  2. analyzing those patterns for profitability (TODO: and
241 ///     interference);
242 ///  3. then, for each pattern in order:
243 ///    a. applying iterative rewriting of the loops and all their nested
244 ///       operations in topological order. Rewriting is implemented by
245 ///       coarsening the loops and converting operations and operands to their
246 ///       vector forms. Processing operations in topological order is relatively
247 ///       simple due to the structured nature of the control-flow
248 ///       representation. This order ensures that all the operands of a given
249 ///       operation have been vectorized before the operation itself in a single
250 ///       traversal, except for operands defined outside of the loop nest. The
251 ///       algorithm can convert the following operations to their vector form:
252 ///         * Affine load and store operations are converted to opaque vector
253 ///           transfer read and write operations.
254 ///         * Scalar constant operations/operands are converted to vector
255 ///           constant operations (splat).
///         * Uniform operands (only operands defined outside of the loop nest,
///           for now) are broadcast to a vector (see the sketch after this
///           list).
258 ///           TODO: Support more uniform cases.
259 ///         * Affine for operations with 'iter_args' are vectorized by
260 ///           vectorizing their 'iter_args' operands and results.
261 ///           TODO: Support more complex loops with divergent lbs and/or ubs.
262 ///         * The remaining operations in the loop nest are vectorized by
263 ///           widening their scalar types to vector types.
264 ///    b. if everything under the root AffineForOp in the current pattern
265 ///       is vectorized properly, we commit that loop to the IR and remove the
266 ///       scalar loop. Otherwise, we discard the vectorized loop and keep the
267 ///       original scalar loop.
268 ///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
271 ///       re-verify that the pattern is still vectorizable. This is expected to
272 ///       make cost models more difficult to write and is subject to improvement
273 ///       in the future.
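///
/// As an illustrative sketch of the uniform-operand case in step 3.a above
/// (assuming a 128-wide 1-D strategy and a function argument %s : f32 used
/// inside the loop), the broadcast emitted right after the definition of %s
/// would be:
/// ```mlir
///   %vs = vector.broadcast %s : f32 to vector<128xf32>
/// ```
/// and %vs is registered as the vector replacement for the uses of %s inside
/// the vectorized loop nest.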
274 ///
275 /// Choice of loop transformation to support the algorithm:
276 /// =======================================================
277 /// The choice of loop transformation to apply for coarsening vectorized loops
278 /// is still subject to exploratory tradeoffs. In particular, say we want to
279 /// vectorize by a factor 128, we want to transform the following input:
280 /// ```mlir
281 ///   affine.for %i = %M to %N {
282 ///     %a = affine.load %A[%i] : memref<?xf32>
283 ///   }
284 /// ```
285 ///
286 /// Traditionally, one would vectorize late (after scheduling, tiling,
287 /// memory promotion etc) say after stripmining (and potentially unrolling in
288 /// the case of LLVM's SLP vectorizer):
289 /// ```mlir
290 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
291 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
292 ///       %a = affine.load %A[%ii] : memref<?xf32>
293 ///     }
294 ///   }
295 /// ```
296 ///
297 /// Instead, we seek to vectorize early and freeze vector types before
298 /// scheduling, so we want to generate a pattern that resembles:
299 /// ```mlir
300 ///   affine.for %i = ? to ? step ? {
301 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
302 ///   }
303 /// ```
304 ///
/// Among the alternatives for materializing this coarsening:
/// i. simply dividing the lower / upper bounds by 128 creates issues
306 ///    when representing expressions such as ii + 1 because now we only
307 ///    have access to original values that have been divided. Additional
308 ///    information is needed to specify accesses at below-128 granularity;
309 /// ii. another alternative is to coarsen the loop step but this may have
310 ///    consequences on dependence analysis and fusability of loops: fusable
311 ///    loops probably need to have the same step (because we don't want to
312 ///    stripmine/unroll to enable fusion).
313 /// As a consequence, we choose to represent the coarsening using the loop
314 /// step for now and reevaluate in the future. Note that we can renormalize
315 /// loop steps later if/when we have evidence that they are problematic.
316 ///
317 /// For the simple strawman example above, vectorizing for a 1-D vector
318 /// abstraction of size 128 returns code similar to:
319 /// ```mlir
320 ///   affine.for %i = %M to %N step 128 {
321 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
322 ///   }
323 /// ```
324 ///
325 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
326 /// ========================================================================
327 ///   1. lowering to concrete vector types for various HW;
328 ///   2. reduction support for n-D vectorization and non-unit steps;
329 ///   3. non-effecting padding during vector.transfer_read and filter during
330 ///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
332 ///      (hopefully without read-modify-writes);
333 ///   5. control-flow support;
334 ///   6. cost-models, heuristics and search;
335 ///   7. Op implementation, extensions and implication on memref views;
336 ///   8. many TODOs left around.
337 ///
338 /// Examples:
339 /// =========
340 /// Consider the following Function:
341 /// ```mlir
342 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
343 ///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
344 ///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
345 ///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
346 ///   %f1 = constant 1.0 : f32
347 ///   %f2 = constant 2.0 : f32
348 ///   affine.for %i0 = 0 to %M {
349 ///     affine.for %i1 = 0 to %N {
350 ///       // non-scoped %f1
351 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
352 ///     }
353 ///   }
354 ///   affine.for %i2 = 0 to %M {
355 ///     affine.for %i3 = 0 to %N {
356 ///       // non-scoped %f2
357 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
358 ///     }
359 ///   }
360 ///   affine.for %i4 = 0 to %M {
361 ///     affine.for %i5 = 0 to %N {
362 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
363 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
364 ///       %s5 = addf %a5, %b5 : f32
365 ///       // non-scoped %f1
366 ///       %s6 = addf %s5, %f1 : f32
367 ///       // non-scoped %f2
368 ///       %s7 = addf %s5, %f2 : f32
369 ///       // diamond dependency.
370 ///       %s8 = addf %s7, %s6 : f32
371 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
372 ///     }
373 ///   }
374 ///   %c7 = constant 7 : index
375 ///   %c42 = constant 42 : index
376 ///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
377 ///   return %res : f32
378 /// }
379 /// ```
380 ///
381 /// The -affine-vectorize pass with the following arguments:
382 /// ```
383 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
384 /// ```
385 ///
386 /// produces this standard innermost-loop vectorized code:
387 /// ```mlir
388 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
389 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
390 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
391 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
392 ///   %cst = constant 1.0 : f32
393 ///   %cst_0 = constant 2.0 : f32
394 ///   affine.for %i0 = 0 to %arg0 {
395 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<256xf32>
398 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
399 ///                vector<256xf32>, memref<?x?xf32>
400 ///     }
401 ///   }
402 ///   affine.for %i2 = 0 to %arg0 {
403 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<256xf32>
406 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
407 ///                vector<256xf32>, memref<?x?xf32>
408 ///     }
409 ///   }
410 ///   affine.for %i4 = 0 to %arg0 {
411 ///     affine.for %i5 = 0 to %arg1 step 256 {
412 ///       %3 = vector.transfer_read %0[%i4, %i5] :
413 ///            memref<?x?xf32>, vector<256xf32>
414 ///       %4 = vector.transfer_read %1[%i4, %i5] :
415 ///            memref<?x?xf32>, vector<256xf32>
416 ///       %5 = addf %3, %4 : vector<256xf32>
///       %cst_3 = constant dense<1.0> : vector<256xf32>
419 ///       %6 = addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = constant dense<2.0> : vector<256xf32>
422 ///       %7 = addf %5, %cst_4 : vector<256xf32>
423 ///       %8 = addf %7, %6 : vector<256xf32>
424 ///       vector.transfer_write %8, %2[%i4, %i5] :
425 ///                vector<256xf32>, memref<?x?xf32>
426 ///     }
427 ///   }
428 ///   %c7 = constant 7 : index
429 ///   %c42 = constant 42 : index
430 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
431 ///   return %9 : f32
432 /// }
433 /// ```
434 ///
435 /// The -affine-vectorize pass with the following arguments:
436 /// ```
437 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
438 /// ```
439 ///
440 /// produces this more interesting mixed outer-innermost-loop vectorized code:
441 /// ```mlir
442 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
443 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
444 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
445 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
446 ///   %cst = constant 1.0 : f32
447 ///   %cst_0 = constant 2.0 : f32
448 ///   affine.for %i0 = 0 to %arg0 step 32 {
449 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<32x256xf32>
452 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
453 ///                vector<32x256xf32>, memref<?x?xf32>
454 ///     }
455 ///   }
456 ///   affine.for %i2 = 0 to %arg0 step 32 {
457 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<32x256xf32>
460 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
461 ///                vector<32x256xf32>, memref<?x?xf32>
462 ///     }
463 ///   }
464 ///   affine.for %i4 = 0 to %arg0 step 32 {
465 ///     affine.for %i5 = 0 to %arg1 step 256 {
466 ///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
468 ///       %4 = vector.transfer_read %1[%i4, %i5] :
469 ///                memref<?x?xf32>, vector<32x256xf32>
470 ///       %5 = addf %3, %4 : vector<32x256xf32>
///       %cst_3 = constant dense<1.0> : vector<32x256xf32>
473 ///       %6 = addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = constant dense<2.0> : vector<32x256xf32>
476 ///       %7 = addf %5, %cst_4 : vector<32x256xf32>
477 ///       %8 = addf %7, %6 : vector<32x256xf32>
478 ///       vector.transfer_write %8, %2[%i4, %i5] :
479 ///                vector<32x256xf32>, memref<?x?xf32>
480 ///     }
481 ///   }
482 ///   %c7 = constant 7 : index
483 ///   %c42 = constant 42 : index
484 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
485 ///   return %9 : f32
486 /// }
487 /// ```
488 ///
489 /// Of course, much more intricate n-D imperfectly-nested patterns can be
490 /// vectorized too and specified in a fully declarative fashion.
491 ///
492 /// Reduction:
493 /// ==========
494 /// Vectorizing reduction loops along the reduction dimension is supported if:
495 /// - the reduction kind is supported,
496 /// - the vectorization is 1-D, and
/// - the step size of the loop equals one.
498 ///
/// Compared to the non-vector-dimension case, two additional things are done
500 /// during vectorization of such loops:
501 /// - The resulting vector returned from the loop is reduced to a scalar using
502 ///   `vector.reduce`.
503 /// - In some cases a mask is applied to the vector yielded at the end of the
504 ///   loop to prevent garbage values from being written to the accumulator.
505 ///
/// Reduction vectorization is switched off by default; it can be enabled by
507 /// passing a map from loops to reductions to utility functions, or by passing
508 /// `vectorize-reductions=true` to the vectorization pass.
509 ///
510 /// Consider the following example:
511 /// ```mlir
512 /// func @vecred(%in: memref<512xf32>) -> f32 {
513 ///   %cst = constant 0.000000e+00 : f32
514 ///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
515 ///     %ld = affine.load %in[%i] : memref<512xf32>
516 ///     %cos = math.cos %ld : f32
517 ///     %add = addf %part_sum, %cos : f32
518 ///     affine.yield %add : f32
519 ///   }
520 ///   return %sum : f32
521 /// }
522 /// ```
523 ///
524 /// The -affine-vectorize pass with the following arguments:
525 /// ```
526 /// -affine-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
527 ///                    vectorize-reductions=true"
528 /// ```
529 /// produces the following output:
530 /// ```mlir
531 /// #map = affine_map<(d0) -> (-d0 + 500)>
532 /// func @vecred(%arg0: memref<512xf32>) -> f32 {
533 ///   %cst = constant 0.000000e+00 : f32
534 ///   %cst_0 = constant dense<0.000000e+00> : vector<128xf32>
535 ///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
536 ///           -> (vector<128xf32>) {
537 ///     // %2 is the number of iterations left in the original loop.
538 ///     %2 = affine.apply #map(%arg1)
539 ///     %3 = vector.create_mask %2 : vector<128xi1>
540 ///     %cst_1 = constant 0.000000e+00 : f32
541 ///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
542 ///                     memref<512xf32>, vector<128xf32>
543 ///     %5 = math.cos %4 : vector<128xf32>
544 ///     %6 = addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
546 ///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
547 ///     affine.yield %7 : vector<128xf32>
548 ///   }
549 ///   %1 = vector.reduction "add", %0 : vector<128xf32> into f32
550 ///   return %1 : f32
551 /// }
552 /// ```
553 ///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result (500 iterations = 3
/// full 128-element steps + 116 remaining elements). The mask is full of ones
/// in every iteration except for the last one, in which it has the form
/// `11...100...0` with 116 ones and 12 zeros.
558 
559 #define DEBUG_TYPE "early-vect"
560 
561 using llvm::dbgs;
562 
563 /// Forward declaration.
564 static FilterFunctionType
565 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
566                              int fastestVaryingMemRefDimension);
567 
568 /// Creates a vectorization pattern from the command line arguments.
569 /// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// llvm::None, which conservatively results in no vectorization.
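/// For example (an illustrative call), makePattern(parallelLoops, 2, {1, 0})
/// returns a 2-deep nested pattern in which the outer loop's load/store
/// accesses must be invariant or vary along memref dimension 1 and the inner
/// loop's along dimension 0, mirroring the `test-fastest-varying=1,0` flag
/// used in the examples above.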
572 static Optional<NestedPattern>
573 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
574             ArrayRef<int64_t> fastestVaryingPattern) {
575   using matcher::For;
576   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
577   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
578   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
579   switch (vectorRank) {
580   case 1:
581     return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
582   case 2:
583     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
584                For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
585   case 3:
586     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
587                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
588                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
589   default: {
590     return llvm::None;
591   }
592   }
593 }
594 
595 static NestedPattern &vectorTransferPattern() {
596   static auto pattern = matcher::Op([](Operation &op) {
597     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
598   });
599   return pattern;
600 }
601 
602 namespace {
603 
604 /// Base state for the vectorize pass.
605 /// Command line arguments are preempted by non-empty pass arguments.
606 struct Vectorize : public AffineVectorizeBase<Vectorize> {
607   Vectorize() = default;
608   Vectorize(ArrayRef<int64_t> virtualVectorSize);
609   void runOnFunction() override;
610 };
611 
612 } // end anonymous namespace
613 
614 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
615   vectorSizes = virtualVectorSize;
616 }
617 
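/// Registers in `strategy` the vector dimension along which `loop` will be
/// vectorized, based on its depth within the matched pattern; loops nested too
/// far out for the number of requested vector dimensions are left scalar. For
/// example, with patternDepth = 3 and a 2-D vector strategy, the innermost
/// loop (depthInPattern = 2) maps to vector dimension 1, the middle loop maps
/// to dimension 0, and the outermost loop is not vectorized.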
618 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
619                                       unsigned patternDepth,
620                                       VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth must be greater than depthInPattern");
623   if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
624     // Don't vectorize this loop
625     return;
626   }
627   strategy->loopToVectorDim[loop] =
628       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
629 }
630 
631 /// Implements a simple strawman strategy for vectorization.
632 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
633 /// greedily assigns the fastest varying dimension ** of the vector ** to the
634 /// innermost loop in the pattern.
635 /// When coupled with a pattern that looks for the fastest varying dimension in
636 /// load/store MemRefs, this creates a generic vectorization strategy that works
637 /// for any loop in a hierarchy (outermost, innermost or intermediate).
638 ///
639 /// TODO: In the future we should additionally increase the power of the
640 /// profitability analysis along 3 directions:
641 ///   1. account for loop extents (both static and parametric + annotations);
642 ///   2. account for data layout permutations;
643 ///   3. account for impact of vectorization on maximal loop fusion.
644 /// Then we can quantify the above to build a cost model and search over
645 /// strategies.
646 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
647                                           unsigned depthInPattern,
648                                           unsigned patternDepth,
649                                           VectorizationStrategy *strategy) {
650   for (auto m : matches) {
651     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
652                                     patternDepth, strategy))) {
653       return failure();
654     }
655     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
656                               patternDepth, strategy);
657   }
658   return success();
659 }
660 
661 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
662 
663 namespace {
664 
665 struct VectorizationState {
666 
667   VectorizationState(MLIRContext *context) : builder(context) {}
668 
669   /// Registers the vector replacement of a scalar operation and its result
670   /// values. Both operations must have the same number of results.
671   ///
672   /// This utility is used to register the replacement for the vast majority of
673   /// the vectorized operations.
674   ///
675   /// Example:
676   ///   * 'replaced': %0 = addf %1, %2 : f32
677   ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
678   void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
679 
680   /// Registers the vector replacement of a scalar value. The replacement
681   /// operation should have a single result, which replaces the scalar value.
682   ///
683   /// This utility is used to register the vector replacement of block arguments
684   /// and operation results which are not directly vectorized (i.e., their
685   /// scalar version still exists after vectorization), like uniforms.
686   ///
687   /// Example:
688   ///   * 'replaced': block argument or operation outside of the vectorized
689   ///     loop.
690   ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
691   void registerValueVectorReplacement(Value replaced, Operation *replacement);
692 
693   /// Registers the vector replacement of a block argument (e.g., iter_args).
694   ///
695   /// Example:
696   ///   * 'replaced': 'iter_arg' block argument.
697   ///   * 'replacement': vectorized 'iter_arg' block argument.
698   void registerBlockArgVectorReplacement(BlockArgument replaced,
699                                          BlockArgument replacement);
700 
701   /// Registers the scalar replacement of a scalar value. 'replacement' must be
702   /// scalar. Both values must be block arguments. Operation results should be
  /// replaced using the 'registerOp*' utilities.
704   ///
705   /// This utility is used to register the replacement of block arguments
706   /// that are within the loop to be vectorized and will continue being scalar
707   /// within the vector loop.
708   ///
709   /// Example:
710   ///   * 'replaced': induction variable of a loop to be vectorized.
711   ///   * 'replacement': new induction variable in the new vector loop.
712   void registerValueScalarReplacement(BlockArgument replaced,
713                                       BlockArgument replacement);
714 
715   /// Registers the scalar replacement of a scalar result returned from a
716   /// reduction loop. 'replacement' must be scalar.
717   ///
718   /// This utility is used to register the replacement for scalar results of
719   /// vectorized reduction loops with iter_args.
720   ///
721   /// Example 2:
722   ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
723   ///   * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
724   void registerLoopResultScalarReplacement(Value replaced, Value replacement);
725 
726   /// Returns in 'replacedVals' the scalar replacement for values in
727   /// 'inputVals'.
728   void getScalarValueReplacementsFor(ValueRange inputVals,
729                                      SmallVectorImpl<Value> &replacedVals);
730 
731   /// Erases the scalar loop nest after its successful vectorization.
732   void finishVectorizationPattern(AffineForOp rootLoop);
733 
734   // Used to build and insert all the new operations created. The insertion
735   // point is preserved and updated along the vectorization process.
736   OpBuilder builder;
737 
738   // Maps input scalar operations to their vector counterparts.
739   DenseMap<Operation *, Operation *> opVectorReplacement;
740   // Maps input scalar values to their vector counterparts.
741   BlockAndValueMapping valueVectorReplacement;
742   // Maps input scalar values to their new scalar counterparts in the vector
743   // loop nest.
744   BlockAndValueMapping valueScalarReplacement;
745   // Maps results of reduction loops to their new scalar counterparts.
746   DenseMap<Value, Value> loopResultScalarReplacement;
747 
748   // Maps the newly created vector loops to their vector dimension.
749   DenseMap<Operation *, unsigned> vecLoopToVecDim;
750 
751   // Maps the new vectorized loops to the corresponding vector masks if it is
752   // required.
753   DenseMap<Operation *, Value> vecLoopToMask;
754 
755   // The strategy drives which loop to vectorize by which amount.
756   const VectorizationStrategy *strategy;
757 
758 private:
759   /// Internal implementation to map input scalar values to new vector or scalar
760   /// values.
761   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
762   void registerValueScalarReplacementImpl(Value replaced, Value replacement);
763 };
764 
765 } // end namespace
766 
767 /// Registers the vector replacement of a scalar operation and its result
768 /// values. Both operations must have the same number of results.
769 ///
770 /// This utility is used to register the replacement for the vast majority of
771 /// the vectorized operations.
772 ///
773 /// Example:
774 ///   * 'replaced': %0 = addf %1, %2 : f32
775 ///   * 'replacement': %0 = addf %1, %2 : vector<128xf32>
776 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
777                                                      Operation *replacement) {
778   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
779   LLVM_DEBUG(dbgs() << *replaced << "\n");
780   LLVM_DEBUG(dbgs() << "into\n");
781   LLVM_DEBUG(dbgs() << *replacement << "\n");
782 
783   assert(replaced->getNumResults() == replacement->getNumResults() &&
784          "Unexpected replaced and replacement results");
785   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
786   opVectorReplacement[replaced] = replacement;
787 
788   for (auto resultTuple :
789        llvm::zip(replaced->getResults(), replacement->getResults()))
790     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
791                                        std::get<1>(resultTuple));
792 }
793 
794 /// Registers the vector replacement of a scalar value. The replacement
795 /// operation should have a single result, which replaces the scalar value.
796 ///
797 /// This utility is used to register the vector replacement of block arguments
798 /// and operation results which are not directly vectorized (i.e., their
799 /// scalar version still exists after vectorization), like uniforms.
800 ///
801 /// Example:
802 ///   * 'replaced': block argument or operation outside of the vectorized loop.
803 ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
804 void VectorizationState::registerValueVectorReplacement(
805     Value replaced, Operation *replacement) {
806   assert(replacement->getNumResults() == 1 &&
807          "Expected single-result replacement");
808   if (Operation *defOp = replaced.getDefiningOp())
809     registerOpVectorReplacement(defOp, replacement);
810   else
811     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
812 }
813 
814 /// Registers the vector replacement of a block argument (e.g., iter_args).
815 ///
816 /// Example:
817 ///   * 'replaced': 'iter_arg' block argument.
818 ///   * 'replacement': vectorized 'iter_arg' block argument.
819 void VectorizationState::registerBlockArgVectorReplacement(
820     BlockArgument replaced, BlockArgument replacement) {
821   registerValueVectorReplacementImpl(replaced, replacement);
822 }
823 
824 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
825                                                             Value replacement) {
826   assert(!valueVectorReplacement.contains(replaced) &&
827          "Vector replacement already registered");
828   assert(replacement.getType().isa<VectorType>() &&
829          "Expected vector type in vector replacement");
830   valueVectorReplacement.map(replaced, replacement);
831 }
832 
833 /// Registers the scalar replacement of a scalar value. 'replacement' must be
834 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
836 ///
837 /// This utility is used to register the replacement of block arguments
838 /// that are within the loop to be vectorized and will continue being scalar
839 /// within the vector loop.
840 ///
841 /// Example:
842 ///   * 'replaced': induction variable of a loop to be vectorized.
843 ///   * 'replacement': new induction variable in the new vector loop.
844 void VectorizationState::registerValueScalarReplacement(
845     BlockArgument replaced, BlockArgument replacement) {
846   registerValueScalarReplacementImpl(replaced, replacement);
847 }
848 
849 /// Registers the scalar replacement of a scalar result returned from a
850 /// reduction loop. 'replacement' must be scalar.
851 ///
852 /// This utility is used to register the replacement for scalar results of
853 /// vectorized reduction loops with iter_args.
854 ///
855 /// Example 2:
856 ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
857 ///   * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
858 void VectorizationState::registerLoopResultScalarReplacement(
859     Value replaced, Value replacement) {
860   assert(isa<AffineForOp>(replaced.getDefiningOp()));
861   assert(loopResultScalarReplacement.count(replaced) == 0 &&
862          "already registered");
863   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
864                        "with scalar: "
865                     << replacement);
866   loopResultScalarReplacement[replaced] = replacement;
867 }
868 
869 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
870                                                             Value replacement) {
871   assert(!valueScalarReplacement.contains(replaced) &&
872          "Scalar value replacement already registered");
873   assert(!replacement.getType().isa<VectorType>() &&
874          "Expected scalar type in scalar replacement");
875   valueScalarReplacement.map(replaced, replacement);
876 }
877 
878 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
879 void VectorizationState::getScalarValueReplacementsFor(
880     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
881   for (Value inputVal : inputVals)
882     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
883 }
884 
885 /// Erases a loop nest, including all its nested operations.
886 static void eraseLoopNest(AffineForOp forOp) {
887   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
888   forOp.erase();
889 }
890 
891 /// Erases the scalar loop nest after its successful vectorization.
892 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
893   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
894   eraseLoopNest(rootLoop);
895 }
896 
897 // Apply 'map' with 'mapOperands' returning resulting values in 'results'.
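// One affine.apply op is emitted per result of 'map'; e.g. (illustrative), a
// map (d0, d1) -> (d0 + 1, d1) yields two affine.apply ops, one computing
// d0 + 1 and the other d1.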
898 static void computeMemoryOpIndices(Operation *op, AffineMap map,
899                                    ValueRange mapOperands,
900                                    VectorizationState &state,
901                                    SmallVectorImpl<Value> &results) {
902   for (auto resultExpr : map.getResults()) {
903     auto singleResMap =
904         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
905     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
906                                                     mapOperands);
907     results.push_back(afOp);
908   }
909 }
910 
911 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
912 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
914 static FilterFunctionType
915 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
916                              int fastestVaryingMemRefDimension) {
917   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
918     auto loop = cast<AffineForOp>(forOp);
919     auto parallelIt = parallelLoops.find(loop);
920     if (parallelIt == parallelLoops.end())
921       return false;
922     int memRefDim = -1;
923     auto vectorizableBody =
924         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
925     if (!vectorizableBody)
926       return false;
927     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
928            memRefDim == fastestVaryingMemRefDimension;
929   };
930 }
931 
932 /// Returns the vector type resulting from applying the provided vectorization
933 /// strategy on the scalar type.
934 static VectorType getVectorType(Type scalarTy,
935                                 const VectorizationStrategy *strategy) {
936   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
937   return VectorType::get(strategy->vectorSizes, scalarTy);
938 }
939 
940 /// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
942 /// nullptr, otherwise.
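///
/// For example (illustrative, assuming a 128-wide 1-D strategy):
///   * scalar constant: %cst = constant 1.0 : f32
///   * vector constant: %cst_v = constant dense<1.0> : vector<128xf32>
/// The vector constant is inserted at the start of the body of the innermost
/// vectorized ancestor loop.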
943 static ConstantOp vectorizeConstant(ConstantOp constOp,
944                                     VectorizationState &state) {
945   Type scalarTy = constOp.getType();
946   if (!VectorType::isValidElementType(scalarTy))
947     return nullptr;
948 
949   auto vecTy = getVectorType(scalarTy, state.strategy);
950   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
951 
952   OpBuilder::InsertionGuard guard(state.builder);
953   Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
954   // Find the innermost vectorized ancestor loop to insert the vector constant.
955   while (parentOp && !state.vecLoopToVecDim.count(parentOp))
956     parentOp = parentOp->getParentOp();
957   assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
958          isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
959   auto vecForOp = cast<AffineForOp>(parentOp);
960   state.builder.setInsertionPointToStart(vecForOp.getBody());
961   auto newConstOp = state.builder.create<ConstantOp>(constOp.getLoc(), vecAttr);
962 
963   // Register vector replacement for future uses in the scope.
964   state.registerOpVectorReplacement(constOp, newConstOp);
965   return newConstOp;
966 }
967 
968 /// Creates a constant vector filled with the neutral elements of the given
969 /// reduction. The scalar type of vector elements will be taken from
970 /// `oldOperand`.
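///
/// For example (illustrative), for an 'addf' reduction over f32 with a
/// 128-wide strategy, this creates:
///   %init = constant dense<0.000000e+00> : vector<128xf32>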
971 static ConstantOp createInitialVector(AtomicRMWKind reductionKind,
972                                       Value oldOperand,
973                                       VectorizationState &state) {
974   Type scalarTy = oldOperand.getType();
975   if (!VectorType::isValidElementType(scalarTy))
976     return nullptr;
977 
978   Attribute valueAttr = getIdentityValueAttr(
979       reductionKind, scalarTy, state.builder, oldOperand.getLoc());
980   auto vecTy = getVectorType(scalarTy, state.strategy);
981   auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
982   auto newConstOp =
983       state.builder.create<ConstantOp>(oldOperand.getLoc(), vecAttr);
984 
985   return newConstOp;
986 }
987 
988 /// Creates a mask used to filter out garbage elements in the last iteration
989 /// of unaligned loops. If a mask is not required then `nullptr` is returned.
990 /// The mask will be a vector of booleans representing meaningful vector
991 /// elements in the current iteration. It is filled with ones for each iteration
992 /// except for the last one, where it has the form `11...100...0` with the
993 /// number of ones equal to the number of meaningful elements (i.e. the number
994 /// of iterations that would be left in the original loop).
995 static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
  assert(state.strategy->vectorSizes.size() == 1 &&
         "Creating a mask for non-1-D vectors is not supported.");
998   assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
999          "Creating a mask for loops with non-unit original step size is not "
1000          "supported.");
1001 
1002   // Check if we have already created the mask.
1003   if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1004     return mask;
1005 
1006   // If the loop has constant bounds and the original number of iterations is
  // divisible by the vector size then we don't need a mask.
1008   if (vecForOp.hasConstantBounds()) {
1009     int64_t originalTripCount =
1010         vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1011     if (originalTripCount % vecForOp.getStep() == 0)
1012       return nullptr;
1013   }
1014 
1015   OpBuilder::InsertionGuard guard(state.builder);
1016   state.builder.setInsertionPointToStart(vecForOp.getBody());
1017 
1018   // We generate the mask using the `vector.create_mask` operation which accepts
1019   // the number of meaningful elements (i.e. the length of the prefix of 1s).
1020   // To compute the number of meaningful elements we subtract the current value
1021   // of the iteration variable from the upper bound of the loop. Example:
1022   //
1023   //     // 500 is the upper bound of the loop
1024   //     #map = affine_map<(d0) -> (500 - d0)>
1025   //     %elems_left = affine.apply #map(%iv)
1026   //     %mask = vector.create_mask %elems_left : vector<128xi1>
1027 
1028   Location loc = vecForOp.getLoc();
1029 
1030   // First we get the upper bound of the loop using `affine.apply` or
1031   // `affine.min`.
1032   AffineMap ubMap = vecForOp.getUpperBoundMap();
1033   Value ub;
1034   if (ubMap.getNumResults() == 1)
1035     ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1036                                              vecForOp.getUpperBoundOperands());
1037   else
1038     ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1039                                            vecForOp.getUpperBoundOperands());
1040   // Then we compute the number of (original) iterations left in the loop.
1041   AffineExpr subExpr =
1042       state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1043   Value itersLeft =
1044       makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1045                               {ub, vecForOp.getInductionVar()});
1046   // If the affine maps were successfully composed then `ub` is unneeded.
1047   if (ub.use_empty())
1048     ub.getDefiningOp()->erase();
1049   // Finally we create the mask.
1050   Type maskTy = VectorType::get(state.strategy->vectorSizes,
1051                                 state.builder.getIntegerType(1));
1052   Value mask =
1053       state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1054 
1055   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1056                     << itersLeft << "\n"
1057                     << mask << "\n");
1058 
1059   state.vecLoopToMask[vecForOp] = mask;
1060   return mask;
1061 }
1062 
1063 /// Returns true if the provided value is vector uniform given the vectorization
1064 /// strategy.
1065 // TODO: For now, only values that are invariants to all the loops in the
1066 // vectorization strategy are considered vector uniforms.
1067 static bool isUniformDefinition(Value value,
1068                                 const VectorizationStrategy *strategy) {
1069   for (auto loopToDim : strategy->loopToVectorDim) {
1070     auto loop = cast<AffineForOp>(loopToDim.first);
1071     if (!loop.isDefinedOutsideOfLoop(value))
1072       return false;
1073   }
1074   return true;
1075 }
1076 
1077 /// Generates a broadcast op for the provided uniform value using the
1078 /// vectorization strategy in 'state'.
1079 static Operation *vectorizeUniform(Value uniformVal,
1080                                    VectorizationState &state) {
1081   OpBuilder::InsertionGuard guard(state.builder);
1082   state.builder.setInsertionPointAfterValue(uniformVal);
1083 
1084   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
1085   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
1086                                                    vectorTy, uniformVal);
1087   state.registerValueVectorReplacement(uniformVal, bcastOp);
1088   return bcastOp;
1089 }
1090 
1091 /// Tries to vectorize a given `operand` by applying the following logic:
1092 /// 1. if the defining operation has been already vectorized, `operand` is
1093 ///    already in the proper vector form;
1094 /// 2. if the `operand` is a constant, returns the vectorized form of the
1095 ///    constant;
/// 3. if the `operand` is uniform, returns a vector broadcast of the
///    `operand`;
1097 /// 4. otherwise, the vectorization of `operand` is not supported.
1098 /// Newly created vector operations are registered in `state` as replacement
1099 /// for their scalar counterparts.
1100 /// In particular this logic captures some of the use cases where definitions
1101 /// that are not scoped under the current pattern are needed to vectorize.
1102 /// One such example is top level function constants that need to be splatted.
1103 ///
1104 /// Returns an operand that has been vectorized to match `state`'s strategy if
1105 /// vectorization is possible with the above logic. Returns nullptr otherwise.
1106 ///
1107 /// TODO: handle more complex cases.
1108 static Value vectorizeOperand(Value operand, VectorizationState &state) {
1109   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
1110   // If this value is already vectorized, we are done.
1111   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
1112     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
1113     return vecRepl;
1114   }
1115 
  // A vector operand that is not in the replacement map should never reach
1117   // this point. Reaching this point could mean that the code was already
1118   // vectorized and we shouldn't try to vectorize already vectorized code.
1119   assert(!operand.getType().isa<VectorType>() &&
1120          "Vector op not found in replacement map");
1121 
1122   // Vectorize constant.
1123   if (auto constOp = operand.getDefiningOp<ConstantOp>()) {
1124     ConstantOp vecConstant = vectorizeConstant(constOp, state);
1125     LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
1126     return vecConstant.getResult();
1127   }
1128 
1129   // Vectorize uniform values.
1130   if (isUniformDefinition(operand, state.strategy)) {
1131     Operation *vecUniform = vectorizeUniform(operand, state);
1132     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
1133     return vecUniform->getResult(0);
1134   }
1135 
1136   // Check for unsupported block argument scenarios. A supported block argument
1137   // should have been vectorized already.
1138   if (!operand.getDefiningOp())
1139     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
1140   else
1141     // Generic unsupported case.
1142     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
1143 
1144   return nullptr;
1145 }
1146 
1147 /// Vectorizes an affine load with the vectorization strategy in 'state' by
1148 /// generating a 'vector.transfer_read' op with the proper permutation map
1149 /// inferred from the indices of the load. The new 'vector.transfer_read' is
1150 /// registered as replacement of the scalar load. Returns the newly created
/// 'vector.transfer_read' if vectorization was successful. Returns nullptr
/// otherwise.
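///
/// For example (an illustrative sketch, assuming a 1-D vector<128xf32>
/// strategy along %j; names such as %pad are hypothetical and printing
/// details may differ):
///
///   %0 = affine.load %A[%i, %j] : memref<64x128xf32>
///    =>
///   %0 = vector.transfer_read %A[%i, %j], %pad
///          {permutation_map = affine_map<(d0, d1) -> (d1)>}
///          : memref<64x128xf32>, vector<128xf32>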
1153 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
1154                                       VectorizationState &state) {
1155   MemRefType memRefType = loadOp.getMemRefType();
1156   Type elementType = memRefType.getElementType();
1157   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1158 
1159   // Replace map operands with operands from the vector loop nest.
1160   SmallVector<Value, 8> mapOperands;
1161   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
1162 
  // Compute indices for the transfer op. AffineApplyOps may be generated.
1164   SmallVector<Value, 8> indices;
1165   indices.reserve(memRefType.getRank());
1166   if (loadOp.getAffineMap() !=
1167       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1168     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
1169                            indices);
1170   else
1171     indices.append(mapOperands.begin(), mapOperands.end());
1172 
1173   // Compute permutation map using the information of new vector loops.
1174   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1175                                            indices, state.vecLoopToVecDim);
1176   if (!permutationMap) {
1177     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
1178     return nullptr;
1179   }
1180   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1181   LLVM_DEBUG(permutationMap.print(dbgs()));
1182 
1183   auto transfer = state.builder.create<vector::TransferReadOp>(
1184       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
1185 
1186   // Register replacement for future uses in the scope.
1187   state.registerOpVectorReplacement(loadOp, transfer);
1188   return transfer;
1189 }
1190 
/// Vectorizes an affine store with the vectorization strategy in 'state' by
/// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
/// 'vector.transfer_write' if vectorization was successful. Returns nullptr
/// otherwise.
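///
/// For example (an illustrative sketch, assuming a 1-D vector<128xf32>
/// strategy along %j; %v stands for the already-vectorized value to store):
///
///   affine.store %s, %A[%i, %j] : memref<64x128xf32>
///    =>
///   vector.transfer_write %v, %A[%i, %j]
///       {permutation_map = affine_map<(d0, d1) -> (d1)>}
///       : vector<128xf32>, memref<64x128xf32>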
1197 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
1198                                        VectorizationState &state) {
1199   MemRefType memRefType = storeOp.getMemRefType();
1200   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1201   if (!vectorValue)
1202     return nullptr;
1203 
1204   // Replace map operands with operands from the vector loop nest.
1205   SmallVector<Value, 8> mapOperands;
1206   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
1207 
  // Compute indices for the transfer op. AffineApplyOps may be generated.
1209   SmallVector<Value, 8> indices;
1210   indices.reserve(memRefType.getRank());
1211   if (storeOp.getAffineMap() !=
1212       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1213     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1214                            indices);
1215   else
1216     indices.append(mapOperands.begin(), mapOperands.end());
1217 
1218   // Compute permutation map using the information of new vector loops.
1219   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1220                                            indices, state.vecLoopToVecDim);
1221   if (!permutationMap)
1222     return nullptr;
1223   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1224   LLVM_DEBUG(permutationMap.print(dbgs()));
1225 
1226   auto transfer = state.builder.create<vector::TransferWriteOp>(
1227       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1228       permutationMap);
1229   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1230 
1231   // Register replacement for future uses in the scope.
1232   state.registerOpVectorReplacement(storeOp, transfer);
1233   return transfer;
1234 }
1235 
1236 /// Returns true if `value` is a constant equal to the neutral element of the
1237 /// given vectorizable reduction.
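///
/// For example (an illustrative sketch), for an 'addf' reduction the neutral
/// element is the floating-point zero:
///
///   %init = constant 0.0 : f32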
1238 static bool isNeutralElementConst(AtomicRMWKind reductionKind, Value value,
1239                                   VectorizationState &state) {
1240   Type scalarTy = value.getType();
1241   if (!VectorType::isValidElementType(scalarTy))
1242     return false;
1243   Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1244                                              state.builder, value.getLoc());
1245   if (auto constOp = dyn_cast_or_null<ConstantOp>(value.getDefiningOp()))
1246     return constOp.value() == valueAttr;
1247   return false;
1248 }
1249 
1250 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1251 /// created and registered as replacement for the scalar loop. The builder's
1252 /// insertion point is set to the new loop's body so that subsequent vectorized
1253 /// operations are inserted into the new loop. If the loop is a vector
1254 /// dimension, the step of the newly created loop will reflect the vectorization
/// factor used to vectorize that dimension.
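///
/// For example (an illustrative sketch), vectorizing the %j dimension with a
/// vector<128xf32> strategy turns:
///
///   affine.for %j = 0 to 1024 { ... }
///    =>
///   affine.for %j = 0 to 1024 step 128 { ... }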
1256 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1257                                        VectorizationState &state) {
1258   const VectorizationStrategy &strategy = *state.strategy;
1259   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1260   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1261 
1262   // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1263   if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1264     LLVM_DEBUG(
1265         dbgs()
1266         << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1267         << forOp.getStep() << "\n");
1268     return nullptr;
1269   }
1270 
1271   // If we are vectorizing a vector dimension, compute a new step for the new
1272   // vectorized loop using the vectorization factor for the vector dimension.
1273   // Otherwise, propagate the step of the scalar loop.
1274   unsigned newStep;
1275   if (isLoopVecDim) {
1276     unsigned vectorDim = loopToVecDimIt->second;
1277     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1278     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1279     newStep = forOp.getStep() * forOpVecFactor;
1280   } else {
1281     newStep = forOp.getStep();
1282   }
1283 
1284   // Get information about reduction kinds.
1285   ArrayRef<LoopReduction> reductions;
1286   if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1287     auto it = strategy.reductionLoops.find(forOp);
1288     assert(it != strategy.reductionLoops.end() &&
1289            "Reduction descriptors not found when vectorizing a reduction loop");
1290     reductions = it->second;
1291     assert(reductions.size() == forOp.getNumIterOperands() &&
1292            "The size of reductions array must match the number of iter_args");
1293   }
1294 
1295   // Vectorize 'iter_args'.
1296   SmallVector<Value, 8> vecIterOperands;
1297   if (!isLoopVecDim) {
1298     for (auto operand : forOp.getIterOperands())
1299       vecIterOperands.push_back(vectorizeOperand(operand, state));
1300   } else {
1301     // For reduction loops we need to pass a vector of neutral elements as an
1302     // initial value of the accumulator. We will add the original initial value
1303     // later.
1304     for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
1305       vecIterOperands.push_back(createInitialVector(
1306           std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1307     }
1308   }
1309 
1310   auto vecForOp = state.builder.create<AffineForOp>(
1311       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1312       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1313       vecIterOperands,
1314       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1315         // Make sure we don't create a default terminator in the loop body as
1316         // the proper terminator will be added during vectorization.
1317         return;
1318       });
1319 
1320   // Register loop-related replacements:
1321   //   1) The new vectorized loop is registered as vector replacement of the
1322   //      scalar loop.
1323   //   2) The new iv of the vectorized loop is registered as scalar replacement
1324   //      since a scalar copy of the iv will prevail in the vectorized loop.
1325   //      TODO: A vector replacement will also be added in the future when
1326   //      vectorization of linear ops is supported.
1327   //   3) The new 'iter_args' region arguments are registered as vector
1328   //      replacements since they have been vectorized.
1329   //   4) If the loop performs a reduction along the vector dimension, a
1330   //      `vector.reduction` or similar op is inserted for each resulting value
1331   //      of the loop and its scalar value replaces the corresponding scalar
1332   //      result of the loop.
1333   state.registerOpVectorReplacement(forOp, vecForOp);
1334   state.registerValueScalarReplacement(forOp.getInductionVar(),
1335                                        vecForOp.getInductionVar());
  for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1338     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1339                                             std::get<1>(iterTuple));
1340 
1341   if (isLoopVecDim) {
1342     for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1343       // First, we reduce the vector returned from the loop into a scalar.
1344       Value reducedRes =
1345           getVectorReductionOp(reductions[i].kind, state.builder,
1346                                vecForOp.getLoc(), vecForOp.getResult(i));
1347       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1348                         << reducedRes);
1349       // Then we combine it with the original (scalar) initial value unless it
1350       // is equal to the neutral element of the reduction.
1351       Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1352       Value finalRes = reducedRes;
1353       if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1354         finalRes = getReductionOp(reductions[i].kind, state.builder,
1355                                   reducedRes.getLoc(), reducedRes, origInit);
1356       state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1357     }
1358   }
1359 
1360   if (isLoopVecDim)
1361     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1362 
1363   // Change insertion point so that upcoming vectorized instructions are
1364   // inserted into the vectorized loop's body.
1365   state.builder.setInsertionPointToStart(vecForOp.getBody());
1366 
1367   // If this is a reduction loop then we may need to create a mask to filter out
1368   // garbage in the last iteration.
1369   if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1370     createMask(vecForOp, state);
1371 
1372   return vecForOp;
1373 }
1374 
/// Vectorizes an arbitrary operation by plain widening. We apply generic type
1376 /// widening of all its results and retrieve the vector counterparts for all its
1377 /// operands.
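///
/// For example (an illustrative sketch, assuming a vector<128xf32> strategy
/// and already-vectorized operands %va and %vb):
///
///   %res = addf %a, %b : f32
///    =>
///   %vres = addf %va, %vb : vector<128xf32>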
1378 static Operation *widenOp(Operation *op, VectorizationState &state) {
1379   SmallVector<Type, 8> vectorTypes;
1380   for (Value result : op->getResults())
1381     vectorTypes.push_back(
1382         VectorType::get(state.strategy->vectorSizes, result.getType()));
1383 
1384   SmallVector<Value, 8> vectorOperands;
1385   for (Value operand : op->getOperands()) {
1386     Value vecOperand = vectorizeOperand(operand, state);
1387     if (!vecOperand) {
1388       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1389       return nullptr;
1390     }
1391     vectorOperands.push_back(vecOperand);
1392   }
1393 
1394   // Create a clone of the op with the proper operands and return types.
1395   // TODO: The following assumes there is always an op with a fixed
1396   // name that works both in scalar mode and vector mode.
1397   // TODO: Is it worth considering an Operation.clone operation which
1398   // changes the type so we can promote an Operation with less boilerplate?
1399   OperationState vecOpState(op->getLoc(), op->getName().getStringRef(),
1400                             vectorOperands, vectorTypes, op->getAttrs(),
1401                             /*successors=*/{}, /*regions=*/{});
1402   Operation *vecOp = state.builder.createOperation(vecOpState);
1403   state.registerOpVectorReplacement(op, vecOp);
1404   return vecOp;
1405 }
1406 
1407 /// Vectorizes a yield operation by widening its types. The builder's insertion
1408 /// point is set after the vectorized parent op to continue vectorizing the
1409 /// operations after the parent op. When vectorizing a reduction loop a mask may
1410 /// be used to prevent adding garbage values to the accumulator.
1411 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1412                                          VectorizationState &state) {
1413   Operation *newYieldOp = widenOp(yieldOp, state);
1414   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1415 
1416   // If there is a mask for this loop then we must prevent garbage values from
1417   // being added to the accumulator by inserting `select` operations, for
1418   // example:
1419   //
1420   //   %res = addf %acc, %val : vector<128xf32>
1421   //   %res_masked = select %mask, %res, %acc : vector<128xi1>, vector<128xf32>
1422   //   affine.yield %res_masked : vector<128xf32>
1423   //
1424   if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1425     state.builder.setInsertionPoint(newYieldOp);
1426     for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
1427       Value result = newYieldOp->getOperand(i);
1428       Value iterArg = cast<AffineForOp>(newParentOp).getRegionIterArgs()[i];
1429       Value maskedResult = state.builder.create<SelectOp>(result.getLoc(), mask,
1430                                                           result, iterArg);
1431       LLVM_DEBUG(
1432           dbgs() << "\n[early-vect]+++++ masking a yielded vector value: "
1433                  << maskedResult);
1434       newYieldOp->setOperand(i, maskedResult);
1435     }
1436   }
1437 
1438   state.builder.setInsertionPointAfter(newParentOp);
1439   return newYieldOp;
1440 }
1441 
1442 /// Encodes Operation-specific behavior for vectorization. In general we
1443 /// assume that all operands of an op must be vectorized but this is not
1444 /// always true. In the future, it would be nice to have a trait that
1445 /// describes how a particular operation vectorizes. For now we implement the
1446 /// case distinction here. Returns a vectorized form of an operation or
1447 /// nullptr if vectorization fails.
1448 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1449 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1450 // do one-off logic here; ideally it would be TableGen'd.
1451 static Operation *vectorizeOneOperation(Operation *op,
1452                                         VectorizationState &state) {
1453   // Sanity checks.
1454   assert(!isa<vector::TransferReadOp>(op) &&
1455          "vector.transfer_read cannot be further vectorized");
1456   assert(!isa<vector::TransferWriteOp>(op) &&
1457          "vector.transfer_write cannot be further vectorized");
1458 
1459   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1460     return vectorizeAffineLoad(loadOp, state);
1461   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1462     return vectorizeAffineStore(storeOp, state);
1463   if (auto forOp = dyn_cast<AffineForOp>(op))
1464     return vectorizeAffineForOp(forOp, state);
1465   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1466     return vectorizeAffineYieldOp(yieldOp, state);
1467   if (auto constant = dyn_cast<ConstantOp>(op))
1468     return vectorizeConstant(constant, state);
1469 
1470   // Other ops with regions are not supported.
1471   if (op->getNumRegions() != 0)
1472     return nullptr;
1473 
1474   return widenOp(op, state);
1475 }
1476 
1477 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1478 /// vector container that preserves the relative nesting level of each loop with
1479 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1480 /// will be assigned to the loop in the current 'match'.
1481 static void
1482 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1483                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
1484   // Add a new empty level to the output if it doesn't exist already.
1485   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1486   if (currentLevel == loops.size())
1487     loops.push_back(SmallVector<AffineForOp, 2>());
1488 
1489   // Add current match and recursively visit its children.
1490   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1491   for (auto childMatch : match.getMatchedChildren()) {
1492     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1493   }
1494 }
1495 
1496 /// Converts all the nested loops in 'match' to a 2D vector container that
1497 /// preserves the relative nesting level of each loop with respect to the others
1498 /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
1499 /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
1500 /// 'loops[i+1]'.
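///
/// For example (an illustrative sketch), matching the nest:
///
///   affine.for %i0 ... {
///     affine.for %i1 ... { ... }
///     affine.for %i2 ... { ... }
///   }
///
/// produces loops = {{%i0}, {%i1, %i2}}.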
1501 static void
1502 getMatchedAffineLoops(NestedMatch match,
1503                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1505 }
1506 
1507 /// Internal implementation to vectorize affine loops from a single loop nest
1508 /// using an n-D vectorization strategy.
1509 static LogicalResult
1510 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1511                   const VectorizationStrategy &strategy) {
1512   assert(loops[0].size() == 1 && "Expected single root loop");
1513   AffineForOp rootLoop = loops[0][0];
1514   VectorizationState state(rootLoop.getContext());
1515   state.builder.setInsertionPointAfter(rootLoop);
1516   state.strategy = &strategy;
1517 
  // Since patterns are recursive, they can very well intersect.
  // Because we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
1521   // As a consequence we must check that each root pattern is still
1522   // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1523   // TODO: implement a non-greedy profitability analysis that keeps only
1524   // non-intersecting patterns.
1525   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1526     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1527     return failure();
1528   }
1529 
1530   //////////////////////////////////////////////////////////////////////////////
1531   // Vectorize the scalar loop nest following a topological order. A new vector
1532   // loop nest with the vectorized operations is created along the process. If
1533   // vectorization succeeds, the scalar loop nest is erased. If vectorization
1534   // fails, the vector loop nest is erased and the scalar loop nest is not
1535   // modified.
1536   //////////////////////////////////////////////////////////////////////////////
1537 
1538   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1539     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1540     Operation *vectorOp = vectorizeOneOperation(op, state);
1541     if (!vectorOp) {
1542       LLVM_DEBUG(
1543           dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
1544                  << *op << "\n");
1545       return WalkResult::interrupt();
1546     }
1547 
1548     return WalkResult::advance();
1549   });
1550 
1551   if (opVecResult.wasInterrupted()) {
1552     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1553                       << rootLoop << "\n");
1554     // Erase vector loop nest if it was created.
1555     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1556     if (vecRootLoopIt != state.opVectorReplacement.end())
1557       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1558 
1559     return failure();
1560   }
1561 
1562   // Replace results of reduction loops with the scalar values computed using
1563   // `vector.reduce` or similar ops.
1564   for (auto resPair : state.loopResultScalarReplacement)
1565     resPair.first.replaceAllUsesWith(resPair.second);
1566 
1567   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1568          "Expected vector replacement for loop nest");
1569   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1570   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1571                     << *state.opVectorReplacement[rootLoop]);
1572 
1573   // Finish this vectorization pattern.
1574   state.finishVectorizationPattern(rootLoop);
1575   return success();
1576 }
1577 
1578 /// Extracts the matched loops and vectorizes them following a topological
1579 /// order. A new vector loop nest will be created if vectorization succeeds. The
1580 /// original loop nest won't be modified in any case.
1581 static LogicalResult vectorizeRootMatch(NestedMatch m,
1582                                         const VectorizationStrategy &strategy) {
1583   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1584   getMatchedAffineLoops(m, loopsToVectorize);
1585   return vectorizeLoopNest(loopsToVectorize, strategy);
1586 }
1587 
1588 /// Traverses all the loop matches and classifies them into intersection
1589 /// buckets. Two matches intersect if any of them encloses the other one. A
1590 /// match intersects with a bucket if the match intersects with the root
1591 /// (outermost) loop in that bucket.
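///
/// For example (an illustrative sketch), given matches rooted at %i0, %i1 and
/// %i2 in:
///
///   affine.for %i0 ... {
///     affine.for %i1 ... { ... }
///   }
///   affine.for %i2 ... { ... }
///
/// the matches rooted at %i0 and %i1 share a bucket rooted at %i0, while the
/// match rooted at %i2 gets its own bucket.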
1592 static void computeIntersectionBuckets(
1593     ArrayRef<NestedMatch> matches,
1594     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1595   assert(intersectionBuckets.empty() && "Expected empty output");
1596   // Keeps track of the root (outermost) loop of each bucket.
1597   SmallVector<AffineForOp, 8> bucketRoots;
1598 
1599   for (const NestedMatch &match : matches) {
1600     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1601     bool intersects = false;
1602     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1603       AffineForOp bucketRoot = bucketRoots[i];
1604       // Add match to the bucket if the bucket root encloses the match root.
1605       if (bucketRoot->isAncestor(matchRoot)) {
1606         intersectionBuckets[i].push_back(match);
1607         intersects = true;
1608         break;
1609       }
1610       // Add match to the bucket if the match root encloses the bucket root. The
1611       // match root becomes the new bucket root.
1612       if (matchRoot->isAncestor(bucketRoot)) {
1613         bucketRoots[i] = matchRoot;
1614         intersectionBuckets[i].push_back(match);
1615         intersects = true;
1616         break;
1617       }
1618     }
1619 
1620     // Match doesn't intersect with any existing bucket. Create a new bucket for
1621     // it.
1622     if (!intersects) {
1623       bucketRoots.push_back(matchRoot);
1624       intersectionBuckets.push_back(SmallVector<NestedMatch, 8>());
1625       intersectionBuckets.back().push_back(match);
1626     }
1627   }
1628 }
1629 
1630 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1631 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1632 /// factor is applied inner-to-outer to the loops of each loop nest.
1633 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1634 /// vectorization order. `reductionLoops` can be provided to specify loops which
1635 /// can be vectorized along the reduction dimension.
1636 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1637                            ArrayRef<int64_t> vectorSizes,
1638                            ArrayRef<int64_t> fastestVaryingPattern,
1639                            const ReductionLoopMap &reductionLoops) {
1640   assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
1641          "Vectorizing reductions is supported only for 1-D vectors");
1642 
1643   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1644   Optional<NestedPattern> pattern =
1645       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1646   if (!pattern.hasValue()) {
1647     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1648     return;
1649   }
1650 
1651   LLVM_DEBUG(dbgs() << "\n******************************************");
1652   LLVM_DEBUG(dbgs() << "\n******************************************");
1653   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1654   LLVM_DEBUG(dbgs() << *parentOp << "\n");
1655 
1656   unsigned patternDepth = pattern->getDepth();
1657 
1658   // Compute all the pattern matches and classify them into buckets of
1659   // intersecting matches.
1660   SmallVector<NestedMatch, 32> allMatches;
1661   pattern->match(parentOp, &allMatches);
1662   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1663   computeIntersectionBuckets(allMatches, intersectionBuckets);
1664 
1665   // Iterate over all buckets and vectorize the matches eagerly. We can only
1666   // vectorize one match from each bucket since all the matches within a bucket
1667   // intersect.
1668   for (auto &intersectingMatches : intersectionBuckets) {
1669     for (NestedMatch &match : intersectingMatches) {
1670       VectorizationStrategy strategy;
1671       // TODO: depending on profitability, elect to reduce the vector size.
1672       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1673       strategy.reductionLoops = reductionLoops;
1674       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1675                                       patternDepth, &strategy))) {
1676         continue;
1677       }
1678       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1679                                 &strategy);
1680       // Vectorize match. Skip the rest of intersecting matches in the bucket if
1681       // vectorization succeeded.
1682       // TODO: if pattern does not apply, report it; alter the cost/benefit.
1683       // TODO: some diagnostics if failure to vectorize occurs.
1684       if (succeeded(vectorizeRootMatch(match, strategy)))
1685         break;
1686     }
1687   }
1688 
1689   LLVM_DEBUG(dbgs() << "\n");
1690 }
1691 
/// Applies vectorization to the current function by searching over a set of
/// predetermined patterns.
1702 void Vectorize::runOnFunction() {
1703   FuncOp f = getFunction();
1704   if (!fastestVaryingPattern.empty() &&
1705       fastestVaryingPattern.size() != vectorSizes.size()) {
1706     f.emitRemark("Fastest varying pattern specified with different size than "
1707                  "the vector size.");
1708     return signalPassFailure();
1709   }
1710 
1711   if (vectorizeReductions && vectorSizes.size() != 1) {
1712     f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
1713     return signalPassFailure();
1714   }
1715 
1716   DenseSet<Operation *> parallelLoops;
1717   ReductionLoopMap reductionLoops;
1718 
  // If 'vectorize-reductions=true' is provided, we also populate the
1720   // `reductionLoops` map.
1721   if (vectorizeReductions) {
1722     f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
1723       SmallVector<LoopReduction, 2> reductions;
1724       if (isLoopParallel(loop, &reductions)) {
1725         parallelLoops.insert(loop);
1726         // If it's not a reduction loop, adding it to the map is not necessary.
1727         if (!reductions.empty())
1728           reductionLoops[loop] = reductions;
1729       }
1730     });
1731   } else {
1732     f.walk([&parallelLoops](AffineForOp loop) {
1733       if (isLoopParallel(loop))
1734         parallelLoops.insert(loop);
1735     });
1736   }
1737 
1738   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1739   NestedPatternContext mlContext;
1740   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
1741                  reductionLoops);
1742 }
1743 
1744 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1745 /// SuperVectorizer:
1746 ///   * There must be at least one loop.
1747 ///   * There must be a single root loop (nesting level 0).
1748 ///   * Each loop at a given nesting level must be nested in a loop from a
1749 ///     previous nesting level.
1750 static LogicalResult
1751 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1752   // Expected at least one loop.
1753   if (loops.empty())
1754     return failure();
1755 
1756   // Expected only one root loop.
1757   if (loops[0].size() != 1)
1758     return failure();
1759 
1760   // Traverse loops outer-to-inner to check some invariants.
1761   for (int i = 1, end = loops.size(); i < end; ++i) {
1762     for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
1765       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1766             return maybeParent->isProperAncestor(loop);
1767           }))
1768         return failure();
1769 
      // Check that each loop at this level is not nested in another loop from
      // this level.
1772       for (AffineForOp sibling : loops[i]) {
1773         if (sibling->isProperAncestor(loop))
1774           return failure();
1775       }
1776     }
1777   }
1778 
1779   return success();
1780 }
1781 
1782 namespace mlir {
1783 
1784 /// External utility to vectorize affine loops in 'loops' using the n-D
1785 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1786 /// factor is applied inner-to-outer to the loops of each loop nest.
1787 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1788 /// vectorization order.
1789 /// If `reductionLoops` is not empty, the given reduction loops may be
1790 /// vectorized along the reduction dimension.
1791 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
1792 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1793                           ArrayRef<int64_t> vectorSizes,
1794                           ArrayRef<int64_t> fastestVaryingPattern,
1795                           const ReductionLoopMap &reductionLoops) {
1796   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1797   NestedPatternContext mlContext;
1798   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
1799                  reductionLoops);
1800 }
1801 
1802 /// External utility to vectorize affine loops from a single loop nest using an
1803 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1804 /// Loops are provided in a 2D vector container. The first dimension represents
1805 /// the nesting level relative to the loops to be vectorized. The second
1806 /// dimension contains the loops. This means that:
1807 ///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
1808 ///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
1809 ///
1810 /// For example, for the following loop nest:
1811 ///
1812 ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1813 ///               %out0: memref<64x128x512xf32>,
1814 ///               %out1: memref<64x128x128xf32>) {
1815 ///     affine.for %i0 = 0 to 64 {
1816 ///       affine.for %i1 = 0 to 128 {
1817 ///         affine.for %i2 = 0 to 512 {
1818 ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1819 ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1820 ///         }
1821 ///         affine.for %i3 = 0 to 128 {
1822 ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1823 ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1824 ///         }
1825 ///       }
1826 ///     }
1827 ///     return
1828 ///   }
1829 ///
1830 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1831 /// innermost loops;
1832 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1833 /// loops;
1834 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1835 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1836 /// loops = {{%i1}}, to vectorize only the middle loop.
1837 LogicalResult
1838 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1839                         const VectorizationStrategy &strategy) {
1840   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1841   NestedPatternContext mlContext;
1842   if (failed(verifyLoopNesting(loops)))
1843     return failure();
1844   return vectorizeLoopNest(loops, strategy);
1845 }
1846 
1847 std::unique_ptr<OperationPass<FuncOp>>
1848 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1849   return std::make_unique<Vectorize>(virtualVectorSize);
1850 }
1851 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1852   return std::make_unique<Vectorize>();
1853 }
1854 
1855 } // namespace mlir
1856