1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
16 #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
17 #include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
21 #include "mlir/Dialect/Vector/IR/VectorOps.h"
22 #include "mlir/Dialect/Vector/Utils/VectorUtils.h"
23 #include "mlir/IR/BlockAndValueMapping.h"
24 #include "mlir/Support/LLVM.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/Support/Debug.h"
27 
28 using namespace mlir;
29 using namespace vector;
30 
31 ///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode, in a single,
/// compact representation within the vector types, information that is
/// expected to reduce the impact of the phase ordering problem.
36 ///
37 /// Vector granularity:
38 /// ===================
39 /// This pass is designed to perform vectorization at a super-vector
40 /// granularity. A super-vector is loosely defined as a vector type that is a
41 /// multiple of a "good" vector size so the HW can efficiently implement a set
42 /// of high-level primitives. Multiple is understood along any dimension; e.g.
43 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
44 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
45 /// efficiently implement a set of high-level primitives" is not necessarily an
46 /// integer multiple of actual hardware registers. We leave details of this
47 /// distinction unspecified for now.
48 ///
49 /// Some may prefer the terminology a "tile of HW vectors". In this case, one
50 /// should note that super-vectors implement an "always full tile" abstraction.
51 /// They guarantee no partial-tile separation is necessary by relying on a
52 /// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
54 /// transposition if necessary. In the general case this will require a scoped
55 /// allocation in some notional local memory.
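///
/// As a purely illustrative sketch (operand names and shapes below are
/// hypothetical), a transfer that recaptures a transposed layout while
/// reading could look like:
/// ```mlir
///   %vT = vector.transfer_read %A[%i, %j], %pad
///       {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///       : memref<?x?xf32>, vector<32x256xf32>
/// ```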
56 ///
57 /// Whatever the mental model one prefers to use for this abstraction, the key
58 /// point is that we burn into a single, compact, representation in the vector
59 /// types, information that is expected to reduce the impact of the phase
60 /// ordering problem. Indeed, a vector type conveys information that:
61 ///   1. the associated loops have dependency semantics that do not prevent
62 ///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that are
64 ///      compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
69 ///   4. the underlying memrefs are accessed in some notional contiguous way
70 ///      that allows loading into vectors with some amount of spatial locality;
71 /// In other words, super-vectorization provides a level of separation of
72 /// concern by way of opacity to subsequent passes. This has the effect of
73 /// encapsulating and propagating vectorization constraints down the list of
74 /// passes until we are ready to lower further.
75 ///
76 /// For a particular target, a notion of minimal n-d vector size will be
77 /// specified and vectorization targets a multiple of those. In the following
78 /// paragraph, let "k ." represent "a multiple of", to be understood as a
79 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
80 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
81 ///
82 /// Some non-exhaustive notable super-vector sizes of interest include:
83 ///   - CPU: vector<k . HW_vector_size>,
84 ///          vector<k' . core_count x k . HW_vector_size>,
85 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
86 ///   - GPU: vector<k . warp_size>,
87 ///          vector<k . warp_size x float2>,
88 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
90 ///
91 /// Loops and operations are emitted that operate on those super-vector shapes.
92 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
93 /// passes are expected to be (gradually) more target-specific.
94 ///
95 /// At a high level, a vectorized load in a loop will resemble:
96 /// ```mlir
97 ///   affine.for %i = ? to ? step ? {
98 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
99 ///   }
100 /// ```
101 /// It is the responsibility of the implementation of vector.transfer_read to
102 /// materialize vector registers from the original scalar memrefs. A later (more
103 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
105 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
106 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
107 ///      thus opening opportunities for unrolling and pipelining. This is an
108 ///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call, achieving full separation of concerns. This is an instance of a
///      library call; or
112 ///   3. a mix of both, e.g. based on a model.
113 /// In the future, these operations will expose a contract to constrain the
114 /// search on vectorization patterns and sizes.
115 ///
116 /// Occurrence of super-vectorization in the compiler flow:
117 /// =======================================================
118 /// This is an active area of investigation. We start with 2 remarks to position
119 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
120 /// and LLVM SLP Vectorizer.
121 ///
122 /// LLVM VPLAN:
123 /// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN,
/// for instance late in a traditional polyhedral compilation flow (e.g. the
/// PPCG project, which uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping and register optimizations) and before unrolling. When
/// vectorization is applied at this *late* level in a typical polyhedral
/// flow, and is instantiated with actual hardware vector sizes,
132 /// super-vectorization is expected to match (or subsume) the type of patterns
133 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
134 /// is higher level and our implementation should be significantly simpler. Also
135 /// note that in this mode, recursive patterns are probably a bit of an overkill
136 /// although it is reasonable to expect that mixing a bit of outer loop and
137 /// inner loop vectorization + unrolling will provide interesting choices to
138 /// MLIR.
139 ///
140 /// LLVM SLP Vectorizer:
141 /// --------------------
142 /// Super-vectorization however is not meant to be usable in a similar fashion
143 /// to the SLP vectorizer. The main difference lies in the information that
144 /// both vectorizers use: super-vectorization examines contiguity of memory
145 /// references along fastest varying dimensions and loops with recursive nested
146 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
147 /// the other hand, performs flat pattern matching inside a single unrolled loop
148 /// body and stitches together pieces of load and store operations into full
149 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
150 /// innermost loop, control-flow dependent patterns that super-vectorization may
151 /// not be able to capture easily. In other words, super-vectorization does not
152 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
153 ///
154 /// Ongoing investigations:
155 /// -----------------------
156 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
158 /// opportunities in the context of the traditional polyhedral compiler flow
159 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
160 /// we expect to experiment with super-vectorization:
161 /// 1. Right after language lowering to MLIR: this is the earliest time where
162 ///    super-vectorization is expected to be applied. At this level, all the
163 ///    language/user/library-level annotations are available and can be fully
164 ///    exploited. Examples include loop-type annotations (such as parallel,
165 ///    reduction, scan, dependence distance vector, vectorizable) as well as
///    memory access annotations (such as non-aliasing writes guaranteed,
///    indirect accesses that are permutations by construction), or the fact
///    that a particular operation is prescribed atomic by the user. At this
169 ///    level, anything that enriches what dependence analysis can do should be
170 ///    aggressively exploited. At this level we are close to having explicit
171 ///    vector types in the language, except we do not impose that burden on the
172 ///    programmer/library: we derive information from scalar code + annotations.
173 /// 2. After dependence analysis and before polyhedral scheduling: the
174 ///    information that supports vectorization does not need to be supplied by a
175 ///    higher level of abstraction. Traditional dependence analysis is available
176 ///    in MLIR and will be used to drive vectorization and cost models.
177 ///
178 /// Let's pause here and remark that applying super-vectorization as described
179 /// in 1. and 2. presents clear opportunities and risks:
180 ///   - the opportunity is that vectorization is burned in the type system and
181 ///   is protected from the adverse effect of loop scheduling, tiling, loop
182 ///   interchange and all passes downstream. Provided that subsequent passes are
183 ///   able to operate on vector types; the vector shapes, associated loop
184 ///   iterator properties, alignment, and contiguity of fastest varying
185 ///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
187 ///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true, wherever
189 ///   vectorization is applied) and b. that imposing vectorization constraints
190 ///   too early may be overall detrimental to loop fusion, tiling and other
191 ///   transformations because the dependence distances are coarsened when
192 ///   operating on elemental vector types. For this reason, the pattern
193 ///   profitability analysis should include a component that also captures the
194 ///   maximal amount of fusion available under a particular pattern. This is
195 ///   still at the stage of rough ideas but in this context, search is our
196 ///   friend as the Tensor Comprehensions and auto-TVM contributions
197 ///   demonstrated previously.
198 /// Bottom-line is we do not yet have good answers for the above but aim at
199 /// making it easy to answer such questions.
200 ///
201 /// Back to our listing, the last places where early super-vectorization makes
202 /// sense are:
203 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
204 ///    to improve locality, parallelism and be configurable (e.g. max-fuse,
205 ///    smart-fuse etc). They can also have adverse effects on contiguity
206 ///    properties that are required for vectorization but the vector.transfer
207 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
208 ///    these properties.
209 /// 4. right after polyhedral-style scheduling+tiling;
210 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
211 ///    probably the most promising places because applying tiling achieves a
212 ///    separation of concerns that allows rescheduling to worry less about
213 ///    locality and more about parallelism and distribution (e.g. min-fuse).
214 ///
215 /// At these levels the risk-reward looks different: on one hand we probably
216 /// lost a good deal of language/user/library-level annotation; on the other
217 /// hand we gained parallelism and locality through scheduling and tiling.
218 /// However we probably want to ensure tiling is compatible with the
219 /// full-tile-only abstraction used in super-vectorization or suffer the
220 /// consequences. It is too early to place bets on what will win but we expect
221 /// super-vectorization to be the right abstraction to allow exploring at all
222 /// these levels. And again, search is our friend.
223 ///
224 /// Lastly, we mention it again here:
225 /// 6. as a MLIR-based alternative to VPLAN.
226 ///
227 /// Lowering, unrolling, pipelining:
228 /// ================================
229 /// TODO: point to the proper places.
230 ///
231 /// Algorithm:
232 /// ==========
233 /// The algorithm proceeds in a few steps:
234 ///  1. defining super-vectorization patterns and matching them on the tree of
///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
237 ///     loops that have a. conformable loop annotations attached (e.g. parallel,
238 ///     reduction, vectorizable, ...) as well as b. all contiguous load/store
239 ///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
241 ///  2. analyzing those patterns for profitability (TODO: and
242 ///     interference);
243 ///  3. then, for each pattern in order:
244 ///    a. applying iterative rewriting of the loops and all their nested
245 ///       operations in topological order. Rewriting is implemented by
246 ///       coarsening the loops and converting operations and operands to their
247 ///       vector forms. Processing operations in topological order is relatively
248 ///       simple due to the structured nature of the control-flow
249 ///       representation. This order ensures that all the operands of a given
250 ///       operation have been vectorized before the operation itself in a single
251 ///       traversal, except for operands defined outside of the loop nest. The
252 ///       algorithm can convert the following operations to their vector form:
253 ///         * Affine load and store operations are converted to opaque vector
254 ///           transfer read and write operations.
255 ///         * Scalar constant operations/operands are converted to vector
256 ///           constant operations (splat).
257 ///         * Uniform operands (only induction variables of loops not mapped to
258 ///           a vector dimension, or operands defined outside of the loop nest
259 ///           for now) are broadcasted to a vector.
260 ///           TODO: Support more uniform cases.
261 ///         * Affine for operations with 'iter_args' are vectorized by
262 ///           vectorizing their 'iter_args' operands and results.
263 ///           TODO: Support more complex loops with divergent lbs and/or ubs.
264 ///         * The remaining operations in the loop nest are vectorized by
265 ///           widening their scalar types to vector types.
266 ///    b. if everything under the root AffineForOp in the current pattern
267 ///       is vectorized properly, we commit that loop to the IR and remove the
268 ///       scalar loop. Otherwise, we discard the vectorized loop and keep the
269 ///       original scalar loop.
///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
///       re-verify that the pattern is still vectorizable. This is expected to
274 ///       make cost models more difficult to write and is subject to improvement
275 ///       in the future.
276 ///
277 /// Choice of loop transformation to support the algorithm:
278 /// =======================================================
279 /// The choice of loop transformation to apply for coarsening vectorized loops
/// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor of 128: we want to transform the following input:
282 /// ```mlir
283 ///   affine.for %i = %M to %N {
284 ///     %a = affine.load %A[%i] : memref<?xf32>
285 ///   }
286 /// ```
287 ///
288 /// Traditionally, one would vectorize late (after scheduling, tiling,
289 /// memory promotion etc) say after stripmining (and potentially unrolling in
290 /// the case of LLVM's SLP vectorizer):
291 /// ```mlir
292 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
293 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
294 ///       %a = affine.load %A[%ii] : memref<?xf32>
295 ///     }
296 ///   }
297 /// ```
298 ///
299 /// Instead, we seek to vectorize early and freeze vector types before
300 /// scheduling, so we want to generate a pattern that resembles:
301 /// ```mlir
302 ///   affine.for %i = ? to ? step ? {
303 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
304 ///   }
305 /// ```
306 ///
307 /// i. simply dividing the lower / upper bounds by 128 creates issues
308 ///    when representing expressions such as ii + 1 because now we only
309 ///    have access to original values that have been divided. Additional
310 ///    information is needed to specify accesses at below-128 granularity;
311 /// ii. another alternative is to coarsen the loop step but this may have
312 ///    consequences on dependence analysis and fusability of loops: fusable
313 ///    loops probably need to have the same step (because we don't want to
314 ///    stripmine/unroll to enable fusion).
315 /// As a consequence, we choose to represent the coarsening using the loop
316 /// step for now and reevaluate in the future. Note that we can renormalize
317 /// loop steps later if/when we have evidence that they are problematic.
318 ///
319 /// For the simple strawman example above, vectorizing for a 1-D vector
320 /// abstraction of size 128 returns code similar to:
321 /// ```mlir
322 ///   affine.for %i = %M to %N step 128 {
323 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
324 ///   }
325 /// ```
326 ///
327 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
328 /// ========================================================================
329 ///   1. lowering to concrete vector types for various HW;
330 ///   2. reduction support for n-D vectorization and non-unit steps;
331 ///   3. non-effecting padding during vector.transfer_read and filter during
332 ///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
334 ///      (hopefully without read-modify-writes);
335 ///   5. control-flow support;
336 ///   6. cost-models, heuristics and search;
337 ///   7. Op implementation, extensions and implication on memref views;
338 ///   8. many TODOs left around.
339 ///
340 /// Examples:
341 /// =========
342 /// Consider the following Function:
343 /// ```mlir
344 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
///   %A = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %B = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %C = memref.alloc(%M, %N) : memref<?x?xf32, 0>
348 ///   %f1 = arith.constant 1.0 : f32
349 ///   %f2 = arith.constant 2.0 : f32
350 ///   affine.for %i0 = 0 to %M {
351 ///     affine.for %i1 = 0 to %N {
352 ///       // non-scoped %f1
353 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
354 ///     }
355 ///   }
356 ///   affine.for %i2 = 0 to %M {
357 ///     affine.for %i3 = 0 to %N {
358 ///       // non-scoped %f2
359 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
360 ///     }
361 ///   }
362 ///   affine.for %i4 = 0 to %M {
363 ///     affine.for %i5 = 0 to %N {
364 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
365 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
366 ///       %s5 = arith.addf %a5, %b5 : f32
367 ///       // non-scoped %f1
368 ///       %s6 = arith.addf %s5, %f1 : f32
369 ///       // non-scoped %f2
370 ///       %s7 = arith.addf %s5, %f2 : f32
371 ///       // diamond dependency.
372 ///       %s8 = arith.addf %s7, %s6 : f32
373 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
374 ///     }
375 ///   }
376 ///   %c7 = arith.constant 7 : index
377 ///   %c42 = arith.constant 42 : index
///   %res = memref.load %C[%c7, %c42] : memref<?x?xf32, 0>
379 ///   return %res : f32
380 /// }
381 /// ```
382 ///
383 /// The -affine-super-vectorize pass with the following arguments:
384 /// ```
385 /// -affine-super-vectorize="virtual-vector-size=256 test-fastest-varying=0"
386 /// ```
387 ///
388 /// produces this standard innermost-loop vectorized code:
389 /// ```mlir
390 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
391 ///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
392 ///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
393 ///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
394 ///   %cst = arith.constant 1.0 : f32
395 ///   %cst_0 = arith.constant 2.0 : f32
396 ///   affine.for %i0 = 0 to %arg0 {
397 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<256xf32>
400 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
401 ///                vector<256xf32>, memref<?x?xf32>
402 ///     }
403 ///   }
404 ///   affine.for %i2 = 0 to %arg0 {
405 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<256xf32>
408 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
409 ///                vector<256xf32>, memref<?x?xf32>
410 ///     }
411 ///   }
412 ///   affine.for %i4 = 0 to %arg0 {
413 ///     affine.for %i5 = 0 to %arg1 step 256 {
414 ///       %3 = vector.transfer_read %0[%i4, %i5] :
415 ///            memref<?x?xf32>, vector<256xf32>
416 ///       %4 = vector.transfer_read %1[%i4, %i5] :
417 ///            memref<?x?xf32>, vector<256xf32>
418 ///       %5 = arith.addf %3, %4 : vector<256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<256xf32>
421 ///       %6 = arith.addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<256xf32>
424 ///       %7 = arith.addf %5, %cst_4 : vector<256xf32>
425 ///       %8 = arith.addf %7, %6 : vector<256xf32>
426 ///       vector.transfer_write %8, %2[%i4, %i5] :
427 ///                vector<256xf32>, memref<?x?xf32>
428 ///     }
429 ///   }
430 ///   %c7 = arith.constant 7 : index
431 ///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
433 ///   return %9 : f32
434 /// }
435 /// ```
436 ///
437 /// The -affine-super-vectorize pass with the following arguments:
438 /// ```
439 /// -affine-super-vectorize="virtual-vector-size=32,256 \
440 ///                          test-fastest-varying=1,0"
441 /// ```
442 ///
443 /// produces this more interesting mixed outer-innermost-loop vectorized code:
444 /// ```mlir
445 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
446 ///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
447 ///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
448 ///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
449 ///   %cst = arith.constant 1.0 : f32
450 ///   %cst_0 = arith.constant 2.0 : f32
451 ///   affine.for %i0 = 0 to %arg0 step 32 {
452 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<32x256xf32>
455 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
456 ///                vector<32x256xf32>, memref<?x?xf32>
457 ///     }
458 ///   }
459 ///   affine.for %i2 = 0 to %arg0 step 32 {
460 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<32x256xf32>
463 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
464 ///                vector<32x256xf32>, memref<?x?xf32>
465 ///     }
466 ///   }
467 ///   affine.for %i4 = 0 to %arg0 step 32 {
468 ///     affine.for %i5 = 0 to %arg1 step 256 {
469 ///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
471 ///       %4 = vector.transfer_read %1[%i4, %i5] :
472 ///                memref<?x?xf32>, vector<32x256xf32>
473 ///       %5 = arith.addf %3, %4 : vector<32x256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<32x256xf32>
476 ///       %6 = arith.addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<32x256xf32>
479 ///       %7 = arith.addf %5, %cst_4 : vector<32x256xf32>
480 ///       %8 = arith.addf %7, %6 : vector<32x256xf32>
481 ///       vector.transfer_write %8, %2[%i4, %i5] :
482 ///                vector<32x256xf32>, memref<?x?xf32>
483 ///     }
484 ///   }
485 ///   %c7 = arith.constant 7 : index
486 ///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
488 ///   return %9 : f32
489 /// }
490 /// ```
491 ///
492 /// Of course, much more intricate n-D imperfectly-nested patterns can be
493 /// vectorized too and specified in a fully declarative fashion.
494 ///
495 /// Reduction:
496 /// ==========
497 /// Vectorizing reduction loops along the reduction dimension is supported if:
498 /// - the reduction kind is supported,
499 /// - the vectorization is 1-D, and
/// - the step size of the loop is equal to one.
501 ///
/// Compared to the non-vector-dimension case, two additional things are done
/// during vectorization of such loops:
/// - The resulting vector returned from the loop is reduced to a scalar using
///   `vector.reduction`.
506 /// - In some cases a mask is applied to the vector yielded at the end of the
507 ///   loop to prevent garbage values from being written to the accumulator.
508 ///
/// Reduction vectorization is switched off by default; it can be enabled by
510 /// passing a map from loops to reductions to utility functions, or by passing
511 /// `vectorize-reductions=true` to the vectorization pass.
512 ///
513 /// Consider the following example:
514 /// ```mlir
515 /// func @vecred(%in: memref<512xf32>) -> f32 {
516 ///   %cst = arith.constant 0.000000e+00 : f32
517 ///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
518 ///     %ld = affine.load %in[%i] : memref<512xf32>
519 ///     %cos = math.cos %ld : f32
520 ///     %add = arith.addf %part_sum, %cos : f32
521 ///     affine.yield %add : f32
522 ///   }
523 ///   return %sum : f32
524 /// }
525 /// ```
526 ///
527 /// The -affine-super-vectorize pass with the following arguments:
528 /// ```
529 /// -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
530 ///                          vectorize-reductions=true"
531 /// ```
532 /// produces the following output:
533 /// ```mlir
534 /// #map = affine_map<(d0) -> (-d0 + 500)>
535 /// func @vecred(%arg0: memref<512xf32>) -> f32 {
536 ///   %cst = arith.constant 0.000000e+00 : f32
537 ///   %cst_0 = arith.constant dense<0.000000e+00> : vector<128xf32>
538 ///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
539 ///           -> (vector<128xf32>) {
540 ///     // %2 is the number of iterations left in the original loop.
541 ///     %2 = affine.apply #map(%arg1)
542 ///     %3 = vector.create_mask %2 : vector<128xi1>
543 ///     %cst_1 = arith.constant 0.000000e+00 : f32
544 ///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
545 ///                     memref<512xf32>, vector<128xf32>
546 ///     %5 = math.cos %4 : vector<128xf32>
547 ///     %6 = arith.addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
549 ///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
550 ///     affine.yield %7 : vector<128xf32>
551 ///   }
552 ///   %1 = vector.reduction <add>, %0 : vector<128xf32> into f32
553 ///   return %1 : f32
554 /// }
555 /// ```
556 ///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result. The mask is full of
/// ones in every iteration except for the last one, in which it has the form
560 /// `11...100...0` with 116 ones and 12 zeros.
561 
562 #define DEBUG_TYPE "early-vect"
563 
564 using llvm::dbgs;
565 
566 /// Forward declaration.
567 static FilterFunctionType
568 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
569                              int fastestVaryingMemRefDimension);
570 
571 /// Creates a vectorization pattern from the command line arguments.
572 /// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// llvm::None, which conservatively results in no vectorization.
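/// As an illustrative example (hypothetical values), `vectorRank == 2` with
/// `fastestVaryingPattern == {1, 0}` produces a pattern that matches 2-deep
/// AffineForOp nests whose outer loop accesses vary along memref dimension 1
/// and whose inner loop accesses vary along dimension 0 (or are invariant).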
575 static Optional<NestedPattern>
576 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
577             ArrayRef<int64_t> fastestVaryingPattern) {
578   using matcher::For;
579   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
580   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
581   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
582   switch (vectorRank) {
583   case 1:
584     return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
585   case 2:
586     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
587                For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
588   case 3:
589     return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
590                For(isVectorizableLoopPtrFactory(parallelLoops, d1),
591                    For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
592   default: {
593     return llvm::None;
594   }
595   }
596 }
597 
598 static NestedPattern &vectorTransferPattern() {
599   static auto pattern = matcher::Op([](Operation &op) {
600     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
601   });
602   return pattern;
603 }
604 
605 namespace {
606 
607 /// Base state for the vectorize pass.
608 /// Command line arguments are preempted by non-empty pass arguments.
609 struct Vectorize : public AffineVectorizeBase<Vectorize> {
610   Vectorize() = default;
611   Vectorize(ArrayRef<int64_t> virtualVectorSize);
612   void runOnOperation() override;
613 };
614 
615 } // namespace
616 
617 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
618   vectorSizes = virtualVectorSize;
619 }
620 
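/// Assigns a vector dimension to `loop` in `strategy` if the pattern depth
/// permits it. Illustrative sketch (hypothetical numbers): with
/// `strategy->vectorSizes = {32, 256}` and `patternDepth == 3`, the outermost
/// matched loop (`depthInPattern == 0`) is left scalar since 3 - 0 > 2, while
/// the loops at `depthInPattern` 1 and 2 are assigned vector dimensions 0 and
/// 1, respectively.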
621 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
622                                       unsigned patternDepth,
623                                       VectorizationStrategy *strategy) {
624   assert(patternDepth > depthInPattern &&
625          "patternDepth is greater than depthInPattern");
626   if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
627     // Don't vectorize this loop
628     return;
629   }
630   strategy->loopToVectorDim[loop] =
631       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
632 }
633 
634 /// Implements a simple strawman strategy for vectorization.
635 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
636 /// greedily assigns the fastest varying dimension ** of the vector ** to the
637 /// innermost loop in the pattern.
638 /// When coupled with a pattern that looks for the fastest varying dimension in
639 /// load/store MemRefs, this creates a generic vectorization strategy that works
640 /// for any loop in a hierarchy (outermost, innermost or intermediate).
641 ///
642 /// TODO: In the future we should additionally increase the power of the
643 /// profitability analysis along 3 directions:
644 ///   1. account for loop extents (both static and parametric + annotations);
645 ///   2. account for data layout permutations;
646 ///   3. account for impact of vectorization on maximal loop fusion.
647 /// Then we can quantify the above to build a cost model and search over
648 /// strategies.
649 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
650                                           unsigned depthInPattern,
651                                           unsigned patternDepth,
652                                           VectorizationStrategy *strategy) {
653   for (auto m : matches) {
654     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
655                                     patternDepth, strategy))) {
656       return failure();
657     }
658     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
659                               patternDepth, strategy);
660   }
661   return success();
662 }
663 
664 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
665 
666 namespace {
667 
668 struct VectorizationState {
669 
670   VectorizationState(MLIRContext *context) : builder(context) {}
671 
672   /// Registers the vector replacement of a scalar operation and its result
673   /// values. Both operations must have the same number of results.
674   ///
675   /// This utility is used to register the replacement for the vast majority of
676   /// the vectorized operations.
677   ///
678   /// Example:
679   ///   * 'replaced': %0 = arith.addf %1, %2 : f32
680   ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
681   void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
682 
683   /// Registers the vector replacement of a scalar value. The replacement
684   /// operation should have a single result, which replaces the scalar value.
685   ///
686   /// This utility is used to register the vector replacement of block arguments
687   /// and operation results which are not directly vectorized (i.e., their
688   /// scalar version still exists after vectorization), like uniforms.
689   ///
690   /// Example:
691   ///   * 'replaced': block argument or operation outside of the vectorized
692   ///     loop.
693   ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
694   void registerValueVectorReplacement(Value replaced, Operation *replacement);
695 
696   /// Registers the vector replacement of a block argument (e.g., iter_args).
697   ///
698   /// Example:
699   ///   * 'replaced': 'iter_arg' block argument.
700   ///   * 'replacement': vectorized 'iter_arg' block argument.
701   void registerBlockArgVectorReplacement(BlockArgument replaced,
702                                          BlockArgument replacement);
703 
704   /// Registers the scalar replacement of a scalar value. 'replacement' must be
705   /// scalar. Both values must be block arguments. Operation results should be
  /// replaced using the 'registerOp*' utilities.
707   ///
708   /// This utility is used to register the replacement of block arguments
709   /// that are within the loop to be vectorized and will continue being scalar
710   /// within the vector loop.
711   ///
712   /// Example:
713   ///   * 'replaced': induction variable of a loop to be vectorized.
714   ///   * 'replacement': new induction variable in the new vector loop.
715   void registerValueScalarReplacement(BlockArgument replaced,
716                                       BlockArgument replacement);
717 
718   /// Registers the scalar replacement of a scalar result returned from a
719   /// reduction loop. 'replacement' must be scalar.
720   ///
721   /// This utility is used to register the replacement for scalar results of
722   /// vectorized reduction loops with iter_args.
723   ///
  /// Example:
725   ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
726   ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into
727   ///   f32
728   void registerLoopResultScalarReplacement(Value replaced, Value replacement);
729 
730   /// Returns in 'replacedVals' the scalar replacement for values in
731   /// 'inputVals'.
732   void getScalarValueReplacementsFor(ValueRange inputVals,
733                                      SmallVectorImpl<Value> &replacedVals);
734 
735   /// Erases the scalar loop nest after its successful vectorization.
736   void finishVectorizationPattern(AffineForOp rootLoop);
737 
738   // Used to build and insert all the new operations created. The insertion
739   // point is preserved and updated along the vectorization process.
740   OpBuilder builder;
741 
742   // Maps input scalar operations to their vector counterparts.
743   DenseMap<Operation *, Operation *> opVectorReplacement;
744   // Maps input scalar values to their vector counterparts.
745   BlockAndValueMapping valueVectorReplacement;
746   // Maps input scalar values to their new scalar counterparts in the vector
747   // loop nest.
748   BlockAndValueMapping valueScalarReplacement;
749   // Maps results of reduction loops to their new scalar counterparts.
750   DenseMap<Value, Value> loopResultScalarReplacement;
751 
752   // Maps the newly created vector loops to their vector dimension.
753   DenseMap<Operation *, unsigned> vecLoopToVecDim;
754 
  // Maps the new vectorized loops to the corresponding vector masks, if a mask
  // is required.
757   DenseMap<Operation *, Value> vecLoopToMask;
758 
759   // The strategy drives which loop to vectorize by which amount.
760   const VectorizationStrategy *strategy = nullptr;
761 
762 private:
763   /// Internal implementation to map input scalar values to new vector or scalar
764   /// values.
765   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
766   void registerValueScalarReplacementImpl(Value replaced, Value replacement);
767 };
768 
769 } // namespace
770 
771 /// Registers the vector replacement of a scalar operation and its result
772 /// values. Both operations must have the same number of results.
773 ///
774 /// This utility is used to register the replacement for the vast majority of
775 /// the vectorized operations.
776 ///
777 /// Example:
778 ///   * 'replaced': %0 = arith.addf %1, %2 : f32
779 ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
780 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
781                                                      Operation *replacement) {
782   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
783   LLVM_DEBUG(dbgs() << *replaced << "\n");
784   LLVM_DEBUG(dbgs() << "into\n");
785   LLVM_DEBUG(dbgs() << *replacement << "\n");
786 
787   assert(replaced->getNumResults() == replacement->getNumResults() &&
788          "Unexpected replaced and replacement results");
789   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
790   opVectorReplacement[replaced] = replacement;
791 
792   for (auto resultTuple :
793        llvm::zip(replaced->getResults(), replacement->getResults()))
794     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
795                                        std::get<1>(resultTuple));
796 }
797 
798 /// Registers the vector replacement of a scalar value. The replacement
799 /// operation should have a single result, which replaces the scalar value.
800 ///
801 /// This utility is used to register the vector replacement of block arguments
802 /// and operation results which are not directly vectorized (i.e., their
803 /// scalar version still exists after vectorization), like uniforms.
804 ///
805 /// Example:
806 ///   * 'replaced': block argument or operation outside of the vectorized loop.
807 ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
808 void VectorizationState::registerValueVectorReplacement(
809     Value replaced, Operation *replacement) {
810   assert(replacement->getNumResults() == 1 &&
811          "Expected single-result replacement");
812   if (Operation *defOp = replaced.getDefiningOp())
813     registerOpVectorReplacement(defOp, replacement);
814   else
815     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
816 }
817 
818 /// Registers the vector replacement of a block argument (e.g., iter_args).
819 ///
820 /// Example:
821 ///   * 'replaced': 'iter_arg' block argument.
822 ///   * 'replacement': vectorized 'iter_arg' block argument.
823 void VectorizationState::registerBlockArgVectorReplacement(
824     BlockArgument replaced, BlockArgument replacement) {
825   registerValueVectorReplacementImpl(replaced, replacement);
826 }
827 
828 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
829                                                             Value replacement) {
830   assert(!valueVectorReplacement.contains(replaced) &&
831          "Vector replacement already registered");
832   assert(replacement.getType().isa<VectorType>() &&
833          "Expected vector type in vector replacement");
834   valueVectorReplacement.map(replaced, replacement);
835 }
836 
837 /// Registers the scalar replacement of a scalar value. 'replacement' must be
838 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
840 ///
841 /// This utility is used to register the replacement of block arguments
842 /// that are within the loop to be vectorized and will continue being scalar
843 /// within the vector loop.
844 ///
845 /// Example:
846 ///   * 'replaced': induction variable of a loop to be vectorized.
847 ///   * 'replacement': new induction variable in the new vector loop.
848 void VectorizationState::registerValueScalarReplacement(
849     BlockArgument replaced, BlockArgument replacement) {
850   registerValueScalarReplacementImpl(replaced, replacement);
851 }
852 
853 /// Registers the scalar replacement of a scalar result returned from a
854 /// reduction loop. 'replacement' must be scalar.
855 ///
856 /// This utility is used to register the replacement for scalar results of
857 /// vectorized reduction loops with iter_args.
858 ///
/// Example:
860 ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
861 ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into f32
862 void VectorizationState::registerLoopResultScalarReplacement(
863     Value replaced, Value replacement) {
864   assert(isa<AffineForOp>(replaced.getDefiningOp()));
865   assert(loopResultScalarReplacement.count(replaced) == 0 &&
866          "already registered");
867   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
868                        "with scalar: "
869                     << replacement);
870   loopResultScalarReplacement[replaced] = replacement;
871 }
872 
873 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
874                                                             Value replacement) {
875   assert(!valueScalarReplacement.contains(replaced) &&
876          "Scalar value replacement already registered");
877   assert(!replacement.getType().isa<VectorType>() &&
878          "Expected scalar type in scalar replacement");
879   valueScalarReplacement.map(replaced, replacement);
880 }
881 
882 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
883 void VectorizationState::getScalarValueReplacementsFor(
884     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
885   for (Value inputVal : inputVals)
886     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
887 }
888 
889 /// Erases a loop nest, including all its nested operations.
890 static void eraseLoopNest(AffineForOp forOp) {
891   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
892   forOp.erase();
893 }
894 
895 /// Erases the scalar loop nest after its successful vectorization.
896 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
897   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
898   eraseLoopNest(rootLoop);
899 }
900 
// Applies 'map' to 'mapOperands', returning the resulting values in 'results'.
// A single-result AffineApplyOp is created for each result of 'map'.
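// As an illustrative sketch (hypothetical operands), applying the two-result
// map (d0, d1) -> (d0 + 1, d1) to (%i, %j) would materialize:
//   %idx0 = affine.apply affine_map<(d0, d1) -> (d0 + 1)>(%i, %j)
//   %idx1 = affine.apply affine_map<(d0, d1) -> (d1)>(%i, %j)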
902 static void computeMemoryOpIndices(Operation *op, AffineMap map,
903                                    ValueRange mapOperands,
904                                    VectorizationState &state,
905                                    SmallVectorImpl<Value> &results) {
906   for (auto resultExpr : map.getResults()) {
907     auto singleResMap =
908         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
909     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
910                                                     mapOperands);
911     results.push_back(afOp);
912   }
913 }
914 
915 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
916 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
918 static FilterFunctionType
919 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
920                              int fastestVaryingMemRefDimension) {
921   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
922     auto loop = cast<AffineForOp>(forOp);
923     auto parallelIt = parallelLoops.find(loop);
924     if (parallelIt == parallelLoops.end())
925       return false;
926     int memRefDim = -1;
927     auto vectorizableBody =
928         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
929     if (!vectorizableBody)
930       return false;
931     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
932            memRefDim == fastestVaryingMemRefDimension;
933   };
934 }
935 
936 /// Returns the vector type resulting from applying the provided vectorization
937 /// strategy on the scalar type.
938 static VectorType getVectorType(Type scalarTy,
939                                 const VectorizationStrategy *strategy) {
940   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
941   return VectorType::get(strategy->vectorSizes, scalarTy);
942 }
943 
/// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
/// nullptr otherwise.
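/// As an illustrative sketch (assuming a 1-D strategy with vectorSizes = {128},
/// hypothetical values):
///   %cst = arith.constant 7.0 : f32
/// is replaced, at the top of the innermost vectorized ancestor loop, by
///   %vcst = arith.constant dense<7.0> : vector<128xf32>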
947 static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
948                                            VectorizationState &state) {
949   Type scalarTy = constOp.getType();
950   if (!VectorType::isValidElementType(scalarTy))
951     return nullptr;
952 
953   auto vecTy = getVectorType(scalarTy, state.strategy);
954   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
955 
956   OpBuilder::InsertionGuard guard(state.builder);
957   Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
958   // Find the innermost vectorized ancestor loop to insert the vector constant.
959   while (parentOp && !state.vecLoopToVecDim.count(parentOp))
960     parentOp = parentOp->getParentOp();
961   assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
962          isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
963   auto vecForOp = cast<AffineForOp>(parentOp);
964   state.builder.setInsertionPointToStart(vecForOp.getBody());
965   auto newConstOp =
966       state.builder.create<arith::ConstantOp>(constOp.getLoc(), vecAttr);
967 
968   // Register vector replacement for future uses in the scope.
969   state.registerOpVectorReplacement(constOp, newConstOp);
970   return newConstOp;
971 }
972 
973 /// Creates a constant vector filled with the neutral elements of the given
974 /// reduction. The scalar type of vector elements will be taken from
975 /// `oldOperand`.
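/// For instance (assuming an 'addf' reduction over f32 and vectorSizes = {128},
/// hypothetical values), the created initial vector would be
///   %init = arith.constant dense<0.000000e+00> : vector<128xf32>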
976 static arith::ConstantOp createInitialVector(arith::AtomicRMWKind reductionKind,
977                                              Value oldOperand,
978                                              VectorizationState &state) {
979   Type scalarTy = oldOperand.getType();
980   if (!VectorType::isValidElementType(scalarTy))
981     return nullptr;
982 
983   Attribute valueAttr = getIdentityValueAttr(
984       reductionKind, scalarTy, state.builder, oldOperand.getLoc());
985   auto vecTy = getVectorType(scalarTy, state.strategy);
986   auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
987   auto newConstOp =
988       state.builder.create<arith::ConstantOp>(oldOperand.getLoc(), vecAttr);
989 
990   return newConstOp;
991 }
992 
993 /// Creates a mask used to filter out garbage elements in the last iteration
994 /// of unaligned loops. If a mask is not required then `nullptr` is returned.
995 /// The mask will be a vector of booleans representing meaningful vector
996 /// elements in the current iteration. It is filled with ones for each iteration
997 /// except for the last one, where it has the form `11...100...0` with the
998 /// number of ones equal to the number of meaningful elements (i.e. the number
999 /// of iterations that would be left in the original loop).
1000 static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
1001   assert(state.strategy->vectorSizes.size() == 1 &&
1002          "Creating a mask non-1-D vectors is not supported.");
1003   assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
1004          "Creating a mask for loops with non-unit original step size is not "
1005          "supported.");
1006 
1007   // Check if we have already created the mask.
1008   if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1009     return mask;
1010 
1011   // If the loop has constant bounds and the original number of iterations is
  // divisible by the vector size then we don't need a mask.
1013   if (vecForOp.hasConstantBounds()) {
1014     int64_t originalTripCount =
1015         vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1016     if (originalTripCount % vecForOp.getStep() == 0)
1017       return nullptr;
1018   }
1019 
1020   OpBuilder::InsertionGuard guard(state.builder);
1021   state.builder.setInsertionPointToStart(vecForOp.getBody());
1022 
1023   // We generate the mask using the `vector.create_mask` operation which accepts
1024   // the number of meaningful elements (i.e. the length of the prefix of 1s).
1025   // To compute the number of meaningful elements we subtract the current value
1026   // of the iteration variable from the upper bound of the loop. Example:
1027   //
1028   //     // 500 is the upper bound of the loop
1029   //     #map = affine_map<(d0) -> (500 - d0)>
1030   //     %elems_left = affine.apply #map(%iv)
1031   //     %mask = vector.create_mask %elems_left : vector<128xi1>
1032 
1033   Location loc = vecForOp.getLoc();
1034 
1035   // First we get the upper bound of the loop using `affine.apply` or
1036   // `affine.min`.
1037   AffineMap ubMap = vecForOp.getUpperBoundMap();
1038   Value ub;
1039   if (ubMap.getNumResults() == 1)
1040     ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1041                                              vecForOp.getUpperBoundOperands());
1042   else
1043     ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1044                                            vecForOp.getUpperBoundOperands());
1045   // Then we compute the number of (original) iterations left in the loop.
1046   AffineExpr subExpr =
1047       state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1048   Value itersLeft =
1049       makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1050                               {ub, vecForOp.getInductionVar()});
1051   // If the affine maps were successfully composed then `ub` is unneeded.
1052   if (ub.use_empty())
1053     ub.getDefiningOp()->erase();
1054   // Finally we create the mask.
1055   Type maskTy = VectorType::get(state.strategy->vectorSizes,
1056                                 state.builder.getIntegerType(1));
1057   Value mask =
1058       state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1059 
1060   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1061                     << itersLeft << "\n"
1062                     << mask << "\n");
1063 
1064   state.vecLoopToMask[vecForOp] = mask;
1065   return mask;
1066 }
1067 
1068 /// Returns true if the provided value is vector uniform given the vectorization
1069 /// strategy.
// TODO: For now, only values that are induction variables of loops not in
// `loopToVectorDim`, or that are invariant to all the loops in the
// vectorization strategy, are considered vector uniforms.
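// For example, the induction variable of an outer loop that is not itself
// being vectorized is uniform with respect to the vectorized inner loop.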
1073 static bool isUniformDefinition(Value value,
1074                                 const VectorizationStrategy *strategy) {
1075   AffineForOp forOp = getForInductionVarOwner(value);
1076   if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
1077     return true;
1078 
1079   for (auto loopToDim : strategy->loopToVectorDim) {
1080     auto loop = cast<AffineForOp>(loopToDim.first);
1081     if (!loop.isDefinedOutsideOfLoop(value))
1082       return false;
1083   }
1084   return true;
1085 }
1086 
1087 /// Generates a broadcast op for the provided uniform value using the
1088 /// vectorization strategy in 'state'.
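/// Illustrative sketch (assuming vectorSizes = {128}, hypothetical names): a
/// uniform value %x : f32 becomes available to vector consumers as
///   %bx = vector.broadcast %x : f32 to vector<128xf32>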
1089 static Operation *vectorizeUniform(Value uniformVal,
1090                                    VectorizationState &state) {
1091   OpBuilder::InsertionGuard guard(state.builder);
1092   Value uniformScalarRepl =
1093       state.valueScalarReplacement.lookupOrDefault(uniformVal);
1094   state.builder.setInsertionPointAfterValue(uniformScalarRepl);
1095 
1096   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
1097   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
1098                                                    vectorTy, uniformScalarRepl);
1099   state.registerValueVectorReplacement(uniformVal, bcastOp);
1100   return bcastOp;
1101 }
1102 
1103 /// Tries to vectorize a given `operand` by applying the following logic:
1104 /// 1. if the defining operation has been already vectorized, `operand` is
1105 ///    already in the proper vector form;
1106 /// 2. if the `operand` is a constant, returns the vectorized form of the
1107 ///    constant;
/// 3. if the `operand` is uniform, returns a vector broadcast of it;
1109 /// 4. otherwise, the vectorization of `operand` is not supported.
1110 /// Newly created vector operations are registered in `state` as replacement
1111 /// for their scalar counterparts.
1112 /// In particular this logic captures some of the use cases where definitions
1113 /// that are not scoped under the current pattern are needed to vectorize.
1114 /// One such example is top level function constants that need to be splatted.
1115 ///
1116 /// Returns an operand that has been vectorized to match `state`'s strategy if
1117 /// vectorization is possible with the above logic. Returns nullptr otherwise.
1118 ///
1119 /// TODO: handle more complex cases.
1120 static Value vectorizeOperand(Value operand, VectorizationState &state) {
1121   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
1122   // If this value is already vectorized, we are done.
1123   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
1124     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
1125     return vecRepl;
1126   }
1127 
  // A vector operand that is not in the replacement map should never reach
  // this point. Hitting it would mean the code has already been vectorized and
  // we should not attempt to vectorize it again.
1131   assert(!operand.getType().isa<VectorType>() &&
1132          "Vector op not found in replacement map");
1133 
1134   // Vectorize constant.
1135   if (auto constOp = operand.getDefiningOp<arith::ConstantOp>()) {
1136     auto vecConstant = vectorizeConstant(constOp, state);
1137     LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
1138     return vecConstant.getResult();
1139   }
1140 
1141   // Vectorize uniform values.
1142   if (isUniformDefinition(operand, state.strategy)) {
1143     Operation *vecUniform = vectorizeUniform(operand, state);
1144     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
1145     return vecUniform->getResult(0);
1146   }
1147 
1148   // Check for unsupported block argument scenarios. A supported block argument
1149   // should have been vectorized already.
1150   if (!operand.getDefiningOp())
1151     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
1152   else
1153     // Generic unsupported case.
1154     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
1155 
1156   return nullptr;
1157 }
1158 
1159 /// Vectorizes an affine load with the vectorization strategy in 'state' by
1160 /// generating a 'vector.transfer_read' op with the proper permutation map
1161 /// inferred from the indices of the load. The new 'vector.transfer_read' is
1162 /// registered as replacement of the scalar load. Returns the newly created
/// 'vector.transfer_read' if vectorization was successful. Returns nullptr
/// otherwise.
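///
/// For example, assuming an illustrative 1-D strategy of vector size 128 along
/// the loop that defines %j, a scalar load such as:
///
///   %0 = affine.load %A[%j, %i] : memref<512x128xf32>
///
/// would be rewritten, roughly, into:
///
///   %0 = vector.transfer_read %A[%j, %i], %pad
///       {permutation_map = affine_map<(d0, d1) -> (d0)>}
///       : memref<512x128xf32>, vector<128xf32>
///
/// where %pad is the padding value added by the op builder.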
1165 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
1166                                       VectorizationState &state) {
1167   MemRefType memRefType = loadOp.getMemRefType();
1168   Type elementType = memRefType.getElementType();
1169   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1170 
1171   // Replace map operands with operands from the vector loop nest.
1172   SmallVector<Value, 8> mapOperands;
1173   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
1174 
  // Compute indices for the transfer op. AffineApplyOps may be generated.
1176   SmallVector<Value, 8> indices;
1177   indices.reserve(memRefType.getRank());
1178   if (loadOp.getAffineMap() !=
1179       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1180     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
1181                            indices);
1182   else
1183     indices.append(mapOperands.begin(), mapOperands.end());
1184 
1185   // Compute permutation map using the information of new vector loops.
1186   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1187                                            indices, state.vecLoopToVecDim);
1188   if (!permutationMap) {
1189     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
1190     return nullptr;
1191   }
1192   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1193   LLVM_DEBUG(permutationMap.print(dbgs()));
1194 
1195   auto transfer = state.builder.create<vector::TransferReadOp>(
1196       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
1197 
1198   // Register replacement for future uses in the scope.
1199   state.registerOpVectorReplacement(loadOp, transfer);
1200   return transfer;
1201 }
1202 
1203 /// Vectorizes an affine store with the vectorization strategy in 'state' by
1204 /// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
/// 'vector.transfer_write' if vectorization was successful. Returns nullptr
/// otherwise.
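///
/// For example, under the same illustrative 1-D strategy of vector size 128
/// along the loop that defines %j, a scalar store such as:
///
///   affine.store %v, %A[%j, %i] : memref<512x128xf32>
///
/// would be rewritten, roughly, into:
///
///   vector.transfer_write %vv, %A[%j, %i]
///       {permutation_map = affine_map<(d0, d1) -> (d0)>}
///       : vector<128xf32>, memref<512x128xf32>
///
/// where %vv is the vectorized form of %v.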
1209 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
1210                                        VectorizationState &state) {
1211   MemRefType memRefType = storeOp.getMemRefType();
1212   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1213   if (!vectorValue)
1214     return nullptr;
1215 
1216   // Replace map operands with operands from the vector loop nest.
1217   SmallVector<Value, 8> mapOperands;
1218   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
1219 
  // Compute indices for the transfer op. AffineApplyOps may be generated.
1221   SmallVector<Value, 8> indices;
1222   indices.reserve(memRefType.getRank());
1223   if (storeOp.getAffineMap() !=
1224       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1225     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1226                            indices);
1227   else
1228     indices.append(mapOperands.begin(), mapOperands.end());
1229 
1230   // Compute permutation map using the information of new vector loops.
1231   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1232                                            indices, state.vecLoopToVecDim);
1233   if (!permutationMap)
1234     return nullptr;
1235   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1236   LLVM_DEBUG(permutationMap.print(dbgs()));
1237 
1238   auto transfer = state.builder.create<vector::TransferWriteOp>(
1239       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1240       permutationMap);
1241   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1242 
1243   // Register replacement for future uses in the scope.
1244   state.registerOpVectorReplacement(storeOp, transfer);
1245   return transfer;
1246 }
1247 
1248 /// Returns true if `value` is a constant equal to the neutral element of the
1249 /// given vectorizable reduction.
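/// For example, 0.0 is the neutral element of an f32 "addf" reduction and 1.0
/// is the neutral element of an f32 "mulf" reduction.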
1250 static bool isNeutralElementConst(arith::AtomicRMWKind reductionKind,
1251                                   Value value, VectorizationState &state) {
1252   Type scalarTy = value.getType();
1253   if (!VectorType::isValidElementType(scalarTy))
1254     return false;
1255   Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1256                                              state.builder, value.getLoc());
1257   if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
1258     return constOp.getValue() == valueAttr;
1259   return false;
1260 }
1261 
1262 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1263 /// created and registered as replacement for the scalar loop. The builder's
1264 /// insertion point is set to the new loop's body so that subsequent vectorized
1265 /// operations are inserted into the new loop. If the loop is a vector
1266 /// dimension, the step of the newly created loop will reflect the vectorization
/// factor used to vectorize that dimension.
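///
/// For example, assuming an illustrative 1-D strategy of vector size 128 along
/// %i, a scalar loop such as:
///
///   affine.for %i = 0 to 512 {
///     ...
///   }
///
/// would be replaced, roughly, by a vector loop whose step reflects the
/// vectorization factor:
///
///   affine.for %i = 0 to 512 step 128 {
///     ... // vectorized body
///   }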
1268 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1269                                        VectorizationState &state) {
1270   const VectorizationStrategy &strategy = *state.strategy;
1271   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1272   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1273 
1274   // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1275   if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1276     LLVM_DEBUG(
1277         dbgs()
1278         << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1279         << forOp.getStep() << "\n");
1280     return nullptr;
1281   }
1282 
1283   // If we are vectorizing a vector dimension, compute a new step for the new
1284   // vectorized loop using the vectorization factor for the vector dimension.
1285   // Otherwise, propagate the step of the scalar loop.
1286   unsigned newStep;
1287   if (isLoopVecDim) {
1288     unsigned vectorDim = loopToVecDimIt->second;
1289     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1290     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1291     newStep = forOp.getStep() * forOpVecFactor;
1292   } else {
1293     newStep = forOp.getStep();
1294   }
1295 
1296   // Get information about reduction kinds.
1297   ArrayRef<LoopReduction> reductions;
1298   if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1299     auto it = strategy.reductionLoops.find(forOp);
1300     assert(it != strategy.reductionLoops.end() &&
1301            "Reduction descriptors not found when vectorizing a reduction loop");
1302     reductions = it->second;
1303     assert(reductions.size() == forOp.getNumIterOperands() &&
1304            "The size of reductions array must match the number of iter_args");
1305   }
1306 
1307   // Vectorize 'iter_args'.
1308   SmallVector<Value, 8> vecIterOperands;
1309   if (!isLoopVecDim) {
1310     for (auto operand : forOp.getIterOperands())
1311       vecIterOperands.push_back(vectorizeOperand(operand, state));
1312   } else {
1313     // For reduction loops we need to pass a vector of neutral elements as an
1314     // initial value of the accumulator. We will add the original initial value
1315     // later.
1316     for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
1317       vecIterOperands.push_back(createInitialVector(
1318           std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1319     }
1320   }
1321 
1322   auto vecForOp = state.builder.create<AffineForOp>(
1323       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1324       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1325       vecIterOperands,
1326       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1327         // Make sure we don't create a default terminator in the loop body as
1328         // the proper terminator will be added during vectorization.
1329       });
1330 
1331   // Register loop-related replacements:
1332   //   1) The new vectorized loop is registered as vector replacement of the
1333   //      scalar loop.
1334   //   2) The new iv of the vectorized loop is registered as scalar replacement
1335   //      since a scalar copy of the iv will prevail in the vectorized loop.
1336   //      TODO: A vector replacement will also be added in the future when
1337   //      vectorization of linear ops is supported.
1338   //   3) The new 'iter_args' region arguments are registered as vector
1339   //      replacements since they have been vectorized.
1340   //   4) If the loop performs a reduction along the vector dimension, a
1341   //      `vector.reduction` or similar op is inserted for each resulting value
1342   //      of the loop and its scalar value replaces the corresponding scalar
1343   //      result of the loop.
1344   state.registerOpVectorReplacement(forOp, vecForOp);
1345   state.registerValueScalarReplacement(forOp.getInductionVar(),
1346                                        vecForOp.getInductionVar());
1347   for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1349     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1350                                             std::get<1>(iterTuple));
1351 
1352   if (isLoopVecDim) {
1353     for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1354       // First, we reduce the vector returned from the loop into a scalar.
1355       Value reducedRes =
1356           getVectorReductionOp(reductions[i].kind, state.builder,
1357                                vecForOp.getLoc(), vecForOp.getResult(i));
1358       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1359                         << reducedRes);
1360       // Then we combine it with the original (scalar) initial value unless it
1361       // is equal to the neutral element of the reduction.
1362       Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1363       Value finalRes = reducedRes;
1364       if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1365         finalRes =
1366             arith::getReductionOp(reductions[i].kind, state.builder,
1367                                   reducedRes.getLoc(), reducedRes, origInit);
1368       state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1369     }
1370   }
1371 
1372   if (isLoopVecDim)
1373     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1374 
1375   // Change insertion point so that upcoming vectorized instructions are
1376   // inserted into the vectorized loop's body.
1377   state.builder.setInsertionPointToStart(vecForOp.getBody());
1378 
1379   // If this is a reduction loop then we may need to create a mask to filter out
1380   // garbage in the last iteration.
1381   if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1382     createMask(vecForOp, state);
1383 
1384   return vecForOp;
1385 }
1386 
/// Vectorizes an arbitrary operation by plain widening. We apply generic type
/// widening to all its results and retrieve the vector counterparts of all its
/// operands.
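///
/// For example, under an illustrative 1-D strategy of vector size 128, the
/// elementwise operation:
///
///   %res = arith.mulf %a, %b : f32
///
/// would be widened, roughly, into:
///
///   %vres = arith.mulf %va, %vb : vector<128xf32>
///
/// where %va and %vb are the vectorized forms of %a and %b.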
1390 static Operation *widenOp(Operation *op, VectorizationState &state) {
1391   SmallVector<Type, 8> vectorTypes;
1392   for (Value result : op->getResults())
1393     vectorTypes.push_back(
1394         VectorType::get(state.strategy->vectorSizes, result.getType()));
1395 
1396   SmallVector<Value, 8> vectorOperands;
1397   for (Value operand : op->getOperands()) {
1398     Value vecOperand = vectorizeOperand(operand, state);
1399     if (!vecOperand) {
1400       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1401       return nullptr;
1402     }
1403     vectorOperands.push_back(vecOperand);
1404   }
1405 
1406   // Create a clone of the op with the proper operands and return types.
1407   // TODO: The following assumes there is always an op with a fixed
1408   // name that works both in scalar mode and vector mode.
1409   // TODO: Is it worth considering an Operation.clone operation which
1410   // changes the type so we can promote an Operation with less boilerplate?
1411   OperationState vecOpState(op->getLoc(), op->getName(), vectorOperands,
1412                             vectorTypes, op->getAttrs(), /*successors=*/{},
1413                             /*regions=*/{});
1414   Operation *vecOp = state.builder.createOperation(vecOpState);
1415   state.registerOpVectorReplacement(op, vecOp);
1416   return vecOp;
1417 }
1418 
1419 /// Vectorizes a yield operation by widening its types. The builder's insertion
1420 /// point is set after the vectorized parent op to continue vectorizing the
1421 /// operations after the parent op. When vectorizing a reduction loop a mask may
1422 /// be used to prevent adding garbage values to the accumulator.
1423 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1424                                          VectorizationState &state) {
1425   Operation *newYieldOp = widenOp(yieldOp, state);
1426   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1427 
1428   // If there is a mask for this loop then we must prevent garbage values from
1429   // being added to the accumulator by inserting `select` operations, for
1430   // example:
1431   //
1432   //   %res = arith.addf %acc, %val : vector<128xf32>
  //   %res_masked = arith.select %mask, %res, %acc : vector<128xi1>, vector<128xf32>
1434   //   affine.yield %res_masked : vector<128xf32>
1435   //
1436   if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1437     state.builder.setInsertionPoint(newYieldOp);
1438     for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
1439       Value result = newYieldOp->getOperand(i);
1440       Value iterArg = cast<AffineForOp>(newParentOp).getRegionIterArgs()[i];
1441       Value maskedResult = state.builder.create<arith::SelectOp>(
1442           result.getLoc(), mask, result, iterArg);
1443       LLVM_DEBUG(
1444           dbgs() << "\n[early-vect]+++++ masking a yielded vector value: "
1445                  << maskedResult);
1446       newYieldOp->setOperand(i, maskedResult);
1447     }
1448   }
1449 
1450   state.builder.setInsertionPointAfter(newParentOp);
1451   return newYieldOp;
1452 }
1453 
1454 /// Encodes Operation-specific behavior for vectorization. In general we
1455 /// assume that all operands of an op must be vectorized but this is not
1456 /// always true. In the future, it would be nice to have a trait that
1457 /// describes how a particular operation vectorizes. For now we implement the
1458 /// case distinction here. Returns a vectorized form of an operation or
1459 /// nullptr if vectorization fails.
1460 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1461 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1462 // do one-off logic here; ideally it would be TableGen'd.
1463 static Operation *vectorizeOneOperation(Operation *op,
1464                                         VectorizationState &state) {
1465   // Sanity checks.
1466   assert(!isa<vector::TransferReadOp>(op) &&
1467          "vector.transfer_read cannot be further vectorized");
1468   assert(!isa<vector::TransferWriteOp>(op) &&
1469          "vector.transfer_write cannot be further vectorized");
1470 
1471   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1472     return vectorizeAffineLoad(loadOp, state);
1473   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1474     return vectorizeAffineStore(storeOp, state);
1475   if (auto forOp = dyn_cast<AffineForOp>(op))
1476     return vectorizeAffineForOp(forOp, state);
1477   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1478     return vectorizeAffineYieldOp(yieldOp, state);
1479   if (auto constant = dyn_cast<arith::ConstantOp>(op))
1480     return vectorizeConstant(constant, state);
1481 
1482   // Other ops with regions are not supported.
1483   if (op->getNumRegions() != 0)
1484     return nullptr;
1485 
1486   return widenOp(op, state);
1487 }
1488 
1489 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1490 /// vector container that preserves the relative nesting level of each loop with
1491 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1492 /// will be assigned to the loop in the current 'match'.
1493 static void
1494 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1495                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
1496   // Add a new empty level to the output if it doesn't exist already.
1497   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1498   if (currentLevel == loops.size())
1499     loops.emplace_back();
1500 
1501   // Add current match and recursively visit its children.
1502   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1503   for (auto childMatch : match.getMatchedChildren()) {
1504     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1505   }
1506 }
1507 
1508 /// Converts all the nested loops in 'match' to a 2D vector container that
1509 /// preserves the relative nesting level of each loop with respect to the others
1510 /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
1511 /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
1512 /// 'loops[i+1]'.
1513 static void
1514 getMatchedAffineLoops(NestedMatch match,
1515                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1517 }
1518 
1519 /// Internal implementation to vectorize affine loops from a single loop nest
1520 /// using an n-D vectorization strategy.
1521 static LogicalResult
1522 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1523                   const VectorizationStrategy &strategy) {
1524   assert(loops[0].size() == 1 && "Expected single root loop");
1525   AffineForOp rootLoop = loops[0][0];
1526   VectorizationState state(rootLoop.getContext());
1527   state.builder.setInsertionPointAfter(rootLoop);
1528   state.strategy = &strategy;
1529 
1530   // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
1533   // As a consequence we must check that each root pattern is still
1534   // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1535   // TODO: implement a non-greedy profitability analysis that keeps only
1536   // non-intersecting patterns.
1537   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1538     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1539     return failure();
1540   }
1541 
1542   //////////////////////////////////////////////////////////////////////////////
1543   // Vectorize the scalar loop nest following a topological order. A new vector
1544   // loop nest with the vectorized operations is created along the process. If
1545   // vectorization succeeds, the scalar loop nest is erased. If vectorization
1546   // fails, the vector loop nest is erased and the scalar loop nest is not
1547   // modified.
1548   //////////////////////////////////////////////////////////////////////////////
1549 
1550   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1551     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1552     Operation *vectorOp = vectorizeOneOperation(op, state);
1553     if (!vectorOp) {
1554       LLVM_DEBUG(
1555           dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
1556                  << *op << "\n");
1557       return WalkResult::interrupt();
1558     }
1559 
1560     return WalkResult::advance();
1561   });
1562 
1563   if (opVecResult.wasInterrupted()) {
1564     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1565                       << rootLoop << "\n");
1566     // Erase vector loop nest if it was created.
1567     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1568     if (vecRootLoopIt != state.opVectorReplacement.end())
1569       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1570 
1571     return failure();
1572   }
1573 
1574   // Replace results of reduction loops with the scalar values computed using
1575   // `vector.reduce` or similar ops.
1576   for (auto resPair : state.loopResultScalarReplacement)
1577     resPair.first.replaceAllUsesWith(resPair.second);
1578 
1579   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1580          "Expected vector replacement for loop nest");
1581   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1582   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1583                     << *state.opVectorReplacement[rootLoop]);
1584 
1585   // Finish this vectorization pattern.
1586   state.finishVectorizationPattern(rootLoop);
1587   return success();
1588 }
1589 
/// Extracts the matched loops and vectorizes them following a topological
/// order. A new vector loop nest is created if vectorization succeeds, and the
/// original scalar loop nest is then erased. If vectorization fails, the
/// original loop nest is left unmodified.
1593 static LogicalResult vectorizeRootMatch(NestedMatch m,
1594                                         const VectorizationStrategy &strategy) {
1595   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1596   getMatchedAffineLoops(m, loopsToVectorize);
1597   return vectorizeLoopNest(loopsToVectorize, strategy);
1598 }
1599 
1600 /// Traverses all the loop matches and classifies them into intersection
1601 /// buckets. Two matches intersect if any of them encloses the other one. A
1602 /// match intersects with a bucket if the match intersects with the root
1603 /// (outermost) loop in that bucket.
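/// For example, two matches rooted at loops %a and %b end up in the same
/// bucket if %a encloses %b (or vice versa); the bucket root is then the
/// outermost of the two loops.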
1604 static void computeIntersectionBuckets(
1605     ArrayRef<NestedMatch> matches,
1606     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1607   assert(intersectionBuckets.empty() && "Expected empty output");
1608   // Keeps track of the root (outermost) loop of each bucket.
1609   SmallVector<AffineForOp, 8> bucketRoots;
1610 
1611   for (const NestedMatch &match : matches) {
1612     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1613     bool intersects = false;
1614     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1615       AffineForOp bucketRoot = bucketRoots[i];
1616       // Add match to the bucket if the bucket root encloses the match root.
1617       if (bucketRoot->isAncestor(matchRoot)) {
1618         intersectionBuckets[i].push_back(match);
1619         intersects = true;
1620         break;
1621       }
1622       // Add match to the bucket if the match root encloses the bucket root. The
1623       // match root becomes the new bucket root.
1624       if (matchRoot->isAncestor(bucketRoot)) {
1625         bucketRoots[i] = matchRoot;
1626         intersectionBuckets[i].push_back(match);
1627         intersects = true;
1628         break;
1629       }
1630     }
1631 
1632     // Match doesn't intersect with any existing bucket. Create a new bucket for
1633     // it.
1634     if (!intersects) {
1635       bucketRoots.push_back(matchRoot);
1636       intersectionBuckets.emplace_back();
1637       intersectionBuckets.back().push_back(match);
1638     }
1639   }
1640 }
1641 
1642 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1643 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1644 /// factor is applied inner-to-outer to the loops of each loop nest.
1645 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1646 /// vectorization order. `reductionLoops` can be provided to specify loops which
1647 /// can be vectorized along the reduction dimension.
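/// For example, with illustrative vectorization factors vectorSizes = {4, 8}
/// on a 2-D loop nest, the innermost matched loop is vectorized by a factor of
/// 8 and its parent by a factor of 4, producing vector<4x8x...> value types.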
1648 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1649                            ArrayRef<int64_t> vectorSizes,
1650                            ArrayRef<int64_t> fastestVaryingPattern,
1651                            const ReductionLoopMap &reductionLoops) {
1652   assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
1653          "Vectorizing reductions is supported only for 1-D vectors");
1654 
1655   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1656   Optional<NestedPattern> pattern =
1657       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1658   if (!pattern.hasValue()) {
1659     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1660     return;
1661   }
1662 
1663   LLVM_DEBUG(dbgs() << "\n******************************************");
1664   LLVM_DEBUG(dbgs() << "\n******************************************");
1665   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1666   LLVM_DEBUG(dbgs() << *parentOp << "\n");
1667 
1668   unsigned patternDepth = pattern->getDepth();
1669 
1670   // Compute all the pattern matches and classify them into buckets of
1671   // intersecting matches.
1672   SmallVector<NestedMatch, 32> allMatches;
1673   pattern->match(parentOp, &allMatches);
1674   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1675   computeIntersectionBuckets(allMatches, intersectionBuckets);
1676 
1677   // Iterate over all buckets and vectorize the matches eagerly. We can only
1678   // vectorize one match from each bucket since all the matches within a bucket
1679   // intersect.
1680   for (auto &intersectingMatches : intersectionBuckets) {
1681     for (NestedMatch &match : intersectingMatches) {
1682       VectorizationStrategy strategy;
1683       // TODO: depending on profitability, elect to reduce the vector size.
1684       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1685       strategy.reductionLoops = reductionLoops;
1686       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1687                                       patternDepth, &strategy))) {
1688         continue;
1689       }
1690       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1691                                 &strategy);
1692       // Vectorize match. Skip the rest of intersecting matches in the bucket if
1693       // vectorization succeeded.
1694       // TODO: if pattern does not apply, report it; alter the cost/benefit.
1695       // TODO: some diagnostics if failure to vectorize occurs.
1696       if (succeeded(vectorizeRootMatch(match, strategy)))
1697         break;
1698     }
1699   }
1700 
1701   LLVM_DEBUG(dbgs() << "\n");
1702 }
1703 
/// Applies vectorization to the current function by searching over a set of
/// predetermined patterns.
1714 void Vectorize::runOnOperation() {
1715   FuncOp f = getOperation();
1716   if (!fastestVaryingPattern.empty() &&
1717       fastestVaryingPattern.size() != vectorSizes.size()) {
1718     f.emitRemark("Fastest varying pattern specified with different size than "
1719                  "the vector size.");
1720     return signalPassFailure();
1721   }
1722 
1723   if (vectorizeReductions && vectorSizes.size() != 1) {
1724     f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
1725     return signalPassFailure();
1726   }
1727 
1728   DenseSet<Operation *> parallelLoops;
1729   ReductionLoopMap reductionLoops;
1730 
1731   // If 'vectorize-reduction=true' is provided, we also populate the
1732   // `reductionLoops` map.
1733   if (vectorizeReductions) {
1734     f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
1735       SmallVector<LoopReduction, 2> reductions;
1736       if (isLoopParallel(loop, &reductions)) {
1737         parallelLoops.insert(loop);
1738         // If it's not a reduction loop, adding it to the map is not necessary.
1739         if (!reductions.empty())
1740           reductionLoops[loop] = reductions;
1741       }
1742     });
1743   } else {
1744     f.walk([&parallelLoops](AffineForOp loop) {
1745       if (isLoopParallel(loop))
1746         parallelLoops.insert(loop);
1747     });
1748   }
1749 
1750   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1751   NestedPatternContext mlContext;
1752   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
1753                  reductionLoops);
1754 }
1755 
1756 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1757 /// SuperVectorizer:
1758 ///   * There must be at least one loop.
1759 ///   * There must be a single root loop (nesting level 0).
1760 ///   * Each loop at a given nesting level must be nested in a loop from a
1761 ///     previous nesting level.
1762 static LogicalResult
1763 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1764   // Expected at least one loop.
1765   if (loops.empty())
1766     return failure();
1767 
1768   // Expected only one root loop.
1769   if (loops[0].size() != 1)
1770     return failure();
1771 
1772   // Traverse loops outer-to-inner to check some invariants.
1773   for (int i = 1, end = loops.size(); i < end; ++i) {
1774     for (AffineForOp loop : loops[i]) {
1775       //  Check that each loop at this level is nested in one of the loops from
1776       //  the previous level.
1777       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1778             return maybeParent->isProperAncestor(loop);
1779           }))
1780         return failure();
1781 
1782       //  Check that each loop at this level is not nested in another loop from
1783       //  this level.
1784       for (AffineForOp sibling : loops[i]) {
1785         if (sibling->isProperAncestor(loop))
1786           return failure();
1787       }
1788     }
1789   }
1790 
1791   return success();
1792 }
1793 
1794 namespace mlir {
1795 
1796 /// External utility to vectorize affine loops in 'loops' using the n-D
1797 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1798 /// factor is applied inner-to-outer to the loops of each loop nest.
1799 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1800 /// vectorization order.
1801 /// If `reductionLoops` is not empty, the given reduction loops may be
1802 /// vectorized along the reduction dimension.
1803 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
1804 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1805                           ArrayRef<int64_t> vectorSizes,
1806                           ArrayRef<int64_t> fastestVaryingPattern,
1807                           const ReductionLoopMap &reductionLoops) {
1808   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1809   NestedPatternContext mlContext;
1810   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
1811                  reductionLoops);
1812 }
1813 
1814 /// External utility to vectorize affine loops from a single loop nest using an
1815 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1816 /// Loops are provided in a 2D vector container. The first dimension represents
1817 /// the nesting level relative to the loops to be vectorized. The second
1818 /// dimension contains the loops. This means that:
1819 ///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
1820 ///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
1821 ///
1822 /// For example, for the following loop nest:
1823 ///
1824 ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1825 ///               %out0: memref<64x128x512xf32>,
1826 ///               %out1: memref<64x128x128xf32>) {
1827 ///     affine.for %i0 = 0 to 64 {
1828 ///       affine.for %i1 = 0 to 128 {
1829 ///         affine.for %i2 = 0 to 512 {
1830 ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1831 ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1832 ///         }
1833 ///         affine.for %i3 = 0 to 128 {
1834 ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1835 ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1836 ///         }
1837 ///       }
1838 ///     }
1839 ///     return
1840 ///   }
1841 ///
1842 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1843 /// innermost loops;
1844 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1845 /// loops;
1846 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1847 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1848 /// loops = {{%i1}}, to vectorize only the middle loop.
1849 LogicalResult
1850 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1851                         const VectorizationStrategy &strategy) {
1852   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1853   NestedPatternContext mlContext;
1854   if (failed(verifyLoopNesting(loops)))
1855     return failure();
1856   return vectorizeLoopNest(loops, strategy);
1857 }
1858 
1859 std::unique_ptr<OperationPass<FuncOp>>
1860 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1861   return std::make_unique<Vectorize>(virtualVectorSize);
1862 }
1863 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1864   return std::make_unique<Vectorize>();
1865 }
1866 
1867 } // namespace mlir
1868