1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PassDetail.h"
15 #include "mlir/Analysis/LoopAnalysis.h"
16 #include "mlir/Analysis/NestedMatcher.h"
17 #include "mlir/Analysis/SliceAnalysis.h"
18 #include "mlir/Analysis/Utils.h"
19 #include "mlir/Dialect/Affine/IR/AffineOps.h"
20 #include "mlir/Dialect/Affine/Passes.h"
21 #include "mlir/Dialect/Affine/Utils.h"
22 #include "mlir/Dialect/StandardOps/IR/Ops.h"
23 #include "mlir/Dialect/Vector/VectorOps.h"
24 #include "mlir/Dialect/Vector/VectorUtils.h"
25 #include "mlir/IR/AffineExpr.h"
26 #include "mlir/IR/Builders.h"
27 #include "mlir/IR/Location.h"
28 #include "mlir/IR/Types.h"
29 #include "mlir/Support/LLVM.h"
30 #include "mlir/Transforms/FoldUtils.h"
31 
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/DenseSet.h"
34 #include "llvm/ADT/SetVector.h"
35 #include "llvm/ADT/SmallString.h"
36 #include "llvm/ADT/SmallVector.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 
40 using namespace mlir;
41 using namespace vector;
42 
43 ///
44 /// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which provide a single,
/// compact representation in the vector types of information that is expected
/// to reduce the impact of the phase ordering problem.
48 ///
49 /// Vector granularity:
50 /// ===================
51 /// This pass is designed to perform vectorization at a super-vector
52 /// granularity. A super-vector is loosely defined as a vector type that is a
53 /// multiple of a "good" vector size so the HW can efficiently implement a set
54 /// of high-level primitives. Multiple is understood along any dimension; e.g.
55 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
56 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
57 /// efficiently implement a set of high-level primitives" is not necessarily an
58 /// integer multiple of actual hardware registers. We leave details of this
59 /// distinction unspecified for now.
60 ///
/// Some may prefer the term a "tile of HW vectors". In this case, one
62 /// should note that super-vectors implement an "always full tile" abstraction.
63 /// They guarantee no partial-tile separation is necessary by relying on a
64 /// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
66 /// transposition if necessary. In the general case this will require a scoped
67 /// allocation in some notional local memory.
68 ///
69 /// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn into a single, compact representation in the vector
/// types the information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
73 ///   1. the associated loops have dependency semantics that do not prevent
74 ///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
81 ///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality.
/// In other words, super-vectorization provides a level of separation of
/// concerns by way of opacity to subsequent passes. This has the effect of
85 /// encapsulating and propagating vectorization constraints down the list of
86 /// passes until we are ready to lower further.
87 ///
/// For a particular target, a notion of minimal n-D vector size will be
/// specified and vectorization targets a multiple of it. In the following
90 /// paragraph, let "k ." represent "a multiple of", to be understood as a
91 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
92 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
93 ///
94 /// Some non-exhaustive notable super-vector sizes of interest include:
95 ///   - CPU: vector<k . HW_vector_size>,
96 ///          vector<k' . core_count x k . HW_vector_size>,
97 ///          vector<socket_count x k' . core_count x k . HW_vector_size>;
98 ///   - GPU: vector<k . warp_size>,
99 ///          vector<k . warp_size x float2>,
100 ///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
102 ///
103 /// Loops and operations are emitted that operate on those super-vector shapes.
104 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
105 /// passes are expected to be (gradually) more target-specific.
106 ///
107 /// At a high level, a vectorized load in a loop will resemble:
108 /// ```mlir
109 ///   affine.for %i = ? to ? step ? {
110 ///     %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
111 ///   }
112 /// ```
113 /// It is the responsibility of the implementation of vector.transfer_read to
114 /// materialize vector registers from the original scalar memrefs. A later (more
115 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
117 ///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
118 ///      DmaWaitOp + vectorized operations for data transformations and shuffle;
119 ///      thus opening opportunities for unrolling and pipelining. This is an
120 ///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call; achieving full separation of concerns. This is an instance of a
///      library call; or
124 ///   3. a mix of both, e.g. based on a model.
125 /// In the future, these operations will expose a contract to constrain the
126 /// search on vectorization patterns and sizes.
127 ///
128 /// Occurrence of super-vectorization in the compiler flow:
129 /// =======================================================
130 /// This is an active area of investigation. We start with 2 remarks to position
131 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
132 /// and LLVM SLP Vectorizer.
133 ///
134 /// LLVM VPLAN:
135 /// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN:
/// for instance, late in a traditional polyhedral compilation flow (e.g. the
/// PPCG project uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping and register optimizations) and before unrolling. When
/// vectorization is applied at this *late* level in a typical
143 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
144 /// super-vectorization is expected to match (or subsume) the type of patterns
145 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
146 /// is higher level and our implementation should be significantly simpler. Also
147 /// note that in this mode, recursive patterns are probably a bit of an overkill
148 /// although it is reasonable to expect that mixing a bit of outer loop and
149 /// inner loop vectorization + unrolling will provide interesting choices to
150 /// MLIR.
151 ///
152 /// LLVM SLP Vectorizer:
153 /// --------------------
/// Super-vectorization, however, is not meant to be used in a fashion similar
/// to the SLP vectorizer. The main difference lies in the information that
156 /// both vectorizers use: super-vectorization examines contiguity of memory
157 /// references along fastest varying dimensions and loops with recursive nested
158 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
159 /// the other hand, performs flat pattern matching inside a single unrolled loop
160 /// body and stitches together pieces of load and store operations into full
161 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
162 /// innermost loop, control-flow dependent patterns that super-vectorization may
163 /// not be able to capture easily. In other words, super-vectorization does not
164 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
165 ///
166 /// Ongoing investigations:
167 /// -----------------------
168 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
170 /// opportunities in the context of the traditional polyhedral compiler flow
171 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
172 /// we expect to experiment with super-vectorization:
173 /// 1. Right after language lowering to MLIR: this is the earliest time where
174 ///    super-vectorization is expected to be applied. At this level, all the
175 ///    language/user/library-level annotations are available and can be fully
176 ///    exploited. Examples include loop-type annotations (such as parallel,
177 ///    reduction, scan, dependence distance vector, vectorizable) as well as
///    memory access annotations (such as non-aliasing writes guaranteed,
///    indirect accesses that are permutations by construction), or the fact
///    that a particular operation is prescribed atomic by the user. At this
181 ///    level, anything that enriches what dependence analysis can do should be
182 ///    aggressively exploited. At this level we are close to having explicit
183 ///    vector types in the language, except we do not impose that burden on the
184 ///    programmer/library: we derive information from scalar code + annotations.
185 /// 2. After dependence analysis and before polyhedral scheduling: the
186 ///    information that supports vectorization does not need to be supplied by a
187 ///    higher level of abstraction. Traditional dependence analysis is available
188 ///    in MLIR and will be used to drive vectorization and cost models.
189 ///
190 /// Let's pause here and remark that applying super-vectorization as described
191 /// in 1. and 2. presents clear opportunities and risks:
192 ///   - the opportunity is that vectorization is burned in the type system and
193 ///   is protected from the adverse effect of loop scheduling, tiling, loop
194 ///   interchange and all passes downstream. Provided that subsequent passes are
///   able to operate on vector types, the vector shapes, associated loop
///   iterator properties, alignment, and contiguity of fastest varying
///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true, wherever
201 ///   vectorization is applied) and b. that imposing vectorization constraints
202 ///   too early may be overall detrimental to loop fusion, tiling and other
203 ///   transformations because the dependence distances are coarsened when
204 ///   operating on elemental vector types. For this reason, the pattern
205 ///   profitability analysis should include a component that also captures the
206 ///   maximal amount of fusion available under a particular pattern. This is
207 ///   still at the stage of rough ideas but in this context, search is our
208 ///   friend as the Tensor Comprehensions and auto-TVM contributions
209 ///   demonstrated previously.
/// The bottom line is that we do not yet have good answers for the above but
/// aim at making it easy to answer such questions.
212 ///
213 /// Back to our listing, the last places where early super-vectorization makes
214 /// sense are:
215 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
216 ///    to improve locality, parallelism and be configurable (e.g. max-fuse,
217 ///    smart-fuse etc). They can also have adverse effects on contiguity
218 ///    properties that are required for vectorization but the vector.transfer
219 ///    copy-reshape-pad-transpose abstraction is expected to help recapture
220 ///    these properties.
221 /// 4. right after polyhedral-style scheduling+tiling;
222 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
223 ///    probably the most promising places because applying tiling achieves a
224 ///    separation of concerns that allows rescheduling to worry less about
225 ///    locality and more about parallelism and distribution (e.g. min-fuse).
226 ///
227 /// At these levels the risk-reward looks different: on one hand we probably
228 /// lost a good deal of language/user/library-level annotation; on the other
229 /// hand we gained parallelism and locality through scheduling and tiling.
230 /// However we probably want to ensure tiling is compatible with the
231 /// full-tile-only abstraction used in super-vectorization or suffer the
232 /// consequences. It is too early to place bets on what will win but we expect
233 /// super-vectorization to be the right abstraction to allow exploring at all
234 /// these levels. And again, search is our friend.
235 ///
236 /// Lastly, we mention it again here:
/// 6. as an MLIR-based alternative to VPLAN.
238 ///
239 /// Lowering, unrolling, pipelining:
240 /// ================================
241 /// TODO: point to the proper places.
242 ///
243 /// Algorithm:
244 /// ==========
245 /// The algorithm proceeds in a few steps:
246 ///  1. defining super-vectorization patterns and matching them on the tree of
///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
///     loops that have a. conformable loop annotations attached (e.g. parallel,
///     reduction, vectorizable, ...) as well as b. all contiguous load/store
///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
253 ///  2. analyzing those patterns for profitability (TODO: and
254 ///     interference);
255 ///  3. Then, for each pattern in order:
256 ///    a. applying iterative rewriting of the loop and the load operations in
257 ///       DFS postorder. Rewriting is implemented by coarsening the loops and
258 ///       turning load operations into opaque vector.transfer_read ops;
259 ///    b. keeping track of the load operations encountered as "roots" and the
260 ///       store operations as "terminals";
261 ///    c. traversing the use-def chains starting from the roots and iteratively
262 ///       propagating vectorized values. Scalar values that are encountered
263 ///       during this process must come from outside the scope of the current
264 ///       pattern (TODO: enforce this and generalize). Such a scalar value
265 ///       is vectorized only if it is a constant (into a vector splat). The
266 ///       non-constant case is not supported for now and results in the pattern
267 ///       failing to vectorize;
///    d. performing a second traversal on the terminals (store ops) to
///       rewrite the scalar value they write to memory into vector form.
270 ///       If the scalar value has been vectorized previously, we simply replace
271 ///       it by its vector form. Otherwise, if the scalar value is a constant,
272 ///       it is vectorized into a splat. In all other cases, vectorization for
273 ///       the pattern currently fails.
274 ///    e. if everything under the root AffineForOp in the current pattern
275 ///       vectorizes properly, we commit that loop to the IR. Otherwise we
276 ///       discard it and restore a previously cloned version of the loop. Thanks
277 ///       to the recursive scoping nature of matchers and captured patterns,
278 ///       this is transparently achieved by a simple RAII implementation.
279 ///    f. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
282 ///       re-verify that the pattern is still vectorizable. This is expected to
283 ///       make cost models more difficult to write and is subject to improvement
284 ///       in the future.
285 ///
286 /// Points c. and d. above are worth additional comment. In most passes that
287 /// do not change the type of operands, it is usually preferred to eagerly
288 /// `replaceAllUsesWith`. Unfortunately this does not work for vectorization
289 /// because during the use-def chain traversal, all the operands of an operation
290 /// must be available in vector form. Trying to propagate eagerly makes the IR
291 /// temporarily invalid and results in errors such as:
292 ///   `vectorize.mlir:308:13: error: 'addf' op requires the same type for all
293 ///   operands and results
294 ///      %s5 = addf %a5, %b5 : f32`
295 ///
296 /// Lastly, we show a minimal example for which use-def chains rooted in load /
297 /// vector.transfer_read are not enough. This is what motivated splitting
298 /// terminal processing out of the use-def chains starting from loads. In the
/// following snippet, there is simply no load:
300 /// ```mlir
301 /// func @fill(%A : memref<128xf32>) -> () {
302 ///   %f1 = constant 1.0 : f32
303 ///   affine.for %i0 = 0 to 32 {
304 ///     affine.store %f1, %A[%i0] : memref<128xf32, 0>
305 ///   }
306 ///   return
307 /// }
308 /// ```
309 ///
310 /// Choice of loop transformation to support the algorithm:
311 /// =======================================================
312 /// The choice of loop transformation to apply for coarsening vectorized loops
/// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor of 128, i.e. to transform the following input:
315 /// ```mlir
316 ///   affine.for %i = %M to %N {
317 ///     %a = affine.load %A[%i] : memref<?xf32>
318 ///   }
319 /// ```
320 ///
321 /// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion, etc.), say after stripmining (and potentially unrolling
/// in the case of LLVM's SLP vectorizer):
324 /// ```mlir
325 ///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
326 ///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
327 ///       %a = affine.load %A[%ii] : memref<?xf32>
328 ///     }
329 ///   }
330 /// ```
331 ///
332 /// Instead, we seek to vectorize early and freeze vector types before
333 /// scheduling, so we want to generate a pattern that resembles:
334 /// ```mlir
335 ///   affine.for %i = ? to ? step ? {
336 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
337 ///   }
338 /// ```
339 ///
/// Two alternatives are considered for representing the coarsening:
/// i. simply dividing the lower / upper bounds by 128 creates issues
341 ///    when representing expressions such as ii + 1 because now we only
342 ///    have access to original values that have been divided. Additional
343 ///    information is needed to specify accesses at below-128 granularity;
344 /// ii. another alternative is to coarsen the loop step but this may have
345 ///    consequences on dependence analysis and fusability of loops: fusable
346 ///    loops probably need to have the same step (because we don't want to
347 ///    stripmine/unroll to enable fusion).
348 /// As a consequence, we choose to represent the coarsening using the loop
349 /// step for now and reevaluate in the future. Note that we can renormalize
350 /// loop steps later if/when we have evidence that they are problematic.
351 ///
352 /// For the simple strawman example above, vectorizing for a 1-D vector
353 /// abstraction of size 128 returns code similar to:
354 /// ```mlir
355 ///   affine.for %i = %M to %N step 128 {
356 ///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
357 ///   }
358 /// ```
359 ///
360 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
361 /// ========================================================================
362 ///   1. lowering to concrete vector types for various HW;
363 ///   2. reduction support;
364 ///   3. non-effecting padding during vector.transfer_read and filter during
365 ///      vector.transfer_write;
///   4. misalignment support in vector.transfer_read / vector.transfer_write
367 ///      (hopefully without read-modify-writes);
368 ///   5. control-flow support;
369 ///   6. cost-models, heuristics and search;
370 ///   7. Op implementation, extensions and implication on memref views;
371 ///   8. many TODOs left around.
372 ///
373 /// Examples:
374 /// =========
375 /// Consider the following Function:
376 /// ```mlir
377 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
378 ///   %A = alloc (%M, %N) : memref<?x?xf32, 0>
379 ///   %B = alloc (%M, %N) : memref<?x?xf32, 0>
380 ///   %C = alloc (%M, %N) : memref<?x?xf32, 0>
381 ///   %f1 = constant 1.0 : f32
382 ///   %f2 = constant 2.0 : f32
383 ///   affine.for %i0 = 0 to %M {
384 ///     affine.for %i1 = 0 to %N {
385 ///       // non-scoped %f1
386 ///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
387 ///     }
388 ///   }
389 ///   affine.for %i2 = 0 to %M {
390 ///     affine.for %i3 = 0 to %N {
391 ///       // non-scoped %f2
392 ///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
393 ///     }
394 ///   }
395 ///   affine.for %i4 = 0 to %M {
396 ///     affine.for %i5 = 0 to %N {
397 ///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
398 ///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
399 ///       %s5 = addf %a5, %b5 : f32
400 ///       // non-scoped %f1
401 ///       %s6 = addf %s5, %f1 : f32
402 ///       // non-scoped %f2
403 ///       %s7 = addf %s5, %f2 : f32
404 ///       // diamond dependency.
405 ///       %s8 = addf %s7, %s6 : f32
406 ///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
407 ///     }
408 ///   }
409 ///   %c7 = constant 7 : index
410 ///   %c42 = constant 42 : index
411 ///   %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
412 ///   return %res : f32
413 /// }
414 /// ```
415 ///
416 /// The -affine-vectorize pass with the following arguments:
417 /// ```
418 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
419 /// ```
420 ///
421 /// produces this standard innermost-loop vectorized code:
422 /// ```mlir
423 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
424 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
425 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
426 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
427 ///   %cst = constant 1.0 : f32
428 ///   %cst_0 = constant 2.0 : f32
429 ///   affine.for %i0 = 0 to %arg0 {
430 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<256xf32>
433 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
434 ///                vector<256xf32>, memref<?x?xf32>
435 ///     }
436 ///   }
437 ///   affine.for %i2 = 0 to %arg0 {
438 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<256xf32>
441 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
442 ///                vector<256xf32>, memref<?x?xf32>
443 ///     }
444 ///   }
445 ///   affine.for %i4 = 0 to %arg0 {
446 ///     affine.for %i5 = 0 to %arg1 step 256 {
447 ///       %3 = vector.transfer_read %0[%i4, %i5] :
448 ///            memref<?x?xf32>, vector<256xf32>
449 ///       %4 = vector.transfer_read %1[%i4, %i5] :
450 ///            memref<?x?xf32>, vector<256xf32>
451 ///       %5 = addf %3, %4 : vector<256xf32>
///       %cst_3 = constant dense<1.0> : vector<256xf32>
454 ///       %6 = addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = constant dense<2.0> : vector<256xf32>
457 ///       %7 = addf %5, %cst_4 : vector<256xf32>
458 ///       %8 = addf %7, %6 : vector<256xf32>
459 ///       vector.transfer_write %8, %2[%i4, %i5] :
460 ///                vector<256xf32>, memref<?x?xf32>
461 ///     }
462 ///   }
463 ///   %c7 = constant 7 : index
464 ///   %c42 = constant 42 : index
465 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
466 ///   return %9 : f32
467 /// }
468 /// ```
469 ///
470 /// The -affine-vectorize pass with the following arguments:
471 /// ```
472 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
473 /// ```
474 ///
475 /// produces this more interesting mixed outer-innermost-loop vectorized code:
476 /// ```mlir
477 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
478 ///   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
479 ///   %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
480 ///   %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
481 ///   %cst = constant 1.0 : f32
482 ///   %cst_0 = constant 2.0 : f32
483 ///   affine.for %i0 = 0 to %arg0 step 32 {
484 ///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = constant dense<1.0> : vector<32x256xf32>
487 ///       vector.transfer_write %cst_1, %0[%i0, %i1] :
488 ///                vector<32x256xf32>, memref<?x?xf32>
489 ///     }
490 ///   }
491 ///   affine.for %i2 = 0 to %arg0 step 32 {
492 ///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = constant dense<2.0> : vector<32x256xf32>
495 ///       vector.transfer_write %cst_2, %1[%i2, %i3] :
496 ///                vector<32x256xf32>, memref<?x?xf32>
497 ///     }
498 ///   }
499 ///   affine.for %i4 = 0 to %arg0 step 32 {
500 ///     affine.for %i5 = 0 to %arg1 step 256 {
501 ///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
503 ///       %4 = vector.transfer_read %1[%i4, %i5] :
504 ///                memref<?x?xf32>, vector<32x256xf32>
505 ///       %5 = addf %3, %4 : vector<32x256xf32>
///       %cst_3 = constant dense<1.0> : vector<32x256xf32>
508 ///       %6 = addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = constant dense<2.0> : vector<32x256xf32>
511 ///       %7 = addf %5, %cst_4 : vector<32x256xf32>
512 ///       %8 = addf %7, %6 : vector<32x256xf32>
513 ///       vector.transfer_write %8, %2[%i4, %i5] :
514 ///                vector<32x256xf32>, memref<?x?xf32>
515 ///     }
516 ///   }
517 ///   %c7 = constant 7 : index
518 ///   %c42 = constant 42 : index
519 ///   %9 = load %2[%c7, %c42] : memref<?x?xf32>
520 ///   return %9 : f32
521 /// }
522 /// ```
523 ///
524 /// Of course, much more intricate n-D imperfectly-nested patterns can be
525 /// vectorized too and specified in a fully declarative fashion.
526 
527 #define DEBUG_TYPE "early-vect"
528 
529 using llvm::dbgs;
530 using llvm::SetVector;
531 
532 /// Forward declaration.
533 static FilterFunctionType
534 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
535                              int fastestVaryingMemRefDimension);
536 
537 /// Creates a vectorization pattern from the command line arguments.
538 /// Up to 3-D patterns are supported.
539 /// If the command line argument requests a pattern of higher order, returns an
540 /// empty pattern list which will conservatively result in no vectorization.
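///
/// For example (a sketch), `makePatterns(parallelLoops, 2, {1, 0})` returns
/// the single nested pattern
/// `For(isVectorizableLoopPtrFactory(parallelLoops, 1),
///      For(isVectorizableLoopPtrFactory(parallelLoops, 0)))`,
/// i.e. a 2-D nest of parallel, vectorizable loops filtered on memref
/// dimensions 1 (outer loop) and 0 (inner loop), as interpreted by
/// isVectorizableLoopPtrFactory.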
541 static std::vector<NestedPattern>
542 makePatterns(const DenseSet<Operation *> &parallelLoops, int vectorRank,
543              ArrayRef<int64_t> fastestVaryingPattern) {
544   using matcher::For;
545   int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
546   int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
547   int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
548   switch (vectorRank) {
549   case 1:
550     return {For(isVectorizableLoopPtrFactory(parallelLoops, d0))};
551   case 2:
552     return {For(isVectorizableLoopPtrFactory(parallelLoops, d0),
553                 For(isVectorizableLoopPtrFactory(parallelLoops, d1)))};
554   case 3:
555     return {For(isVectorizableLoopPtrFactory(parallelLoops, d0),
556                 For(isVectorizableLoopPtrFactory(parallelLoops, d1),
557                     For(isVectorizableLoopPtrFactory(parallelLoops, d2))))};
558   default: {
559     return std::vector<NestedPattern>();
560   }
561   }
562 }
563 
564 static NestedPattern &vectorTransferPattern() {
565   static auto pattern = matcher::Op([](Operation &op) {
566     return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
567   });
568   return pattern;
569 }
570 
571 namespace {
572 
573 /// Base state for the vectorize pass.
574 /// Command line arguments are preempted by non-empty pass arguments.
575 struct Vectorize : public AffineVectorizeBase<Vectorize> {
576   Vectorize() = default;
577   Vectorize(ArrayRef<int64_t> virtualVectorSize);
578   void runOnFunction() override;
579 };
580 
581 } // end anonymous namespace
582 
583 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
584   vectorSizes = virtualVectorSize;
585 }
586 
/////// TODO: Hoist to a VectorizationStrategy.cpp when appropriate. ///////
589 namespace {
590 
591 struct VectorizationStrategy {
592   SmallVector<int64_t, 8> vectorSizes;
593   DenseMap<Operation *, unsigned> loopToVectorDim;
594 };
595 
596 } // end anonymous namespace
597 
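// Marks `loop` in `strategy->loopToVectorDim` with the vector dimension it
// will be vectorized along, counted from the end of `strategy->vectorSizes`.
// As a worked example (a sketch), with vectorSizes = {32, 256} and
// patternDepth = 2, the outermost matched loop (depthInPattern = 0) is
// assigned vector dimension 0 (size 32) and the innermost matched loop
// (depthInPattern = 1) is assigned vector dimension 1 (size 256). Loops
// sitting vectorSizes.size() or more levels above the innermost matched loop
// are left unvectorized.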
598 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
599                                       unsigned patternDepth,
600                                       VectorizationStrategy *strategy) {
601   assert(patternDepth > depthInPattern &&
602          "patternDepth is greater than depthInPattern");
603   if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
604     // Don't vectorize this loop
605     return;
606   }
607   strategy->loopToVectorDim[loop] =
608       strategy->vectorSizes.size() - (patternDepth - depthInPattern);
609 }
610 
611 /// Implements a simple strawman strategy for vectorization.
612 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
613 /// greedily assigns the fastest varying dimension ** of the vector ** to the
614 /// innermost loop in the pattern.
615 /// When coupled with a pattern that looks for the fastest varying dimension in
616 /// load/store MemRefs, this creates a generic vectorization strategy that works
617 /// for any loop in a hierarchy (outermost, innermost or intermediate).
618 ///
619 /// TODO: In the future we should additionally increase the power of the
620 /// profitability analysis along 3 directions:
621 ///   1. account for loop extents (both static and parametric + annotations);
622 ///   2. account for data layout permutations;
623 ///   3. account for impact of vectorization on maximal loop fusion.
624 /// Then we can quantify the above to build a cost model and search over
625 /// strategies.
626 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
627                                           unsigned depthInPattern,
628                                           unsigned patternDepth,
629                                           VectorizationStrategy *strategy) {
630   for (auto m : matches) {
631     if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
632                                     patternDepth, strategy))) {
633       return failure();
634     }
635     vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
636                               patternDepth, strategy);
637   }
638   return success();
639 }
640 
641 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
642 
643 namespace {
644 
645 struct VectorizationState {
646   /// Adds an entry of pre/post vectorization operations in the state.
647   void registerReplacement(Operation *key, Operation *value);
648   /// When the current vectorization pattern is successful, this erases the
649   /// operations that were marked for erasure in the proper order and resets
650   /// the internal state for the next pattern.
651   void finishVectorizationPattern();
652 
  // In-order tracking of original Operations that have been vectorized.
654   // Erase in reverse order.
655   SmallVector<Operation *, 16> toErase;
  // Set of Operations that have been vectorized (the values in the
657   // vectorizationMap for hashed access). The vectorizedSet is used in
658   // particular to filter the operations that have already been vectorized by
659   // this pattern, when iterating over nested loops in this pattern.
660   DenseSet<Operation *> vectorizedSet;
661   // Map of old scalar Operation to new vectorized Operation.
662   DenseMap<Operation *, Operation *> vectorizationMap;
663   // Map of old scalar Value to new vectorized Value.
664   DenseMap<Value, Value> replacementMap;
665   // The strategy drives which loop to vectorize by which amount.
666   const VectorizationStrategy *strategy;
667   // Use-def roots. These represent the starting points for the worklist in the
668   // vectorizeNonTerminals function. They consist of the subset of load
669   // operations that have been vectorized. They can be retrieved from
670   // `vectorizationMap` but it is convenient to keep track of them in a separate
671   // data structure.
672   DenseSet<Operation *> roots;
673   // Terminal operations for the worklist in the vectorizeNonTerminals
674   // function. They consist of the subset of store operations that have been
675   // vectorized. They can be retrieved from `vectorizationMap` but it is
676   // convenient to keep track of them in a separate data structure. Since they
  // do not necessarily belong to use-def chains starting from loads (e.g.
678   // storing a constant), we need to handle them in a post-pass.
679   DenseSet<Operation *> terminals;
680   // Checks that the type of `op` is AffineStoreOp and adds it to the terminals
681   // set.
682   void registerTerminal(Operation *op);
683   // Folder used to factor out constant creation.
684   OperationFolder *folder;
685 
686 private:
687   void registerReplacement(Value key, Value value);
688 };
689 
} // end anonymous namespace
691 
void VectorizationState::registerReplacement(Operation *key,
                                             Operation *value) {
693   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op: ");
694   LLVM_DEBUG(key->print(dbgs()));
695   LLVM_DEBUG(dbgs() << "  into  ");
696   LLVM_DEBUG(value->print(dbgs()));
  assert(key->getNumResults() == 1 && "expected single result key");
  assert(value->getNumResults() == 1 && "expected single result value");
699   assert(vectorizedSet.count(value) == 0 && "already registered");
700   assert(vectorizationMap.count(key) == 0 && "already registered");
701   toErase.push_back(key);
702   vectorizedSet.insert(value);
703   vectorizationMap.insert(std::make_pair(key, value));
704   registerReplacement(key->getResult(0), value->getResult(0));
705   if (isa<AffineLoadOp>(key)) {
706     assert(roots.count(key) == 0 && "root was already inserted previously");
707     roots.insert(key);
708   }
709 }
710 
711 void VectorizationState::registerTerminal(Operation *op) {
  assert(isa<AffineStoreOp>(op) && "terminal must be an AffineStoreOp");
713   assert(terminals.count(op) == 0 &&
714          "terminal was already inserted previously");
715   terminals.insert(op);
716 }
717 
718 void VectorizationState::finishVectorizationPattern() {
719   while (!toErase.empty()) {
720     auto *op = toErase.pop_back_val();
721     LLVM_DEBUG(dbgs() << "\n[early-vect] finishVectorizationPattern erase: ");
722     LLVM_DEBUG(op->print(dbgs()));
723     op->erase();
724   }
725 }
726 
727 void VectorizationState::registerReplacement(Value key, Value value) {
728   assert(replacementMap.count(key) == 0 && "replacement already registered");
729   replacementMap.insert(std::make_pair(key, value));
730 }
731 
732 // Apply 'map' with 'mapOperands' returning resulting values in 'results'.
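// For example (a sketch, with hypothetical SSA names): for an access whose
// map is (d0, d1) -> (d0 + 1, d1) and whose mapOperands are {%i, %j}, this
// emits one single-result affine.apply per map result:
//   %idx0 = affine.apply affine_map<(d0, d1) -> (d0 + 1)>(%i, %j)
//   %idx1 = affine.apply affine_map<(d0, d1) -> (d1)>(%i, %j)
// and appends {%idx0, %idx1} to `results`.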
733 static void computeMemoryOpIndices(Operation *op, AffineMap map,
734                                    ValueRange mapOperands,
735                                    SmallVectorImpl<Value> &results) {
736   OpBuilder builder(op);
737   for (auto resultExpr : map.getResults()) {
738     auto singleResMap =
739         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
740     auto afOp =
741         builder.create<AffineApplyOp>(op->getLoc(), singleResMap, mapOperands);
742     results.push_back(afOp);
743   }
744 }
745 
746 ////// TODO: Hoist to a VectorizationMaterialize.cpp when appropriate. ////
747 
748 /// Handles the vectorization of load and store MLIR operations.
749 ///
750 /// AffineLoadOp operations are the roots of the vectorizeNonTerminals call.
751 /// They are vectorized immediately. The resulting vector.transfer_read is
752 /// immediately registered to replace all uses of the AffineLoadOp in this
753 /// pattern's scope.
754 ///
755 /// AffineStoreOp are the terminals of the vectorizeNonTerminals call. They
756 /// need to be vectorized late once all the use-def chains have been traversed.
757 /// Additionally, they may have ssa-values operands which come from outside the
758 /// scope of the current pattern.
759 /// Such special cases force us to delay the vectorization of the stores until
760 /// the last step. Here we merely register the store operation.
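///
/// As a sketch, using the simplified notation of the examples at the top of
/// this file and hypothetical values %i and %j, a load vectorized under a 1-D
/// strategy of size 128 is rewritten from:
/// ```mlir
///   %a = affine.load %A[%i, %j] : memref<?x?xf32>
/// ```
/// into:
/// ```mlir
///   %v_a = vector.transfer_read %A[%i, %j] :
///            memref<?x?xf32>, vector<128xf32>
/// ```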
761 template <typename LoadOrStoreOpPointer>
762 static LogicalResult vectorizeRootOrTerminal(Value iv,
763                                              LoadOrStoreOpPointer memoryOp,
764                                              VectorizationState *state) {
765   auto memRefType = memoryOp.getMemRef().getType().template cast<MemRefType>();
766 
767   auto elementType = memRefType.getElementType();
768   // TODO: ponder whether we want to further vectorize a vector value.
769   assert(VectorType::isValidElementType(elementType) &&
770          "Not a valid vector element type");
771   auto vectorType = VectorType::get(state->strategy->vectorSizes, elementType);
772 
773   // Materialize a MemRef with 1 vector.
774   auto *opInst = memoryOp.getOperation();
775   // For now, vector.transfers must be aligned, operate only on indices with an
776   // identity subset of AffineMap and do not change layout.
777   // TODO: increase the expressiveness power of vector.transfer operations
778   // as needed by various targets.
779   if (auto load = dyn_cast<AffineLoadOp>(opInst)) {
780     OpBuilder b(opInst);
781     ValueRange mapOperands = load.getMapOperands();
782     SmallVector<Value, 8> indices;
783     indices.reserve(load.getMemRefType().getRank());
784     if (load.getAffineMap() !=
785         b.getMultiDimIdentityMap(load.getMemRefType().getRank())) {
786       computeMemoryOpIndices(opInst, load.getAffineMap(), mapOperands, indices);
787     } else {
788       indices.append(mapOperands.begin(), mapOperands.end());
789     }
790     auto permutationMap =
791         makePermutationMap(opInst, indices, state->strategy->loopToVectorDim);
792     if (!permutationMap)
      return failure();
794     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
795     LLVM_DEBUG(permutationMap.print(dbgs()));
796     auto transfer = b.create<vector::TransferReadOp>(
797         opInst->getLoc(), vectorType, memoryOp.getMemRef(), indices,
798         permutationMap);
799     state->registerReplacement(opInst, transfer.getOperation());
800   } else {
801     state->registerTerminal(opInst);
802   }
803   return success();
804 }
805 /// end TODO: Hoist to a VectorizationMaterialize.cpp when appropriate. ///
806 
807 /// Coarsens the loops bounds and transforms all remaining load and store
808 /// operations into the appropriate vector.transfer.
809 static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
810                                           VectorizationState *state) {
811   loop.setStep(step);
812 
813   FilterFunctionType notVectorizedThisPattern = [state](Operation &op) {
814     if (!matcher::isLoadOrStore(op)) {
815       return false;
816     }
817     return state->vectorizationMap.count(&op) == 0 &&
818            state->vectorizedSet.count(&op) == 0 &&
819            state->roots.count(&op) == 0 && state->terminals.count(&op) == 0;
820   };
821   auto loadAndStores = matcher::Op(notVectorizedThisPattern);
822   SmallVector<NestedMatch, 8> loadAndStoresMatches;
823   loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
824   for (auto ls : loadAndStoresMatches) {
825     auto *opInst = ls.getMatchedOperation();
826     auto load = dyn_cast<AffineLoadOp>(opInst);
827     auto store = dyn_cast<AffineStoreOp>(opInst);
828     LLVM_DEBUG(opInst->print(dbgs()));
829     LogicalResult result =
830         load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
831              : vectorizeRootOrTerminal(loop.getInductionVar(), store, state);
832     if (failed(result)) {
833       return failure();
834     }
835   }
836   return success();
837 }
838 
839 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
840 /// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
842 static FilterFunctionType
843 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
844                              int fastestVaryingMemRefDimension) {
845   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
846     auto loop = cast<AffineForOp>(forOp);
847     auto parallelIt = parallelLoops.find(loop);
848     if (parallelIt == parallelLoops.end())
849       return false;
850     int memRefDim = -1;
851     auto vectorizableBody =
852         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
853     if (!vectorizableBody)
854       return false;
855     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
856            memRefDim == fastestVaryingMemRefDimension;
857   };
858 }
859 
860 /// Apply vectorization of `loop` according to `state`. This is only triggered
861 /// if all vectorizations in `childrenMatches` have already succeeded
862 /// recursively in DFS post-order.
863 static LogicalResult
864 vectorizeLoopsAndLoadsRecursively(NestedMatch oneMatch,
865                                   VectorizationState *state) {
866   auto *loopInst = oneMatch.getMatchedOperation();
867   auto loop = cast<AffineForOp>(loopInst);
868   auto childrenMatches = oneMatch.getMatchedChildren();
869 
870   // 1. DFS postorder recursion, if any of my children fails, I fail too.
871   for (auto m : childrenMatches) {
872     if (failed(vectorizeLoopsAndLoadsRecursively(m, state))) {
873       return failure();
874     }
875   }
876 
877   // 2. This loop may have been omitted from vectorization for various reasons
878   // (e.g. due to the performance model or pattern depth > vector size).
879   auto it = state->strategy->loopToVectorDim.find(loopInst);
880   if (it == state->strategy->loopToVectorDim.end()) {
881     return success();
882   }
883 
884   // 3. Actual post-order transformation.
885   auto vectorDim = it->second;
886   assert(vectorDim < state->strategy->vectorSizes.size() &&
887          "vector dim overflow");
888   //   a. get actual vector size
889   auto vectorSize = state->strategy->vectorSizes[vectorDim];
890   //   b. loop transformation for early vectorization is still subject to
891   //     exploratory tradeoffs (see top of the file). Apply coarsening, i.e.:
892   //        | ub -> ub
893   //        | step -> step * vectorSize
894   LLVM_DEBUG(dbgs() << "\n[early-vect] vectorizeForOp by " << vectorSize
895                     << " : ");
896   LLVM_DEBUG(loopInst->print(dbgs()));
897   return vectorizeAffineForOp(loop, loop.getStep() * vectorSize, state);
898 }
899 
900 /// Tries to transform a scalar constant into a vector splat of that constant.
/// Returns the resulting splat value if the constant is a valid vector
/// element type.
903 /// If `type` is not a valid vector type or if the scalar constant is not a
904 /// valid vector element type, returns nullptr.
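///
/// A minimal sketch, assuming a vector<128xf32> target type and a hypothetical
/// constant %cst:
/// ```mlir
///   %cst = constant 1.0 : f32
///   // is splatted, for its vectorized uses, into:
///   %vcst = constant dense<1.0> : vector<128xf32>
/// ```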
905 static Value vectorizeConstant(Operation *op, ConstantOp constant, Type type) {
906   if (!type || !type.isa<VectorType>() ||
907       !VectorType::isValidElementType(constant.getType())) {
908     return nullptr;
909   }
910   OpBuilder b(op);
911   Location loc = op->getLoc();
912   auto vectorType = type.cast<VectorType>();
913   auto attr = DenseElementsAttr::get(vectorType, constant.getValue());
914   auto *constantOpInst = constant.getOperation();
915 
916   OperationState state(loc, constantOpInst->getName().getStringRef(), {},
917                        {vectorType}, {b.getNamedAttr("value", attr)});
918 
919   return b.createOperation(state)->getResult(0);
920 }
921 
922 /// Returns the vector type resulting from applying the provided vectorization
923 /// strategy on the scalar type.
924 static VectorType getVectorType(Type scalarTy,
925                                 const VectorizationStrategy *strategy) {
926   assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
927   return VectorType::get(strategy->vectorSizes, scalarTy);
928 }
929 
/// Returns true if the provided value is a vector uniform given the
/// vectorization strategy.
// TODO: For now, only values that are invariant to all the loops in the
// vectorization strategy are considered vector uniforms.
934 static bool isUniformDefinition(Value value,
935                                 const VectorizationStrategy *strategy) {
936   for (auto loopToDim : strategy->loopToVectorDim) {
937     auto loop = cast<AffineForOp>(loopToDim.first);
938     if (!loop.isDefinedOutsideOfLoop(value))
939       return false;
940   }
941   return true;
942 }
943 
944 /// Generates a broadcast op for the provided uniform value using the
945 /// vectorization strategy in 'state'.
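///
/// A minimal sketch, assuming a vector<128xf32> strategy and a hypothetical
/// uniform scalar %s defined outside the vectorized loop nest:
/// ```mlir
///   %bcast = vector.broadcast %s : f32 to vector<128xf32>
/// ```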
946 static Value vectorizeUniform(Value value, VectorizationState *state) {
947   OpBuilder builder(value.getContext());
948   builder.setInsertionPointAfter(value);
949 
950   auto vectorTy = getVectorType(value.getType(), state->strategy);
951   auto bcast = builder.create<BroadcastOp>(value.getLoc(), vectorTy, value);
952 
953   // Add broadcast to the replacement map to reuse it for other uses.
954   state->replacementMap[value] = bcast;
955   return bcast;
956 }
957 
/// Tries to vectorize a given `operand` of Operation `op` during use-def
/// chain propagation or during terminal vectorization, by applying the
/// following logic:
/// 1. if the defining operation is part of the vectorizedSet (i.e. vectorized
///    by use-def propagation), `operand` is already in the proper vector form;
/// 2. otherwise, `operand` may be in some other vector form that fails to
///    vectorize at the moment (i.e. broadcasting required), returns nullptr to
///    indicate failure;
/// 3. if `operand` is a constant, returns the vectorized form of the constant;
/// 4. if `operand` is uniform, returns a vector broadcast of `operand`;
968 /// 5. non-constant scalars are currently non-vectorizable, in particular to
969 ///    guard against vectorizing an index which may be loop-variant and needs
970 ///    special handling.
971 ///
972 /// In particular this logic captures some of the use cases where definitions
973 /// that are not scoped under the current pattern are needed to vectorize.
974 /// One such example is top level function constants that need to be splatted.
975 ///
976 /// Returns an operand that has been vectorized to match `state`'s strategy if
977 /// vectorization is possible with the above logic. Returns nullptr otherwise.
978 ///
979 /// TODO: handle more complex cases.
980 static Value vectorizeOperand(Value operand, Operation *op,
981                               VectorizationState *state) {
982   LLVM_DEBUG(dbgs() << "\n[early-vect]vectorize operand: " << operand);
983   // 1. If this value has already been vectorized this round, we are done.
984   if (state->vectorizedSet.count(operand.getDefiningOp()) > 0) {
985     LLVM_DEBUG(dbgs() << " -> already vector operand");
986     return operand;
987   }
988   // 1.b. Delayed on-demand replacement of a use.
989   //    Note that we cannot just call replaceAllUsesWith because it may result
990   //    in ops with mixed types, for ops whose operands have not all yet
991   //    been vectorized. This would be invalid IR.
992   auto it = state->replacementMap.find(operand);
993   if (it != state->replacementMap.end()) {
994     auto res = it->second;
995     LLVM_DEBUG(dbgs() << "-> delayed replacement by: " << res);
996     return res;
997   }
998   // 2. TODO: broadcast needed.
999   if (operand.getType().isa<VectorType>()) {
1000     LLVM_DEBUG(dbgs() << "-> non-vectorizable");
1001     return nullptr;
1002   }
1003   // 3. vectorize constant.
1004   if (auto constant = operand.getDefiningOp<ConstantOp>())
1005     return vectorizeConstant(op, constant,
1006                              getVectorType(operand.getType(), state->strategy));
1007 
1008   // 4. Uniform values.
1009   if (isUniformDefinition(operand, state->strategy))
1010     return vectorizeUniform(operand, state);
1011 
1012   // 5. currently non-vectorizable.
1013   LLVM_DEBUG(dbgs() << "-> non-vectorizable: " << operand);
1014   return nullptr;
1015 }
1016 
1017 /// Encodes Operation-specific behavior for vectorization. In general we assume
1018 /// that all operands of an op must be vectorized but this is not always true.
1019 /// In the future, it would be nice to have a trait that describes how a
1020 /// particular operation vectorizes. For now we implement the case distinction
1021 /// here.
1022 /// Returns a vectorized form of an operation or nullptr if vectorization fails.
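///
/// As a sketch, with hypothetical SSA names, an elementwise op whose operands
/// have all been vectorized under a vector<128xf32> strategy is cloned with
/// vector operands and result types:
/// ```mlir
///   %s = addf %a, %b : f32
///   // becomes
///   %vs = addf %va, %vb : vector<128xf32>
/// ```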
1023 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1024 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1025 // do one-off logic here; ideally it would be TableGen'd.
1026 static Operation *vectorizeOneOperation(Operation *opInst,
1027                                         VectorizationState *state) {
1028   // Sanity checks.
1029   assert(!isa<AffineLoadOp>(opInst) &&
1030          "all loads must have already been fully vectorized independently");
1031   assert(!isa<vector::TransferReadOp>(opInst) &&
1032          "vector.transfer_read cannot be further vectorized");
1033   assert(!isa<vector::TransferWriteOp>(opInst) &&
1034          "vector.transfer_write cannot be further vectorized");
1035 
1036   if (auto store = dyn_cast<AffineStoreOp>(opInst)) {
1037     OpBuilder b(opInst);
1038     auto memRef = store.getMemRef();
1039     auto value = store.getValueToStore();
1040     auto vectorValue = vectorizeOperand(value, opInst, state);
1041     if (!vectorValue)
1042       return nullptr;
1043 
1044     ValueRange mapOperands = store.getMapOperands();
1045     SmallVector<Value, 8> indices;
1046     indices.reserve(store.getMemRefType().getRank());
1047     if (store.getAffineMap() !=
1048         b.getMultiDimIdentityMap(store.getMemRefType().getRank())) {
1049       computeMemoryOpIndices(opInst, store.getAffineMap(), mapOperands,
1050                              indices);
1051     } else {
1052       indices.append(mapOperands.begin(), mapOperands.end());
1053     }
1054 
1055     auto permutationMap =
1056         makePermutationMap(opInst, indices, state->strategy->loopToVectorDim);
1057     if (!permutationMap)
1058       return nullptr;
1059     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1060     LLVM_DEBUG(permutationMap.print(dbgs()));
1061     auto transfer = b.create<vector::TransferWriteOp>(
1062         opInst->getLoc(), vectorValue, memRef, indices, permutationMap);
1063     auto *res = transfer.getOperation();
1064     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << *res);
1065     // "Terminals" (i.e. AffineStoreOps) are erased on the spot.
1066     opInst->erase();
1067     return res;
1068   }
1069   if (opInst->getNumRegions() != 0)
1070     return nullptr;
1071 
1072   SmallVector<Type, 8> vectorTypes;
1073   for (auto v : opInst->getResults()) {
1074     vectorTypes.push_back(
1075         VectorType::get(state->strategy->vectorSizes, v.getType()));
1076   }
1077   SmallVector<Value, 8> vectorOperands;
1078   for (auto v : opInst->getOperands()) {
1079     vectorOperands.push_back(vectorizeOperand(v, opInst, state));
1080   }
  // Check whether any operand is null. If so, vectorization failed.
1082   bool success = llvm::all_of(vectorOperands, [](Value op) { return op; });
1083   if (!success) {
1084     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize");
1085     return nullptr;
1086   }
1087 
1088   // Create a clone of the op with the proper operands and return types.
1089   // TODO: The following assumes there is always an op with a fixed
1090   // name that works both in scalar mode and vector mode.
1091   // TODO: Is it worth considering an Operation.clone operation which
1092   // changes the type so we can promote an Operation with less boilerplate?
1093   OpBuilder b(opInst);
1094   OperationState newOp(opInst->getLoc(), opInst->getName().getStringRef(),
1095                        vectorOperands, vectorTypes, opInst->getAttrs(),
1096                        /*successors=*/{}, /*regions=*/{});
1097   return b.createOperation(newOp);
1098 }
1099 
/// Iterates over the forward slice from the loads in the vectorization pattern
/// and rewrites them using their vectorized counterpart by:
///   1. Creating the forward slice starting from the loads in the
///   vectorization pattern.
///   2. Topologically sorting the forward slice.
///   3. For each operation in the slice, creating the vector form of this
///   operation, replacing each operand by the replacement operand retrieved
///   from replacementMap. If any such replacement is missing, vectorization
///   fails.
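///
/// As a sketch, for the diamond in the example at the top of this file
/// (%s5 feeding %s6 and %s7, which both feed %s8), the topological sort
/// guarantees that %s5 is vectorized before %s6 and %s7, which are in turn
/// vectorized before %s8, so every replacement operand is available when an
/// operation is rewritten.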
1108 static LogicalResult vectorizeNonTerminals(VectorizationState *state) {
  // 1. Create the initial worklist with the uses of the roots.
1110   SetVector<Operation *> worklist;
1111   // Note: state->roots have already been vectorized and must not be vectorized
1112   // again. This fits `getForwardSlice` which does not insert `op` in the
1113   // result.
1114   // Note: we have to exclude terminals because some of their defs may not be
1115   // nested under the vectorization pattern (e.g. constants defined in an
1116   // encompassing scope).
1117   // TODO: Use a backward slice for terminals, avoid special casing and
1118   // merge implementations.
1119   for (auto *op : state->roots) {
1120     getForwardSlice(op, &worklist, [state](Operation *op) {
1121       return state->terminals.count(op) == 0; // propagate if not terminal
1122     });
1123   }
1124   // We merged multiple slices, topological order may not hold anymore.
1125   worklist = topologicalSort(worklist);
1126 
1127   for (unsigned i = 0; i < worklist.size(); ++i) {
1128     auto *op = worklist[i];
1129     LLVM_DEBUG(dbgs() << "\n[early-vect] vectorize use: ");
1130     LLVM_DEBUG(op->print(dbgs()));
1131 
    // 2. Create vector form of the operation.
    //    Insert it just before op; on success, register op as replaced.
1134     auto *vectorizedInst = vectorizeOneOperation(op, state);
1135     if (!vectorizedInst) {
1136       return failure();
1137     }
1138 
1139     // 3. Register replacement for future uses in the scope.
1140     //    Note that we cannot just call replaceAllUsesWith because it may
1141     //    result in ops with mixed types, for ops whose operands have not all
1142     //    yet been vectorized. This would be invalid IR.
1143     state->registerReplacement(op, vectorizedInst);
1144   }
1145   return success();
1146 }

/// Vectorization is a recursive procedure where anything below can fail.
/// The root match thus needs to maintain a clone for handling failure.
/// Each root may succeed independently but will otherwise clean up after
/// itself if anything below it fails.
static LogicalResult vectorizeRootMatch(NestedMatch m,
                                        VectorizationStrategy *strategy) {
  auto loop = cast<AffineForOp>(m.getMatchedOperation());
  OperationFolder folder(loop.getContext());
  VectorizationState state;
  state.strategy = strategy;
  state.folder = &folder;

  // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
  // As a consequence we must check that each root pattern is still
  // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
  // TODO: implement a non-greedy profitability analysis that keeps only
  // non-intersecting patterns.
  if (!isVectorizableLoopBody(loop, vectorTransferPattern())) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
    return failure();
  }

  /// Sets up error handling for this root loop. This is how the root match
  /// maintains a clone for handling failure and restores the proper state
  /// via an explicit success/failure guard.
  auto *loopInst = loop.getOperation();
  OpBuilder builder(loopInst);
  auto clonedLoop = cast<AffineForOp>(builder.clone(*loopInst));
  struct Guard {
    LogicalResult failure() {
      loop.getInductionVar().replaceAllUsesWith(clonedLoop.getInductionVar());
      loop.erase();
      return mlir::failure();
    }
    LogicalResult success() {
      clonedLoop.erase();
      return mlir::success();
    }
    AffineForOp loop;
    AffineForOp clonedLoop;
  } guard{loop, clonedLoop};

  //////////////////////////////////////////////////////////////////////////////
  // Start vectorizing.
  // From now on, any error triggers the scope guard above.
  //////////////////////////////////////////////////////////////////////////////
  // 1. Vectorize all the loops matched by the pattern, recursively.
  // This also vectorizes the roots (AffineLoadOp) and registers the terminals
  // (AffineStoreOp) for post-processing vectorization (we need to wait for all
  // use-def chains into them to be vectorized first).
  if (failed(vectorizeLoopsAndLoadsRecursively(m, &state))) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ failed root vectorizeLoop");
    return guard.failure();
  }

  // 2. Vectorize operations reached by use-def chains from the roots, except
  // the terminals (store operations), which need to be post-processed
  // separately.
  // TODO: add more as we expand.
  if (failed(vectorizeNonTerminals(&state))) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ failed vectorizeNonTerminals");
    return guard.failure();
  }

  // 3. Post-process terminals.
  // Note: we have to post-process terminals because some of their defs may not
  // be nested under the vectorization pattern (e.g. constants defined in an
  // encompassing scope).
  // TODO: Use a backward slice for terminals, avoid special casing and
  // merge implementations.
  for (auto *op : state.terminals) {
    if (!vectorizeOneOperation(op, &state)) { // nullptr == failure
      LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ failed to vectorize terminals");
      return guard.failure();
    }
  }

  // 4. Finish this vectorization pattern.
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
  state.finishVectorizationPattern();
  return guard.success();
}

/// Applies vectorization to the current Function by searching over a set of
/// predetermined patterns.
void Vectorize::runOnFunction() {
  FuncOp f = getFunction();
  if (!fastestVaryingPattern.empty() &&
      fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with a different size "
                 "than the vector size.");
    return signalPassFailure();
  }

  DenseSet<Operation *> parallelLoops;
  f.walk([&parallelLoops](AffineForOp loop) {
    if (isLoopParallel(loop))
      parallelLoops.insert(loop);
  });

  vectorizeAffineLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern);
}

namespace mlir {

/// Vectorizes affine loops in 'loops' using the n-D vectorization factors in
/// 'vectorSizes'. By default, each vectorization factor is applied
/// inner-to-outer to the loops of each loop nest. 'fastestVaryingPattern' can
/// optionally be used to provide a different loop vectorization order.
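///
/// For example (a schematic before/after sketch, assuming vectorSizes = {128}
/// and a parallel loop operating on f32 values), a loop of the form:
///   affine.for %i = 0 to 256 {
///     %ld = affine.load %A[%i] : memref<256xf32>
///     affine.store %ld, %B[%i] : memref<256xf32>
///   }
/// is rewritten so that %i steps by 128 and the load/store pair becomes a
/// vector.transfer_read / vector.transfer_write of vector<128xf32>.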
void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                          ArrayRef<int64_t> vectorSizes,
                          ArrayRef<int64_t> fastestVaryingPattern) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;

  for (auto &pat :
       makePatterns(loops, vectorSizes.size(), fastestVaryingPattern)) {
    LLVM_DEBUG(dbgs() << "\n******************************************");
    LLVM_DEBUG(dbgs() << "\n******************************************");
    LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
    LLVM_DEBUG(parentOp->print(dbgs()));

    unsigned patternDepth = pat.getDepth();

    SmallVector<NestedMatch, 8> matches;
    pat.match(parentOp, &matches);
    // Iterate over all the top-level matches and vectorize eagerly.
    // This automatically prunes intersecting matches.
    for (auto m : matches) {
      VectorizationStrategy strategy;
      // TODO: depending on profitability, elect to reduce the vector size.
      strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
      if (failed(analyzeProfitability(m.getMatchedChildren(), 1, patternDepth,
                                      &strategy))) {
        continue;
      }
      vectorizeLoopIfProfitable(m.getMatchedOperation(), 0, patternDepth,
                                &strategy);
      // TODO: if pattern does not apply, report it; alter the
      // cost/benefit.
      vectorizeRootMatch(m, &strategy);
      // TODO: some diagnostics if failure to vectorize occurs.
    }
  }
  LLVM_DEBUG(dbgs() << "\n");
}

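/// A minimal usage sketch for the factory functions below (hypothetical client
/// code, not part of this pass; assumes a PassManager over a module containing
/// the target FuncOps):
///   PassManager pm(&context);
///   pm.addNestedPass<FuncOp>(
///       createSuperVectorizePass(/*virtualVectorSize=*/{128}));
///   (void)pm.run(module);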
std::unique_ptr<OperationPass<FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
  return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
  return std::make_unique<Vectorize>();
}

} // namespace mlir