1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "PassDetail.h"
15 #include "mlir/Analysis/SliceAnalysis.h"
16 #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
17 #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
18 #include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
19 #include "mlir/Dialect/Affine/IR/AffineOps.h"
20 #include "mlir/Dialect/Affine/Utils.h"
21 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
22 #include "mlir/Dialect/Vector/IR/VectorOps.h"
23 #include "mlir/Dialect/Vector/Utils/VectorUtils.h"
24 #include "mlir/IR/BlockAndValueMapping.h"
25 #include "mlir/Support/LLVM.h"
26 #include "llvm/ADT/STLExtras.h"
27 #include "llvm/Support/Debug.h"
28
29 using namespace mlir;
30 using namespace vector;
31
32 ///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which provide a single,
/// compact representation in the vector types of information that is expected
/// to reduce the impact of the phase ordering problem.
37 ///
38 /// Vector granularity:
39 /// ===================
40 /// This pass is designed to perform vectorization at a super-vector
41 /// granularity. A super-vector is loosely defined as a vector type that is a
42 /// multiple of a "good" vector size so the HW can efficiently implement a set
43 /// of high-level primitives. Multiple is understood along any dimension; e.g.
44 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
45 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
46 /// efficiently implement a set of high-level primitives" is not necessarily an
47 /// integer multiple of actual hardware registers. We leave details of this
48 /// distinction unspecified for now.
49 ///
/// Some may prefer the terminology of a "tile of HW vectors". In this case,
/// one should note that super-vectors implement an "always full tile"
/// abstraction. They guarantee no partial-tile separation is necessary by
/// relying on a high-level copy-reshape abstraction that we call
/// vector.transfer. This copy-reshape operation is also responsible for
/// performing layout transposition if necessary. In the general case this
/// will require a scoped allocation in some notional local memory.
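///
/// As an illustrative sketch (the memref %A and the indices are made up for
/// this comment), a vector.transfer_read can materialize a transposed
/// super-vector from a row-major memref by carrying a permutation map, e.g.:
/// ```mlir
/// %v = vector.transfer_read %A[%i, %j]
///        {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///        : memref<?x?xf32>, vector<32x256xf32>
/// ```
/// The padding operand of vector.transfer_read is omitted here for brevity.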
57 ///
/// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn, into a single, compact representation in the vector
/// types, information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
/// 1. the associated loops have dependency semantics that do not prevent
/// vectorization;
/// 2. the associated loops have been sliced in chunks of static sizes that are
/// compatible with vector sizes (i.e. similar to unroll-and-jam);
/// 3. the inner loops, in the unroll-and-jam analogy of 2, are captured by the
/// vector type and no vectorization-hampering transformations can be
/// applied to them anymore;
/// 4. the underlying memrefs are accessed in some notional contiguous way
/// that allows loading into vectors with some amount of spatial locality.
/// In other words, super-vectorization provides a level of separation of
/// concerns by way of opacity to subsequent passes. This has the effect of
/// encapsulating and propagating vectorization constraints down the list of
/// passes until we are ready to lower further.
76 ///
77 /// For a particular target, a notion of minimal n-d vector size will be
78 /// specified and vectorization targets a multiple of those. In the following
79 /// paragraph, let "k ." represent "a multiple of", to be understood as a
80 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
81 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
82 ///
83 /// Some non-exhaustive notable super-vector sizes of interest include:
84 /// - CPU: vector<k . HW_vector_size>,
85 /// vector<k' . core_count x k . HW_vector_size>,
86 /// vector<socket_count x k' . core_count x k . HW_vector_size>;
87 /// - GPU: vector<k . warp_size>,
88 /// vector<k . warp_size x float2>,
89 /// vector<k . warp_size x float4>,
/// vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
91 ///
92 /// Loops and operations are emitted that operate on those super-vector shapes.
93 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
94 /// passes are expected to be (gradually) more target-specific.
95 ///
96 /// At a high level, a vectorized load in a loop will resemble:
97 /// ```mlir
98 /// affine.for %i = ? to ? step ? {
99 /// %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
100 /// }
101 /// ```
102 /// It is the responsibility of the implementation of vector.transfer_read to
103 /// materialize vector registers from the original scalar memrefs. A later (more
104 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
/// 1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
/// DmaWaitOp + vectorized operations for data transformations and shuffle;
/// thus opening opportunities for unrolling and pipelining. This is an
/// instance of library call "whiteboxing"; or
/// 2. later in a target-specific lowering pass or hand-written library
/// call, achieving full separation of concerns. This is an instance of a
/// library call; or
/// 3. a mix of both, e.g. based on a model.
114 /// In the future, these operations will expose a contract to constrain the
115 /// search on vectorization patterns and sizes.
116 ///
117 /// Occurrence of super-vectorization in the compiler flow:
118 /// =======================================================
119 /// This is an active area of investigation. We start with 2 remarks to position
120 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
121 /// and LLVM SLP Vectorizer.
122 ///
123 /// LLVM VPLAN:
124 /// -----------
/// The astute reader may have noticed that, in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN.
/// For instance, it can be applied late in a traditional polyhedral
/// compilation flow (e.g., the PPCG project uses ISL to provide dependence
/// analysis, multi-level scheduling + tiling, lifting of footprints to fast
/// memory, communication synthesis, mapping, and register optimizations) and
/// before unrolling. When vectorization is applied at this *late* level in a
/// typical polyhedral flow, and is instantiated with actual hardware vector
/// sizes, super-vectorization is expected to match (or subsume) the type of
/// patterns that LLVM's VPLAN aims at targeting. The main difference here is
/// that MLIR is higher level and our implementation should be significantly
/// simpler. Also note that in this mode, recursive patterns are probably a
/// bit of overkill, although it is reasonable to expect that mixing a bit of
/// outer loop and inner loop vectorization + unrolling will provide
/// interesting choices to MLIR.
140 ///
141 /// LLVM SLP Vectorizer:
142 /// --------------------
143 /// Super-vectorization however is not meant to be usable in a similar fashion
144 /// to the SLP vectorizer. The main difference lies in the information that
145 /// both vectorizers use: super-vectorization examines contiguity of memory
146 /// references along fastest varying dimensions and loops with recursive nested
147 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
148 /// the other hand, performs flat pattern matching inside a single unrolled loop
149 /// body and stitches together pieces of load and store operations into full
150 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
151 /// innermost loop, control-flow dependent patterns that super-vectorization may
152 /// not be able to capture easily. In other words, super-vectorization does not
153 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
154 ///
155 /// Ongoing investigations:
156 /// -----------------------
157 /// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
159 /// opportunities in the context of the traditional polyhedral compiler flow
160 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
161 /// we expect to experiment with super-vectorization:
162 /// 1. Right after language lowering to MLIR: this is the earliest time where
163 /// super-vectorization is expected to be applied. At this level, all the
164 /// language/user/library-level annotations are available and can be fully
165 /// exploited. Examples include loop-type annotations (such as parallel,
166 /// reduction, scan, dependence distance vector, vectorizable) as well as
/// memory access annotations (such as non-aliasing writes guaranteed,
/// indirect accesses that are permutations by construction), or
/// that a particular operation is prescribed atomic by the user. At this
170 /// level, anything that enriches what dependence analysis can do should be
171 /// aggressively exploited. At this level we are close to having explicit
172 /// vector types in the language, except we do not impose that burden on the
173 /// programmer/library: we derive information from scalar code + annotations.
174 /// 2. After dependence analysis and before polyhedral scheduling: the
175 /// information that supports vectorization does not need to be supplied by a
176 /// higher level of abstraction. Traditional dependence analysis is available
177 /// in MLIR and will be used to drive vectorization and cost models.
178 ///
179 /// Let's pause here and remark that applying super-vectorization as described
180 /// in 1. and 2. presents clear opportunities and risks:
/// - the opportunity is that vectorization is burned into the type system and
/// is protected from the adverse effects of loop scheduling, tiling, loop
/// interchange and all passes downstream. Provided that subsequent passes are
/// able to operate on vector types, the vector shapes, associated loop
/// iterator properties, alignment, and contiguity of fastest varying
/// dimensions are preserved until we lower the super-vector types. We expect
/// this to significantly rein in the adverse effects of phase ordering.
/// - the risks are that a. all passes after super-vectorization have to work
/// on elemental vector types (note that this is always true, wherever
/// vectorization is applied) and b. that imposing vectorization constraints
191 /// too early may be overall detrimental to loop fusion, tiling and other
192 /// transformations because the dependence distances are coarsened when
193 /// operating on elemental vector types. For this reason, the pattern
194 /// profitability analysis should include a component that also captures the
195 /// maximal amount of fusion available under a particular pattern. This is
196 /// still at the stage of rough ideas but in this context, search is our
197 /// friend as the Tensor Comprehensions and auto-TVM contributions
198 /// demonstrated previously.
/// The bottom line is that we do not yet have good answers to the above, but
/// we aim at making it easy to answer such questions.
201 ///
202 /// Back to our listing, the last places where early super-vectorization makes
203 /// sense are:
/// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are
/// known to improve locality and parallelism, and to be configurable (e.g.
/// max-fuse, smart-fuse, etc.). They can also have adverse effects on contiguity
207 /// properties that are required for vectorization but the vector.transfer
208 /// copy-reshape-pad-transpose abstraction is expected to help recapture
209 /// these properties.
210 /// 4. right after polyhedral-style scheduling+tiling;
211 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
212 /// probably the most promising places because applying tiling achieves a
213 /// separation of concerns that allows rescheduling to worry less about
214 /// locality and more about parallelism and distribution (e.g. min-fuse).
215 ///
216 /// At these levels the risk-reward looks different: on one hand we probably
217 /// lost a good deal of language/user/library-level annotation; on the other
218 /// hand we gained parallelism and locality through scheduling and tiling.
219 /// However we probably want to ensure tiling is compatible with the
220 /// full-tile-only abstraction used in super-vectorization or suffer the
221 /// consequences. It is too early to place bets on what will win but we expect
222 /// super-vectorization to be the right abstraction to allow exploring at all
223 /// these levels. And again, search is our friend.
224 ///
225 /// Lastly, we mention it again here:
226 /// 6. as a MLIR-based alternative to VPLAN.
227 ///
228 /// Lowering, unrolling, pipelining:
229 /// ================================
230 /// TODO: point to the proper places.
231 ///
232 /// Algorithm:
233 /// ==========
234 /// The algorithm proceeds in a few steps:
/// 1. defining super-vectorization patterns and matching them on the tree of
/// AffineForOp. A super-vectorization pattern is defined as a recursive
/// data structure that matches and captures nested, imperfectly-nested
/// loops that have a. conformable loop annotations attached (e.g. parallel,
/// reduction, vectorizable, ...) as well as b. all contiguous load/store
/// operations along a specified minor dimension (not necessarily the
/// fastest varying);
242 /// 2. analyzing those patterns for profitability (TODO: and
243 /// interference);
244 /// 3. then, for each pattern in order:
245 /// a. applying iterative rewriting of the loops and all their nested
246 /// operations in topological order. Rewriting is implemented by
247 /// coarsening the loops and converting operations and operands to their
248 /// vector forms. Processing operations in topological order is relatively
249 /// simple due to the structured nature of the control-flow
250 /// representation. This order ensures that all the operands of a given
251 /// operation have been vectorized before the operation itself in a single
252 /// traversal, except for operands defined outside of the loop nest. The
253 /// algorithm can convert the following operations to their vector form:
254 /// * Affine load and store operations are converted to opaque vector
255 /// transfer read and write operations.
256 /// * Scalar constant operations/operands are converted to vector
257 /// constant operations (splat).
/// * Uniform operands (only induction variables of loops not mapped to
/// a vector dimension, or operands defined outside of the loop nest
/// for now) are broadcast to a vector (see the sketch after this list).
261 /// TODO: Support more uniform cases.
262 /// * Affine for operations with 'iter_args' are vectorized by
263 /// vectorizing their 'iter_args' operands and results.
264 /// TODO: Support more complex loops with divergent lbs and/or ubs.
265 /// * The remaining operations in the loop nest are vectorized by
266 /// widening their scalar types to vector types.
267 /// b. if everything under the root AffineForOp in the current pattern
268 /// is vectorized properly, we commit that loop to the IR and remove the
269 /// scalar loop. Otherwise, we discard the vectorized loop and keep the
270 /// original scalar loop.
/// c. vectorization is applied on the next pattern in the list. Because
/// pattern interference avoidance is not yet implemented and we do
/// not support further vectorizing an already vectorized load, we need to
/// re-verify that the pattern is still vectorizable. This is expected to
/// make cost models more difficult to write and is subject to improvement
/// in the future.
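///
/// As an illustrative sketch (SSA names are made up), vectorizing a 1-D
/// pattern with vector size 128 rewrites a loop body that uses a value %x
/// defined outside the loop nest roughly as follows: loads/stores become
/// vector transfers, the arithmetic is widened, and %x is broadcast next to
/// its definition:
/// ```mlir
/// // Scalar input.
/// affine.for %i = 0 to %N {
///   %a = affine.load %A[%i] : memref<?xf32>
///   %s = arith.addf %a, %x : f32
///   affine.store %s, %B[%i] : memref<?xf32>
/// }
/// // Vectorized form.
/// %v_x = vector.broadcast %x : f32 to vector<128xf32>
/// affine.for %i = 0 to %N step 128 {
///   %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   %v_s = arith.addf %v_a, %v_x : vector<128xf32>
///   vector.transfer_write %v_s, %B[%i] : vector<128xf32>, memref<?xf32>
/// }
/// ```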
277 ///
278 /// Choice of loop transformation to support the algorithm:
279 /// =======================================================
280 /// The choice of loop transformation to apply for coarsening vectorized loops
281 /// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor of 128; we want to transform the following input:
283 /// ```mlir
284 /// affine.for %i = %M to %N {
285 /// %a = affine.load %A[%i] : memref<?xf32>
286 /// }
287 /// ```
288 ///
289 /// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion, etc.), say after stripmining (and potentially unrolling in
291 /// the case of LLVM's SLP vectorizer):
292 /// ```mlir
293 /// affine.for %i = floor(%M, 128) to ceil(%N, 128) {
294 /// affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
295 /// %a = affine.load %A[%ii] : memref<?xf32>
296 /// }
297 /// }
298 /// ```
299 ///
300 /// Instead, we seek to vectorize early and freeze vector types before
301 /// scheduling, so we want to generate a pattern that resembles:
302 /// ```mlir
303 /// affine.for %i = ? to ? step ? {
304 /// %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
305 /// }
306 /// ```
307 ///
/// There are at least two ways to represent this coarsening:
/// i. simply dividing the lower / upper bounds by 128 creates issues
/// when representing expressions such as ii + 1 because now we only
/// have access to original values that have been divided. Additional
/// information is needed to specify accesses at below-128 granularity;
/// ii. another alternative is to coarsen the loop step but this may have
/// consequences on dependence analysis and fusability of loops: fusable
/// loops probably need to have the same step (because we don't want to
/// stripmine/unroll to enable fusion).
316 /// As a consequence, we choose to represent the coarsening using the loop
317 /// step for now and reevaluate in the future. Note that we can renormalize
318 /// loop steps later if/when we have evidence that they are problematic.
319 ///
320 /// For the simple strawman example above, vectorizing for a 1-D vector
321 /// abstraction of size 128 returns code similar to:
322 /// ```mlir
323 /// affine.for %i = %M to %N step 128 {
324 /// %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
325 /// }
326 /// ```
327 ///
328 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
329 /// ========================================================================
330 /// 1. lowering to concrete vector types for various HW;
331 /// 2. reduction support for n-D vectorization and non-unit steps;
332 /// 3. non-effecting padding during vector.transfer_read and filter during
333 /// vector.transfer_write;
/// 4. misalignment support for vector.transfer_read / vector.transfer_write
335 /// (hopefully without read-modify-writes);
336 /// 5. control-flow support;
337 /// 6. cost-models, heuristics and search;
338 /// 7. Op implementation, extensions and implication on memref views;
339 /// 8. many TODOs left around.
340 ///
341 /// Examples:
342 /// =========
343 /// Consider the following Function:
344 /// ```mlir
345 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
346 /// %A = alloc (%M, %N) : memref<?x?xf32, 0>
347 /// %B = alloc (%M, %N) : memref<?x?xf32, 0>
348 /// %C = alloc (%M, %N) : memref<?x?xf32, 0>
349 /// %f1 = arith.constant 1.0 : f32
350 /// %f2 = arith.constant 2.0 : f32
351 /// affine.for %i0 = 0 to %M {
352 /// affine.for %i1 = 0 to %N {
353 /// // non-scoped %f1
354 /// affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
355 /// }
356 /// }
357 /// affine.for %i2 = 0 to %M {
358 /// affine.for %i3 = 0 to %N {
359 /// // non-scoped %f2
360 /// affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
361 /// }
362 /// }
363 /// affine.for %i4 = 0 to %M {
364 /// affine.for %i5 = 0 to %N {
365 /// %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
366 /// %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
367 /// %s5 = arith.addf %a5, %b5 : f32
368 /// // non-scoped %f1
369 /// %s6 = arith.addf %s5, %f1 : f32
370 /// // non-scoped %f2
371 /// %s7 = arith.addf %s5, %f2 : f32
372 /// // diamond dependency.
373 /// %s8 = arith.addf %s7, %s6 : f32
374 /// affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
375 /// }
376 /// }
377 /// %c7 = arith.constant 7 : index
378 /// %c42 = arith.constant 42 : index
379 /// %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
380 /// return %res : f32
381 /// }
382 /// ```
383 ///
384 /// The -affine-super-vectorize pass with the following arguments:
385 /// ```
386 /// -affine-super-vectorize="virtual-vector-size=256 test-fastest-varying=0"
387 /// ```
388 ///
389 /// produces this standard innermost-loop vectorized code:
390 /// ```mlir
391 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
392 /// %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
393 /// %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
394 /// %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
395 /// %cst = arith.constant 1.0 : f32
396 /// %cst_0 = arith.constant 2.0 : f32
397 /// affine.for %i0 = 0 to %arg0 {
398 /// affine.for %i1 = 0 to %arg1 step 256 {
/// %cst_1 = arith.constant dense<1.0> : vector<256xf32>
401 /// vector.transfer_write %cst_1, %0[%i0, %i1] :
402 /// vector<256xf32>, memref<?x?xf32>
403 /// }
404 /// }
405 /// affine.for %i2 = 0 to %arg0 {
406 /// affine.for %i3 = 0 to %arg1 step 256 {
/// %cst_2 = arith.constant dense<2.0> : vector<256xf32>
409 /// vector.transfer_write %cst_2, %1[%i2, %i3] :
410 /// vector<256xf32>, memref<?x?xf32>
411 /// }
412 /// }
413 /// affine.for %i4 = 0 to %arg0 {
414 /// affine.for %i5 = 0 to %arg1 step 256 {
415 /// %3 = vector.transfer_read %0[%i4, %i5] :
416 /// memref<?x?xf32>, vector<256xf32>
417 /// %4 = vector.transfer_read %1[%i4, %i5] :
418 /// memref<?x?xf32>, vector<256xf32>
419 /// %5 = arith.addf %3, %4 : vector<256xf32>
/// %cst_3 = arith.constant dense<1.0> : vector<256xf32>
422 /// %6 = arith.addf %5, %cst_3 : vector<256xf32>
/// %cst_4 = arith.constant dense<2.0> : vector<256xf32>
425 /// %7 = arith.addf %5, %cst_4 : vector<256xf32>
426 /// %8 = arith.addf %7, %6 : vector<256xf32>
427 /// vector.transfer_write %8, %2[%i4, %i5] :
428 /// vector<256xf32>, memref<?x?xf32>
429 /// }
430 /// }
431 /// %c7 = arith.constant 7 : index
432 /// %c42 = arith.constant 42 : index
433 /// %9 = load %2[%c7, %c42] : memref<?x?xf32>
434 /// return %9 : f32
435 /// }
436 /// ```
437 ///
438 /// The -affine-super-vectorize pass with the following arguments:
439 /// ```
440 /// -affine-super-vectorize="virtual-vector-size=32,256 \
441 /// test-fastest-varying=1,0"
442 /// ```
443 ///
444 /// produces this more interesting mixed outer-innermost-loop vectorized code:
445 /// ```mlir
446 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
447 /// %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
448 /// %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
449 /// %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
450 /// %cst = arith.constant 1.0 : f32
451 /// %cst_0 = arith.constant 2.0 : f32
452 /// affine.for %i0 = 0 to %arg0 step 32 {
453 /// affine.for %i1 = 0 to %arg1 step 256 {
/// %cst_1 = arith.constant dense<1.0> : vector<32x256xf32>
456 /// vector.transfer_write %cst_1, %0[%i0, %i1] :
457 /// vector<32x256xf32>, memref<?x?xf32>
458 /// }
459 /// }
460 /// affine.for %i2 = 0 to %arg0 step 32 {
461 /// affine.for %i3 = 0 to %arg1 step 256 {
/// %cst_2 = arith.constant dense<2.0> : vector<32x256xf32>
464 /// vector.transfer_write %cst_2, %1[%i2, %i3] :
465 /// vector<32x256xf32>, memref<?x?xf32>
466 /// }
467 /// }
468 /// affine.for %i4 = 0 to %arg0 step 32 {
469 /// affine.for %i5 = 0 to %arg1 step 256 {
470 /// %3 = vector.transfer_read %0[%i4, %i5] :
/// memref<?x?xf32>, vector<32x256xf32>
472 /// %4 = vector.transfer_read %1[%i4, %i5] :
473 /// memref<?x?xf32>, vector<32x256xf32>
474 /// %5 = arith.addf %3, %4 : vector<32x256xf32>
/// %cst_3 = arith.constant dense<1.0> : vector<32x256xf32>
477 /// %6 = arith.addf %5, %cst_3 : vector<32x256xf32>
/// %cst_4 = arith.constant dense<2.0> : vector<32x256xf32>
480 /// %7 = arith.addf %5, %cst_4 : vector<32x256xf32>
481 /// %8 = arith.addf %7, %6 : vector<32x256xf32>
482 /// vector.transfer_write %8, %2[%i4, %i5] :
483 /// vector<32x256xf32>, memref<?x?xf32>
484 /// }
485 /// }
486 /// %c7 = arith.constant 7 : index
487 /// %c42 = arith.constant 42 : index
488 /// %9 = load %2[%c7, %c42] : memref<?x?xf32>
489 /// return %9 : f32
490 /// }
491 /// ```
492 ///
493 /// Of course, much more intricate n-D imperfectly-nested patterns can be
494 /// vectorized too and specified in a fully declarative fashion.
495 ///
496 /// Reduction:
497 /// ==========
498 /// Vectorizing reduction loops along the reduction dimension is supported if:
499 /// - the reduction kind is supported,
500 /// - the vectorization is 1-D, and
/// - the step size of the loop is equal to one.
502 ///
/// Compared to the non-vector-dimension case, two additional things are done
504 /// during vectorization of such loops:
505 /// - The resulting vector returned from the loop is reduced to a scalar using
/// `vector.reduction`.
507 /// - In some cases a mask is applied to the vector yielded at the end of the
508 /// loop to prevent garbage values from being written to the accumulator.
509 ///
/// Reduction vectorization is switched off by default; it can be enabled by
511 /// passing a map from loops to reductions to utility functions, or by passing
512 /// `vectorize-reductions=true` to the vectorization pass.
513 ///
514 /// Consider the following example:
515 /// ```mlir
516 /// func @vecred(%in: memref<512xf32>) -> f32 {
517 /// %cst = arith.constant 0.000000e+00 : f32
518 /// %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
519 /// %ld = affine.load %in[%i] : memref<512xf32>
520 /// %cos = math.cos %ld : f32
521 /// %add = arith.addf %part_sum, %cos : f32
522 /// affine.yield %add : f32
523 /// }
524 /// return %sum : f32
525 /// }
526 /// ```
527 ///
528 /// The -affine-super-vectorize pass with the following arguments:
529 /// ```
530 /// -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
531 /// vectorize-reductions=true"
532 /// ```
533 /// produces the following output:
534 /// ```mlir
535 /// #map = affine_map<(d0) -> (-d0 + 500)>
536 /// func @vecred(%arg0: memref<512xf32>) -> f32 {
537 /// %cst = arith.constant 0.000000e+00 : f32
538 /// %cst_0 = arith.constant dense<0.000000e+00> : vector<128xf32>
539 /// %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
540 /// -> (vector<128xf32>) {
541 /// // %2 is the number of iterations left in the original loop.
542 /// %2 = affine.apply #map(%arg1)
543 /// %3 = vector.create_mask %2 : vector<128xi1>
544 /// %cst_1 = arith.constant 0.000000e+00 : f32
545 /// %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
546 /// memref<512xf32>, vector<128xf32>
547 /// %5 = math.cos %4 : vector<128xf32>
548 /// %6 = arith.addf %arg2, %5 : vector<128xf32>
/// // We filter out the effect of the last 12 elements using the mask.
550 /// %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
551 /// affine.yield %7 : vector<128xf32>
552 /// }
553 /// %1 = vector.reduction <add>, %0 : vector<128xf32> into f32
554 /// return %1 : f32
555 /// }
556 /// ```
557 ///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result. The mask is full of
/// ones in every iteration except for the last one, in which it has the form
/// `11...100...0` with 116 ones and 12 zeros.
562
563 #define DEBUG_TYPE "early-vect"
564
565 using llvm::dbgs;
566
567 /// Forward declaration.
568 static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
570 int fastestVaryingMemRefDimension);
571
572 /// Creates a vectorization pattern from the command line arguments.
573 /// Up to 3-D patterns are supported.
574 /// If the command line argument requests a pattern of higher order, returns an
575 /// empty pattern list which will conservatively result in no vectorization.
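///
/// For example (an illustrative sketch with made-up names), with
/// `vectorRank == 2` the returned pattern matches 2-D nests of vectorizable
/// loops such as:
/// ```mlir
/// affine.for %i = 0 to %M {      // matched by the outer For(...)
///   affine.for %j = 0 to %N {    // matched by the nested For(...)
///     %a = affine.load %A[%i, %j] : memref<?x?xf32>
///   }
/// }
/// ```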
576 static Optional<NestedPattern>
makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
578 ArrayRef<int64_t> fastestVaryingPattern) {
579 using matcher::For;
580 int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
581 int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
582 int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
583 switch (vectorRank) {
584 case 1:
585 return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
586 case 2:
587 return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
588 For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
589 case 3:
590 return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
591 For(isVectorizableLoopPtrFactory(parallelLoops, d1),
592 For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
593 default: {
594 return llvm::None;
595 }
596 }
597 }
598
static NestedPattern &vectorTransferPattern() {
600 static auto pattern = matcher::Op([](Operation &op) {
601 return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
602 });
603 return pattern;
604 }
605
606 namespace {
607
608 /// Base state for the vectorize pass.
609 /// Command line arguments are preempted by non-empty pass arguments.
610 struct Vectorize : public AffineVectorizeBase<Vectorize> {
611 Vectorize() = default;
612 Vectorize(ArrayRef<int64_t> virtualVectorSize);
613 void runOnOperation() override;
614 };
615
616 } // namespace
617
Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
619 vectorSizes = virtualVectorSize;
620 }
621
static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
623 unsigned patternDepth,
624 VectorizationStrategy *strategy) {
625 assert(patternDepth > depthInPattern &&
626 "patternDepth is greater than depthInPattern");
627 if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
628 // Don't vectorize this loop
629 return;
630 }
631 strategy->loopToVectorDim[loop] =
632 strategy->vectorSizes.size() - (patternDepth - depthInPattern);
633 }
634
635 /// Implements a simple strawman strategy for vectorization.
636 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
637 /// greedily assigns the fastest varying dimension ** of the vector ** to the
638 /// innermost loop in the pattern.
639 /// When coupled with a pattern that looks for the fastest varying dimension in
640 /// load/store MemRefs, this creates a generic vectorization strategy that works
641 /// for any loop in a hierarchy (outermost, innermost or intermediate).
642 ///
643 /// TODO: In the future we should additionally increase the power of the
644 /// profitability analysis along 3 directions:
645 /// 1. account for loop extents (both static and parametric + annotations);
646 /// 2. account for data layout permutations;
647 /// 3. account for impact of vectorization on maximal loop fusion.
648 /// Then we can quantify the above to build a cost model and search over
649 /// strategies.
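///
/// For instance (an illustrative sketch), with `vectorSizes = [32, 256]` and a
/// matched 2-D loop nest, the outer loop is assigned vector dimension 0 and
/// the innermost loop vector dimension 1 (the fastest varying one):
/// ```mlir
/// affine.for %i = 0 to %M {    // -> vector dimension 0 (size 32)
///   affine.for %j = 0 to %N {  // -> vector dimension 1 (size 256)
///     ...
///   }
/// }
/// ```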
static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
651 unsigned depthInPattern,
652 unsigned patternDepth,
653 VectorizationStrategy *strategy) {
654 for (auto m : matches) {
655 if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
656 patternDepth, strategy))) {
657 return failure();
658 }
659 vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
660 patternDepth, strategy);
661 }
662 return success();
663 }
664
665 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
666
667 namespace {
668
669 struct VectorizationState {
670
VectorizationState(MLIRContext *context) : builder(context) {}
672
673 /// Registers the vector replacement of a scalar operation and its result
674 /// values. Both operations must have the same number of results.
675 ///
676 /// This utility is used to register the replacement for the vast majority of
677 /// the vectorized operations.
678 ///
679 /// Example:
680 /// * 'replaced': %0 = arith.addf %1, %2 : f32
681 /// * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
682 void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
683
684 /// Registers the vector replacement of a scalar value. The replacement
685 /// operation should have a single result, which replaces the scalar value.
686 ///
687 /// This utility is used to register the vector replacement of block arguments
688 /// and operation results which are not directly vectorized (i.e., their
689 /// scalar version still exists after vectorization), like uniforms.
690 ///
691 /// Example:
692 /// * 'replaced': block argument or operation outside of the vectorized
693 /// loop.
694 /// * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
695 void registerValueVectorReplacement(Value replaced, Operation *replacement);
696
697 /// Registers the vector replacement of a block argument (e.g., iter_args).
698 ///
699 /// Example:
700 /// * 'replaced': 'iter_arg' block argument.
701 /// * 'replacement': vectorized 'iter_arg' block argument.
702 void registerBlockArgVectorReplacement(BlockArgument replaced,
703 BlockArgument replacement);
704
705 /// Registers the scalar replacement of a scalar value. 'replacement' must be
706 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
708 ///
709 /// This utility is used to register the replacement of block arguments
710 /// that are within the loop to be vectorized and will continue being scalar
711 /// within the vector loop.
712 ///
713 /// Example:
714 /// * 'replaced': induction variable of a loop to be vectorized.
715 /// * 'replacement': new induction variable in the new vector loop.
716 void registerValueScalarReplacement(BlockArgument replaced,
717 BlockArgument replacement);
718
719 /// Registers the scalar replacement of a scalar result returned from a
720 /// reduction loop. 'replacement' must be scalar.
721 ///
722 /// This utility is used to register the replacement for scalar results of
723 /// vectorized reduction loops with iter_args.
724 ///
/// Example:
726 /// * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
727 /// * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into
728 /// f32
729 void registerLoopResultScalarReplacement(Value replaced, Value replacement);
730
731 /// Returns in 'replacedVals' the scalar replacement for values in
732 /// 'inputVals'.
733 void getScalarValueReplacementsFor(ValueRange inputVals,
734 SmallVectorImpl<Value> &replacedVals);
735
736 /// Erases the scalar loop nest after its successful vectorization.
737 void finishVectorizationPattern(AffineForOp rootLoop);
738
739 // Used to build and insert all the new operations created. The insertion
740 // point is preserved and updated along the vectorization process.
741 OpBuilder builder;
742
743 // Maps input scalar operations to their vector counterparts.
744 DenseMap<Operation *, Operation *> opVectorReplacement;
745 // Maps input scalar values to their vector counterparts.
746 BlockAndValueMapping valueVectorReplacement;
747 // Maps input scalar values to their new scalar counterparts in the vector
748 // loop nest.
749 BlockAndValueMapping valueScalarReplacement;
750 // Maps results of reduction loops to their new scalar counterparts.
751 DenseMap<Value, Value> loopResultScalarReplacement;
752
753 // Maps the newly created vector loops to their vector dimension.
754 DenseMap<Operation *, unsigned> vecLoopToVecDim;
755
756 // Maps the new vectorized loops to the corresponding vector masks if it is
757 // required.
758 DenseMap<Operation *, Value> vecLoopToMask;
759
760 // The strategy drives which loop to vectorize by which amount.
761 const VectorizationStrategy *strategy = nullptr;
762
763 private:
764 /// Internal implementation to map input scalar values to new vector or scalar
765 /// values.
766 void registerValueVectorReplacementImpl(Value replaced, Value replacement);
767 void registerValueScalarReplacementImpl(Value replaced, Value replacement);
768 };
769
770 } // namespace
771
772 /// Registers the vector replacement of a scalar operation and its result
773 /// values. Both operations must have the same number of results.
774 ///
775 /// This utility is used to register the replacement for the vast majority of
776 /// the vectorized operations.
777 ///
778 /// Example:
779 /// * 'replaced': %0 = arith.addf %1, %2 : f32
780 /// * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
void VectorizationState::registerOpVectorReplacement(Operation *replaced,
782 Operation *replacement) {
783 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
784 LLVM_DEBUG(dbgs() << *replaced << "\n");
785 LLVM_DEBUG(dbgs() << "into\n");
786 LLVM_DEBUG(dbgs() << *replacement << "\n");
787
788 assert(replaced->getNumResults() == replacement->getNumResults() &&
789 "Unexpected replaced and replacement results");
790 assert(opVectorReplacement.count(replaced) == 0 && "already registered");
791 opVectorReplacement[replaced] = replacement;
792
793 for (auto resultTuple :
794 llvm::zip(replaced->getResults(), replacement->getResults()))
795 registerValueVectorReplacementImpl(std::get<0>(resultTuple),
796 std::get<1>(resultTuple));
797 }
798
799 /// Registers the vector replacement of a scalar value. The replacement
800 /// operation should have a single result, which replaces the scalar value.
801 ///
802 /// This utility is used to register the vector replacement of block arguments
803 /// and operation results which are not directly vectorized (i.e., their
804 /// scalar version still exists after vectorization), like uniforms.
805 ///
806 /// Example:
807 /// * 'replaced': block argument or operation outside of the vectorized loop.
808 /// * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
void VectorizationState::registerValueVectorReplacement(
810 Value replaced, Operation *replacement) {
811 assert(replacement->getNumResults() == 1 &&
812 "Expected single-result replacement");
813 if (Operation *defOp = replaced.getDefiningOp())
814 registerOpVectorReplacement(defOp, replacement);
815 else
816 registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
817 }
818
819 /// Registers the vector replacement of a block argument (e.g., iter_args).
820 ///
821 /// Example:
822 /// * 'replaced': 'iter_arg' block argument.
823 /// * 'replacement': vectorized 'iter_arg' block argument.
void VectorizationState::registerBlockArgVectorReplacement(
825 BlockArgument replaced, BlockArgument replacement) {
826 registerValueVectorReplacementImpl(replaced, replacement);
827 }
828
void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
830 Value replacement) {
831 assert(!valueVectorReplacement.contains(replaced) &&
832 "Vector replacement already registered");
833 assert(replacement.getType().isa<VectorType>() &&
834 "Expected vector type in vector replacement");
835 valueVectorReplacement.map(replaced, replacement);
836 }
837
838 /// Registers the scalar replacement of a scalar value. 'replacement' must be
839 /// scalar. Both values must be block arguments. Operation results should be
/// replaced using the 'registerOp*' utilities.
841 ///
842 /// This utility is used to register the replacement of block arguments
843 /// that are within the loop to be vectorized and will continue being scalar
844 /// within the vector loop.
845 ///
846 /// Example:
847 /// * 'replaced': induction variable of a loop to be vectorized.
848 /// * 'replacement': new induction variable in the new vector loop.
void VectorizationState::registerValueScalarReplacement(
850 BlockArgument replaced, BlockArgument replacement) {
851 registerValueScalarReplacementImpl(replaced, replacement);
852 }
853
854 /// Registers the scalar replacement of a scalar result returned from a
855 /// reduction loop. 'replacement' must be scalar.
856 ///
857 /// This utility is used to register the replacement for scalar results of
858 /// vectorized reduction loops with iter_args.
859 ///
/// Example:
861 /// * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
862 /// * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into f32
void VectorizationState::registerLoopResultScalarReplacement(
864 Value replaced, Value replacement) {
865 assert(isa<AffineForOp>(replaced.getDefiningOp()));
866 assert(loopResultScalarReplacement.count(replaced) == 0 &&
867 "already registered");
868 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
869 "with scalar: "
870 << replacement);
871 loopResultScalarReplacement[replaced] = replacement;
872 }
873
void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
875 Value replacement) {
876 assert(!valueScalarReplacement.contains(replaced) &&
877 "Scalar value replacement already registered");
878 assert(!replacement.getType().isa<VectorType>() &&
879 "Expected scalar type in scalar replacement");
880 valueScalarReplacement.map(replaced, replacement);
881 }
882
883 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
void VectorizationState::getScalarValueReplacementsFor(
885 ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
886 for (Value inputVal : inputVals)
887 replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
888 }
889
890 /// Erases a loop nest, including all its nested operations.
static void eraseLoopNest(AffineForOp forOp) {
892 LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
893 forOp.erase();
894 }
895
896 /// Erases the scalar loop nest after its successful vectorization.
void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
898 LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
899 eraseLoopNest(rootLoop);
900 }
901
/// Applies 'map' to 'mapOperands', returning the resulting values in 'results'.
static void computeMemoryOpIndices(Operation *op, AffineMap map,
904 ValueRange mapOperands,
905 VectorizationState &state,
906 SmallVectorImpl<Value> &results) {
907 for (auto resultExpr : map.getResults()) {
908 auto singleResMap =
909 AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
910 auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
911 mapOperands);
912 results.push_back(afOp);
913 }
914 }
915
/// Returns a FilterFunctionType that can be used in NestedPattern to match a
/// loop whose underlying load/store accesses are either invariant or all
/// varying along the `fastestVaryingMemRefDimension`.
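///
/// For example (an illustrative sketch, using the convention that dimension 0
/// denotes the fastest varying dimension of the memref), a filter created with
/// `fastestVaryingMemRefDimension == 0` accepts the loop below because its
/// load varies along the fastest varying dimension of %A:
/// ```mlir
/// affine.for %j = 0 to %N {
///   %a = affine.load %A[%i, %j] : memref<?x?xf32>
/// }
/// ```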
919 static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
921 int fastestVaryingMemRefDimension) {
return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
923 auto loop = cast<AffineForOp>(forOp);
924 auto parallelIt = parallelLoops.find(loop);
925 if (parallelIt == parallelLoops.end())
926 return false;
927 int memRefDim = -1;
928 auto vectorizableBody =
929 isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
930 if (!vectorizableBody)
931 return false;
932 return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
933 memRefDim == fastestVaryingMemRefDimension;
934 };
935 }
936
937 /// Returns the vector type resulting from applying the provided vectorization
938 /// strategy on the scalar type.
static VectorType getVectorType(Type scalarTy,
940 const VectorizationStrategy *strategy) {
941 assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
942 return VectorType::get(strategy->vectorSizes, scalarTy);
943 }
944
945 /// Tries to transform a scalar constant into a vector constant. Returns the
/// vector constant if the scalar type is a valid vector element type. Returns
947 /// nullptr, otherwise.
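///
/// For example (an illustrative sketch for a 1-D strategy of size 128):
/// ```mlir
/// %cst = arith.constant 1.0 : f32
/// // becomes, inside the innermost vectorized loop:
/// %vcst = arith.constant dense<1.0> : vector<128xf32>
/// ```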
static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
949 VectorizationState &state) {
950 Type scalarTy = constOp.getType();
951 if (!VectorType::isValidElementType(scalarTy))
952 return nullptr;
953
954 auto vecTy = getVectorType(scalarTy, state.strategy);
955 auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
956
957 OpBuilder::InsertionGuard guard(state.builder);
958 Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
959 // Find the innermost vectorized ancestor loop to insert the vector constant.
960 while (parentOp && !state.vecLoopToVecDim.count(parentOp))
961 parentOp = parentOp->getParentOp();
962 assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
963 isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
964 auto vecForOp = cast<AffineForOp>(parentOp);
965 state.builder.setInsertionPointToStart(vecForOp.getBody());
966 auto newConstOp =
967 state.builder.create<arith::ConstantOp>(constOp.getLoc(), vecAttr);
968
969 // Register vector replacement for future uses in the scope.
970 state.registerOpVectorReplacement(constOp, newConstOp);
971 return newConstOp;
972 }
973
974 /// Creates a constant vector filled with the neutral elements of the given
975 /// reduction. The scalar type of vector elements will be taken from
976 /// `oldOperand`.
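///
/// For example (an illustrative sketch), for an `addf` reduction over f32 and
/// a 1-D strategy of size 128, the created initial vector is:
/// ```mlir
/// %init = arith.constant dense<0.000000e+00> : vector<128xf32>
/// ```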
static arith::ConstantOp createInitialVector(arith::AtomicRMWKind reductionKind,
978 Value oldOperand,
979 VectorizationState &state) {
980 Type scalarTy = oldOperand.getType();
981 if (!VectorType::isValidElementType(scalarTy))
982 return nullptr;
983
984 Attribute valueAttr = getIdentityValueAttr(
985 reductionKind, scalarTy, state.builder, oldOperand.getLoc());
986 auto vecTy = getVectorType(scalarTy, state.strategy);
987 auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
988 auto newConstOp =
989 state.builder.create<arith::ConstantOp>(oldOperand.getLoc(), vecAttr);
990
991 return newConstOp;
992 }
993
994 /// Creates a mask used to filter out garbage elements in the last iteration
995 /// of unaligned loops. If a mask is not required then `nullptr` is returned.
996 /// The mask will be a vector of booleans representing meaningful vector
997 /// elements in the current iteration. It is filled with ones for each iteration
998 /// except for the last one, where it has the form `11...100...0` with the
999 /// number of ones equal to the number of meaningful elements (i.e. the number
1000 /// of iterations that would be left in the original loop).
static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
assert(state.strategy->vectorSizes.size() == 1 &&
       "Creating a mask for non-1-D vectors is not supported.");
1004 assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
1005 "Creating a mask for loops with non-unit original step size is not "
1006 "supported.");
1007
1008 // Check if we have already created the mask.
1009 if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1010 return mask;
1011
1012 // If the loop has constant bounds and the original number of iterations is
// divisible by the vector size then we don't need a mask.
1014 if (vecForOp.hasConstantBounds()) {
1015 int64_t originalTripCount =
1016 vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1017 if (originalTripCount % vecForOp.getStep() == 0)
1018 return nullptr;
1019 }
1020
1021 OpBuilder::InsertionGuard guard(state.builder);
1022 state.builder.setInsertionPointToStart(vecForOp.getBody());
1023
1024 // We generate the mask using the `vector.create_mask` operation which accepts
1025 // the number of meaningful elements (i.e. the length of the prefix of 1s).
1026 // To compute the number of meaningful elements we subtract the current value
1027 // of the iteration variable from the upper bound of the loop. Example:
1028 //
1029 // // 500 is the upper bound of the loop
1030 // #map = affine_map<(d0) -> (500 - d0)>
1031 // %elems_left = affine.apply #map(%iv)
1032 // %mask = vector.create_mask %elems_left : vector<128xi1>
1033
1034 Location loc = vecForOp.getLoc();
1035
1036 // First we get the upper bound of the loop using `affine.apply` or
1037 // `affine.min`.
1038 AffineMap ubMap = vecForOp.getUpperBoundMap();
1039 Value ub;
1040 if (ubMap.getNumResults() == 1)
1041 ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1042 vecForOp.getUpperBoundOperands());
1043 else
1044 ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1045 vecForOp.getUpperBoundOperands());
1046 // Then we compute the number of (original) iterations left in the loop.
1047 AffineExpr subExpr =
1048 state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1049 Value itersLeft =
1050 makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1051 {ub, vecForOp.getInductionVar()});
1052 // If the affine maps were successfully composed then `ub` is unneeded.
1053 if (ub.use_empty())
1054 ub.getDefiningOp()->erase();
1055 // Finally we create the mask.
1056 Type maskTy = VectorType::get(state.strategy->vectorSizes,
1057 state.builder.getIntegerType(1));
1058 Value mask =
1059 state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1060
1061 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1062 << itersLeft << "\n"
1063 << mask << "\n");
1064
1065 state.vecLoopToMask[vecForOp] = mask;
1066 return mask;
1067 }
1068
1069 /// Returns true if the provided value is vector uniform given the vectorization
1070 /// strategy.
1071 // TODO: For now, only values that are induction variables of loops not in
1072 // `loopToVectorDim` or invariants to all the loops in the vectorization
1073 // strategy are considered vector uniforms.
static bool isUniformDefinition(Value value,
1075 const VectorizationStrategy *strategy) {
1076 AffineForOp forOp = getForInductionVarOwner(value);
1077 if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
1078 return true;
1079
1080 for (auto loopToDim : strategy->loopToVectorDim) {
1081 auto loop = cast<AffineForOp>(loopToDim.first);
1082 if (!loop.isDefinedOutsideOfLoop(value))
1083 return false;
1084 }
1085 return true;
1086 }
1087
1088 /// Generates a broadcast op for the provided uniform value using the
1089 /// vectorization strategy in 'state'.
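///
/// For example (an illustrative sketch for a 1-D strategy of size 128), a
/// scalar value %x of type f32 that is uniform across the vectorized loop is
/// replaced in vectorized uses by:
/// ```mlir
/// %v_x = vector.broadcast %x : f32 to vector<128xf32>
/// ```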
static Operation *vectorizeUniform(Value uniformVal,
1091 VectorizationState &state) {
1092 OpBuilder::InsertionGuard guard(state.builder);
1093 Value uniformScalarRepl =
1094 state.valueScalarReplacement.lookupOrDefault(uniformVal);
1095 state.builder.setInsertionPointAfterValue(uniformScalarRepl);
1096
1097 auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
1098 auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
1099 vectorTy, uniformScalarRepl);
1100 state.registerValueVectorReplacement(uniformVal, bcastOp);
1101 return bcastOp;
1102 }
1103
1104 /// Tries to vectorize a given `operand` by applying the following logic:
1105 /// 1. if the defining operation has been already vectorized, `operand` is
1106 /// already in the proper vector form;
1107 /// 2. if the `operand` is a constant, returns the vectorized form of the
1108 /// constant;
/// 3. if the `operand` is uniform, returns a vector broadcast of the `operand`;
1110 /// 4. otherwise, the vectorization of `operand` is not supported.
1111 /// Newly created vector operations are registered in `state` as replacement
1112 /// for their scalar counterparts.
1113 /// In particular this logic captures some of the use cases where definitions
1114 /// that are not scoped under the current pattern are needed to vectorize.
1115 /// One such example is top level function constants that need to be splatted.
1116 ///
1117 /// Returns an operand that has been vectorized to match `state`'s strategy if
1118 /// vectorization is possible with the above logic. Returns nullptr otherwise.
1119 ///
1120 /// TODO: handle more complex cases.
static Value vectorizeOperand(Value operand, VectorizationState &state) {
1122 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
1123 // If this value is already vectorized, we are done.
1124 if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
1125 LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
1126 return vecRepl;
1127 }
1128
// A vector operand that is not in the replacement map should never reach
1130 // this point. Reaching this point could mean that the code was already
1131 // vectorized and we shouldn't try to vectorize already vectorized code.
1132 assert(!operand.getType().isa<VectorType>() &&
1133 "Vector op not found in replacement map");
1134
1135 // Vectorize constant.
1136 if (auto constOp = operand.getDefiningOp<arith::ConstantOp>()) {
1137 auto vecConstant = vectorizeConstant(constOp, state);
1138 LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
1139 return vecConstant.getResult();
1140 }
1141
1142 // Vectorize uniform values.
1143 if (isUniformDefinition(operand, state.strategy)) {
1144 Operation *vecUniform = vectorizeUniform(operand, state);
1145 LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
1146 return vecUniform->getResult(0);
1147 }
1148
1149 // Check for unsupported block argument scenarios. A supported block argument
1150 // should have been vectorized already.
1151 if (!operand.getDefiningOp())
1152 LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
1153 else
1154 // Generic unsupported case.
1155 LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
1156
1157 return nullptr;
1158 }
1159
/// Vectorizes an affine load with the vectorization strategy in 'state' by
/// generating a 'vector.transfer_read' op with the proper permutation map
/// inferred from the indices of the load. The new 'vector.transfer_read' is
/// registered as replacement of the scalar load. Returns the newly created
/// 'vector.transfer_read' if vectorization was successful. Returns nullptr
/// otherwise.
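///
/// As an illustration only (assuming a 1-D strategy of size 128 vectorizing
/// the %j loop), a scalar load such as
///   %ld = affine.load %A[%i, %j] : memref<256x256xf32>
/// would be rewritten into something like
///   %v = vector.transfer_read %A[%i, %j], %pad
///          : memref<256x256xf32>, vector<128xf32>
/// where %pad is a padding value and the permutation map attribute (elided
/// here) is inferred from the load indices.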
static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
                                      VectorizationState &state) {
  MemRefType memRefType = loadOp.getMemRefType();
  Type elementType = memRefType.getElementType();
  auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);

  // Replace map operands with operands from the vector loop nest.
  SmallVector<Value, 8> mapOperands;
  state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);

  // Compute indices for the transfer op. AffineApplyOps may be generated.
  SmallVector<Value, 8> indices;
  indices.reserve(memRefType.getRank());
  if (loadOp.getAffineMap() !=
      state.builder.getMultiDimIdentityMap(memRefType.getRank()))
    computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
                           indices);
  else
    indices.append(mapOperands.begin(), mapOperands.end());

  // Compute the permutation map using the information of the new vector loops.
  auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
                                           indices, state.vecLoopToVecDim);
  if (!permutationMap) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
  LLVM_DEBUG(permutationMap.print(dbgs()));

  auto transfer = state.builder.create<vector::TransferReadOp>(
      loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);

  // Register replacement for future uses in the scope.
  state.registerOpVectorReplacement(loadOp, transfer);
  return transfer;
}

/// Vectorizes an affine store with the vectorization strategy in 'state' by
/// generating a 'vector.transfer_write' op with the proper permutation map
/// inferred from the indices of the store. The new 'vector.transfer_write' is
/// registered as replacement of the scalar store. Returns the newly created
/// 'vector.transfer_write' if vectorization was successful. Returns nullptr
/// otherwise.
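///
/// As an illustration only (assuming a 1-D strategy of size 128 vectorizing
/// the %j loop), a scalar store such as
///   affine.store %val, %A[%i, %j] : memref<256x256xf32>
/// would be rewritten into something like
///   vector.transfer_write %vval, %A[%i, %j]
///          : vector<128xf32>, memref<256x256xf32>
/// where %vval is the vectorized form of %val and the permutation map
/// attribute (elided here) is inferred from the store indices.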
static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
                                       VectorizationState &state) {
  MemRefType memRefType = storeOp.getMemRefType();
  Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
  if (!vectorValue)
    return nullptr;

  // Replace map operands with operands from the vector loop nest.
  SmallVector<Value, 8> mapOperands;
  state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);

  // Compute indices for the transfer op. AffineApplyOps may be generated.
  SmallVector<Value, 8> indices;
  indices.reserve(memRefType.getRank());
  if (storeOp.getAffineMap() !=
      state.builder.getMultiDimIdentityMap(memRefType.getRank()))
    computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
                           indices);
  else
    indices.append(mapOperands.begin(), mapOperands.end());

  // Compute the permutation map using the information of the new vector loops.
  auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
                                           indices, state.vecLoopToVecDim);
  if (!permutationMap)
    return nullptr;
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
  LLVM_DEBUG(permutationMap.print(dbgs()));

  auto transfer = state.builder.create<vector::TransferWriteOp>(
      storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
      permutationMap);
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);

  // Register replacement for future uses in the scope.
  state.registerOpVectorReplacement(storeOp, transfer);
  return transfer;
}

/// Returns true if `value` is a constant equal to the neutral element of the
/// given vectorizable reduction.
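/// For example, an 'arith.constant 0.0' initial value is the neutral element
/// of an 'addf' reduction, while 'arith.constant 1.0' is the neutral element
/// of a 'mulf' reduction.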
static bool isNeutralElementConst(arith::AtomicRMWKind reductionKind,
                                  Value value, VectorizationState &state) {
  Type scalarTy = value.getType();
  if (!VectorType::isValidElementType(scalarTy))
    return false;
  Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
                                             state.builder, value.getLoc());
  if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
    return constOp.getValue() == valueAttr;
  return false;
}

/// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
/// created and registered as replacement for the scalar loop. The builder's
/// insertion point is set to the new loop's body so that subsequent vectorized
/// operations are inserted into the new loop. If the loop is a vector
/// dimension, the step of the newly created loop will reflect the vectorization
/// factor used to vectorize that dimension.
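///
/// As an illustration only (assuming a vectorization factor of 128 along this
/// dimension), a scalar loop such as
///   affine.for %i = 0 to 1024 { ... }
/// is replaced by a vector loop
///   affine.for %i = 0 to 1024 step 128 { ... }
/// whose body is then populated by vectorizing the enclosed operations.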
static Operation *vectorizeAffineForOp(AffineForOp forOp,
                                       VectorizationState &state) {
  const VectorizationStrategy &strategy = *state.strategy;
  auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
  bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();

  // TODO: Vectorization of reduction loops is not supported for non-unit steps.
  if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
    LLVM_DEBUG(
        dbgs()
        << "\n[early-vect]+++++ unsupported step size for reduction loop: "
        << forOp.getStep() << "\n");
    return nullptr;
  }

  // If we are vectorizing a vector dimension, compute a new step for the new
  // vectorized loop using the vectorization factor for the vector dimension.
  // Otherwise, propagate the step of the scalar loop.
  unsigned newStep;
  if (isLoopVecDim) {
    unsigned vectorDim = loopToVecDimIt->second;
    assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
    int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
    newStep = forOp.getStep() * forOpVecFactor;
  } else {
    newStep = forOp.getStep();
  }

  // Get information about reduction kinds.
  ArrayRef<LoopReduction> reductions;
  if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
    auto it = strategy.reductionLoops.find(forOp);
    assert(it != strategy.reductionLoops.end() &&
           "Reduction descriptors not found when vectorizing a reduction loop");
    reductions = it->second;
    assert(reductions.size() == forOp.getNumIterOperands() &&
           "The size of reductions array must match the number of iter_args");
  }

  // Vectorize 'iter_args'.
  SmallVector<Value, 8> vecIterOperands;
  if (!isLoopVecDim) {
    for (auto operand : forOp.getIterOperands())
      vecIterOperands.push_back(vectorizeOperand(operand, state));
  } else {
    // For reduction loops we need to pass a vector of neutral elements as an
    // initial value of the accumulator. We will add the original initial value
    // later.
    for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
      vecIterOperands.push_back(createInitialVector(
          std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
    }
  }

  auto vecForOp = state.builder.create<AffineForOp>(
      forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
      forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
      vecIterOperands,
      /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
        // Make sure we don't create a default terminator in the loop body as
        // the proper terminator will be added during vectorization.
      });

  // Register loop-related replacements:
  // 1) The new vectorized loop is registered as vector replacement of the
  //    scalar loop.
  // 2) The new iv of the vectorized loop is registered as scalar replacement
  //    since a scalar copy of the iv will prevail in the vectorized loop.
  // TODO: A vector replacement will also be added in the future when
  // vectorization of linear ops is supported.
  // 3) The new 'iter_args' region arguments are registered as vector
  //    replacements since they have been vectorized.
  // 4) If the loop performs a reduction along the vector dimension, a
  //    `vector.reduction` or similar op is inserted for each resulting value
  //    of the loop and its scalar value replaces the corresponding scalar
  //    result of the loop.
  state.registerOpVectorReplacement(forOp, vecForOp);
  state.registerValueScalarReplacement(forOp.getInductionVar(),
                                       vecForOp.getInductionVar());
  for (auto iterTuple :
       llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
    state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
                                            std::get<1>(iterTuple));

  if (isLoopVecDim) {
    for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
      // First, we reduce the vector returned from the loop into a scalar.
      Value reducedRes =
          getVectorReductionOp(reductions[i].kind, state.builder,
                               vecForOp.getLoc(), vecForOp.getResult(i));
      LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
                        << reducedRes);
      // Then we combine it with the original (scalar) initial value unless it
      // is equal to the neutral element of the reduction.
      Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
      Value finalRes = reducedRes;
      if (!isNeutralElementConst(reductions[i].kind, origInit, state))
        finalRes =
            arith::getReductionOp(reductions[i].kind, state.builder,
                                  reducedRes.getLoc(), reducedRes, origInit);
      state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
    }
  }

  if (isLoopVecDim)
    state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;

  // Change insertion point so that upcoming vectorized instructions are
  // inserted into the vectorized loop's body.
  state.builder.setInsertionPointToStart(vecForOp.getBody());

  // If this is a reduction loop then we may need to create a mask to filter out
  // garbage in the last iteration.
  if (isLoopVecDim && forOp.getNumIterOperands() > 0)
    createMask(vecForOp, state);

  return vecForOp;
}

/// Vectorizes an arbitrary operation by plain widening. We apply generic type
/// widening to all its results and retrieve the vector counterparts of all its
/// operands.
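///
/// As an illustration only (assuming a 1-D strategy of size 128), a scalar
/// operation such as
///   %res = arith.mulf %a, %b : f32
/// is widened into
///   %vres = arith.mulf %va, %vb : vector<128xf32>
/// where %va and %vb are the vectorized forms of %a and %b.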
static Operation *widenOp(Operation *op, VectorizationState &state) {
  SmallVector<Type, 8> vectorTypes;
  for (Value result : op->getResults())
    vectorTypes.push_back(
        VectorType::get(state.strategy->vectorSizes, result.getType()));

  SmallVector<Value, 8> vectorOperands;
  for (Value operand : op->getOperands()) {
    Value vecOperand = vectorizeOperand(operand, state);
    if (!vecOperand) {
      LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
      return nullptr;
    }
    vectorOperands.push_back(vecOperand);
  }

  // Create a clone of the op with the proper operands and return types.
  // TODO: The following assumes there is always an op with a fixed
  // name that works both in scalar mode and vector mode.
  // TODO: Is it worth considering an Operation.clone operation which
  // changes the type so we can promote an Operation with less boilerplate?
  Operation *vecOp =
      state.builder.create(op->getLoc(), op->getName().getIdentifier(),
                           vectorOperands, vectorTypes, op->getAttrs());
  state.registerOpVectorReplacement(op, vecOp);
  return vecOp;
}

/// Vectorizes a yield operation by widening its types. The builder's insertion
/// point is set after the vectorized parent op to continue vectorizing the
/// operations after the parent op. When vectorizing a reduction loop, a mask
/// may be used to prevent adding garbage values to the accumulator.
static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
                                         VectorizationState &state) {
  Operation *newYieldOp = widenOp(yieldOp, state);
  Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();

  // If there is a mask for this loop then we must prevent garbage values from
  // being added to the accumulator by inserting `select` operations, for
  // example:
  //
  //   %val_masked = select %mask, %val, %neutralCst : vector<128xi1>,
  //                 vector<128xf32>
  //   %res = arith.addf %acc, %val_masked : vector<128xf32>
  //   affine.yield %res : vector<128xf32>
  //
  if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
    state.builder.setInsertionPoint(newYieldOp);
    for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
      SmallVector<Operation *> combinerOps;
      Value reducedVal = matchReduction(
          cast<AffineForOp>(newParentOp).getRegionIterArgs(), i, combinerOps);
      assert(reducedVal && "expect non-null value for parallel reduction loop");
      assert(combinerOps.size() == 1 && "expect only one combiner op");
      // IterOperands are neutral element vectors.
      Value neutralVal = cast<AffineForOp>(newParentOp).getIterOperands()[i];
      state.builder.setInsertionPoint(combinerOps.back());
      Value maskedReducedVal = state.builder.create<arith::SelectOp>(
          reducedVal.getLoc(), mask, reducedVal, neutralVal);
      LLVM_DEBUG(
          dbgs() << "\n[early-vect]+++++ masking an input to a binary op that "
                    "produces a value for a yield op: "
                 << maskedReducedVal);
      combinerOps.back()->replaceUsesOfWith(reducedVal, maskedReducedVal);
    }
  }

  state.builder.setInsertionPointAfter(newParentOp);
  return newYieldOp;
}

/// Encodes Operation-specific behavior for vectorization. In general we
/// assume that all operands of an op must be vectorized but this is not
/// always true. In the future, it would be nice to have a trait that
/// describes how a particular operation vectorizes. For now we implement the
/// case distinction here. Returns a vectorized form of an operation or
/// nullptr if vectorization fails.
// TODO: consider adding a trait to Op to describe how it gets vectorized.
// Maybe some Ops are not vectorizable or require some tricky logic, we cannot
// do one-off logic here; ideally it would be TableGen'd.
static Operation *vectorizeOneOperation(Operation *op,
                                        VectorizationState &state) {
  // Sanity checks.
  assert(!isa<vector::TransferReadOp>(op) &&
         "vector.transfer_read cannot be further vectorized");
  assert(!isa<vector::TransferWriteOp>(op) &&
         "vector.transfer_write cannot be further vectorized");

  if (auto loadOp = dyn_cast<AffineLoadOp>(op))
    return vectorizeAffineLoad(loadOp, state);
  if (auto storeOp = dyn_cast<AffineStoreOp>(op))
    return vectorizeAffineStore(storeOp, state);
  if (auto forOp = dyn_cast<AffineForOp>(op))
    return vectorizeAffineForOp(forOp, state);
  if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
    return vectorizeAffineYieldOp(yieldOp, state);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return vectorizeConstant(constant, state);

  // Other ops with regions are not supported.
  if (op->getNumRegions() != 0)
    return nullptr;

  return widenOp(op, state);
}

/// Recursive implementation to convert all the nested loops in 'match' to a 2D
/// vector container that preserves the relative nesting level of each loop with
/// respect to the others in 'match'. 'currentLevel' is the nesting level that
/// will be assigned to the loop in the current 'match'.
static void
getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
                         std::vector<SmallVector<AffineForOp, 2>> &loops) {
  // Add a new empty level to the output if it doesn't exist already.
  assert(currentLevel <= loops.size() && "Unexpected currentLevel");
  if (currentLevel == loops.size())
    loops.emplace_back();

  // Add current match and recursively visit its children.
  loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
  for (auto childMatch : match.getMatchedChildren()) {
    getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
  }
}

/// Converts all the nested loops in 'match' to a 2D vector container that
/// preserves the relative nesting level of each loop with respect to the others
/// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
/// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
/// 'loops[i+1]'.
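///
/// For example (with hypothetical loops), a match rooted at %i0 with two
/// child matches %i1 and %i2 directly nested under it would produce
/// loops = {{%i0}, {%i1, %i2}}.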
static void
getMatchedAffineLoops(NestedMatch match,
                      std::vector<SmallVector<AffineForOp, 2>> &loops) {
  getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
}

/// Internal implementation to vectorize affine loops from a single loop nest
/// using an n-D vectorization strategy.
static LogicalResult
vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
                  const VectorizationStrategy &strategy) {
  assert(loops[0].size() == 1 && "Expected single root loop");
  AffineForOp rootLoop = loops[0][0];
  VectorizationState state(rootLoop.getContext());
  state.builder.setInsertionPointAfter(rootLoop);
  state.strategy = &strategy;

  // Since patterns are recursive, they can very well intersect.
  // Since we do not want a fully greedy strategy in general, we decouple
  // pattern matching from profitability analysis and from application.
  // As a consequence we must check that each root pattern is still
  // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
  // TODO: implement a non-greedy profitability analysis that keeps only
  // non-intersecting patterns.
  if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
    return failure();
  }

  //////////////////////////////////////////////////////////////////////////////
  // Vectorize the scalar loop nest following a topological order. A new vector
  // loop nest with the vectorized operations is created along the process. If
  // vectorization succeeds, the scalar loop nest is erased. If vectorization
  // fails, the vector loop nest is erased and the scalar loop nest is not
  // modified.
  //////////////////////////////////////////////////////////////////////////////

  auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
    Operation *vectorOp = vectorizeOneOperation(op, state);
    if (!vectorOp) {
      LLVM_DEBUG(
          dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
                 << *op << "\n");
      return WalkResult::interrupt();
    }

    return WalkResult::advance();
  });

  if (opVecResult.wasInterrupted()) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
                      << rootLoop << "\n");
    // Erase the vector loop nest if it was created.
    auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
    if (vecRootLoopIt != state.opVectorReplacement.end())
      eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));

    return failure();
  }

  // Replace results of reduction loops with the scalar values computed using
  // `vector.reduction` or similar ops.
  for (auto resPair : state.loopResultScalarReplacement)
    resPair.first.replaceAllUsesWith(resPair.second);

  assert(state.opVectorReplacement.count(rootLoop) == 1 &&
         "Expected vector replacement for loop nest");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
                    << *state.opVectorReplacement[rootLoop]);

  // Finish this vectorization pattern.
  state.finishVectorizationPattern(rootLoop);
  return success();
}

/// Extracts the matched loops and vectorizes them following a topological
/// order. A new vector loop nest is created if vectorization succeeds, in which
/// case the original scalar loop nest is erased. If vectorization fails, the
/// original loop nest is left unmodified.
static LogicalResult vectorizeRootMatch(NestedMatch m,
                                        const VectorizationStrategy &strategy) {
  std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
  getMatchedAffineLoops(m, loopsToVectorize);
  return vectorizeLoopNest(loopsToVectorize, strategy);
}

/// Traverses all the loop matches and classifies them into intersection
/// buckets. Two matches intersect if any of them encloses the other one. A
/// match intersects with a bucket if the match intersects with the root
/// (outermost) loop in that bucket.
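/// For example (with hypothetical loops), a match rooted at %i0 and a match
/// rooted at a loop nested inside %i0 end up in the same bucket, whereas a
/// match rooted at a sibling loop of %i0 starts a new bucket.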
static void computeIntersectionBuckets(
    ArrayRef<NestedMatch> matches,
    std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
  assert(intersectionBuckets.empty() && "Expected empty output");
  // Keeps track of the root (outermost) loop of each bucket.
  SmallVector<AffineForOp, 8> bucketRoots;

  for (const NestedMatch &match : matches) {
    AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
    bool intersects = false;
    for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
      AffineForOp bucketRoot = bucketRoots[i];
      // Add match to the bucket if the bucket root encloses the match root.
      if (bucketRoot->isAncestor(matchRoot)) {
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
      // Add match to the bucket if the match root encloses the bucket root. The
      // match root becomes the new bucket root.
      if (matchRoot->isAncestor(bucketRoot)) {
        bucketRoots[i] = matchRoot;
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
    }

    // Match doesn't intersect with any existing bucket. Create a new bucket for
    // it.
    if (!intersects) {
      bucketRoots.push_back(matchRoot);
      intersectionBuckets.emplace_back();
      intersectionBuckets.back().push_back(match);
    }
  }
}

/// Internal implementation to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order. `reductionLoops` can be provided to specify loops which
/// can be vectorized along the reduction dimension.
static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                           ArrayRef<int64_t> vectorSizes,
                           ArrayRef<int64_t> fastestVaryingPattern,
                           const ReductionLoopMap &reductionLoops) {
  assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
         "Vectorizing reductions is supported only for 1-D vectors");

  // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
  Optional<NestedPattern> pattern =
      makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
  if (!pattern) {
    LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
    return;
  }

  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
  LLVM_DEBUG(dbgs() << *parentOp << "\n");

  unsigned patternDepth = pattern->getDepth();

  // Compute all the pattern matches and classify them into buckets of
  // intersecting matches.
  SmallVector<NestedMatch, 32> allMatches;
  pattern->match(parentOp, &allMatches);
  std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
  computeIntersectionBuckets(allMatches, intersectionBuckets);

  // Iterate over all buckets and vectorize the matches eagerly. We can only
  // vectorize one match from each bucket since all the matches within a bucket
  // intersect.
  for (auto &intersectingMatches : intersectionBuckets) {
    for (NestedMatch &match : intersectingMatches) {
      VectorizationStrategy strategy;
      // TODO: depending on profitability, elect to reduce the vector size.
      strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
      strategy.reductionLoops = reductionLoops;
      if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
                                      patternDepth, &strategy))) {
        continue;
      }
      vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
                                &strategy);
      // Vectorize match. Skip the rest of intersecting matches in the bucket if
      // vectorization succeeded.
      // TODO: if pattern does not apply, report it; alter the cost/benefit.
      // TODO: some diagnostics if failure to vectorize occurs.
      if (succeeded(vectorizeRootMatch(match, strategy)))
        break;
    }
  }

  LLVM_DEBUG(dbgs() << "\n");
}

/// Applies vectorization to the current function by searching over a bunch of
/// predetermined patterns.
void Vectorize::runOnOperation() {
  func::FuncOp f = getOperation();
  if (!fastestVaryingPattern.empty() &&
      fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with different size than "
                 "the vector size.");
    return signalPassFailure();
  }

  if (vectorizeReductions && vectorSizes.size() != 1) {
    f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
    return signalPassFailure();
  }

  DenseSet<Operation *> parallelLoops;
  ReductionLoopMap reductionLoops;

  // If 'vectorize-reductions=true' is provided, we also populate the
  // `reductionLoops` map.
  if (vectorizeReductions) {
    f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
      SmallVector<LoopReduction, 2> reductions;
      if (isLoopParallel(loop, &reductions)) {
        parallelLoops.insert(loop);
        // If it's not a reduction loop, adding it to the map is not necessary.
        if (!reductions.empty())
          reductionLoops[loop] = reductions;
      }
    });
  } else {
    f.walk([&parallelLoops](AffineForOp loop) {
      if (isLoopParallel(loop))
        parallelLoops.insert(loop);
    });
  }

  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// Verify that affine loops in 'loops' meet the nesting criteria expected by
/// SuperVectorizer:
///   * There must be at least one loop.
///   * There must be a single root loop (nesting level 0).
///   * Each loop at a given nesting level must be nested in a loop from a
///     previous nesting level.
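///
/// For example (with hypothetical loops), loops = {{%i0}, {%i1, %i2}} is
/// accepted only if both %i1 and %i2 are nested inside %i0, whereas
/// loops = {{%i0, %i1}} is rejected because it has two root loops.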
static LogicalResult
verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
  // Expected at least one loop.
  if (loops.empty())
    return failure();

  // Expected only one root loop.
  if (loops[0].size() != 1)
    return failure();

  // Traverse loops outer-to-inner to check some invariants.
  for (int i = 1, end = loops.size(); i < end; ++i) {
    for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
      if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
            return maybeParent->isProperAncestor(loop);
          }))
        return failure();

      // Check that each loop at this level is not nested in another loop from
      // this level.
      for (AffineForOp sibling : loops[i]) {
        if (sibling->isProperAncestor(loop))
          return failure();
      }
    }
  }

  return success();
}

namespace mlir {

/// External utility to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order.
/// If `reductionLoops` is not empty, the given reduction loops may be
/// vectorized along the reduction dimension.
/// TODO: Vectorizing reductions is supported only for 1-D vectorization.
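///
/// A minimal usage sketch (all names below are illustrative only):
///   DenseSet<Operation *> loopsToVectorize = ...; // parallel affine.for ops
///   vectorizeAffineLoops(parentOp, loopsToVectorize, /*vectorSizes=*/{256},
///                        /*fastestVaryingPattern=*/{},
///                        /*reductionLoops=*/ReductionLoopMap());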
void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                          ArrayRef<int64_t> vectorSizes,
                          ArrayRef<int64_t> fastestVaryingPattern,
                          const ReductionLoopMap &reductionLoops) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// External utility to vectorize affine loops from a single loop nest using an
/// n-D vectorization strategy (see doc in VectorizationStrategy definition).
/// Loops are provided in a 2D vector container. The first dimension represents
/// the nesting level relative to the loops to be vectorized. The second
/// dimension contains the loops. This means that:
///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
///
/// For example, for the following loop nest:
///
///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
///               %out0: memref<64x128x512xf32>,
///               %out1: memref<64x128x128xf32>) {
///     affine.for %i0 = 0 to 64 {
///       affine.for %i1 = 0 to 128 {
///         affine.for %i2 = 0 to 512 {
///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
///         }
///         affine.for %i3 = 0 to 128 {
///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
///         }
///       }
///     }
///     return
///   }
///
/// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
/// innermost loops;
/// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
/// loops;
/// loops = {{%i2}}, to vectorize only the first innermost loop;
/// loops = {{%i3}}, to vectorize only the second innermost loop;
/// loops = {{%i1}}, to vectorize only the middle loop.
LogicalResult
vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
                        const VectorizationStrategy &strategy) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  if (failed(verifyLoopNesting(loops)))
    return failure();
  return vectorizeLoopNest(loops, strategy);
}

std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
  return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
  return std::make_unique<Vectorize>();
}

} // namespace mlir