//===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements vectorization of loops, operations and data types to
// a target-independent, n-D super-vector abstraction.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace vector;
///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode, in a single,
/// compact representation (the vector types), information that is expected
/// to reduce the impact of the phase ordering problem.
///
/// Vector granularity:
/// ===================
/// This pass is designed to perform vectorization at a super-vector
/// granularity. A super-vector is loosely defined as a vector type that is a
/// multiple of a "good" vector size so the HW can efficiently implement a set
/// of high-level primitives. Multiple is understood along any dimension; e.g.
/// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
/// vector<8xf32> HW vector. Note that a "good vector size so the HW can
/// efficiently implement a set of high-level primitives" is not necessarily an
/// integer multiple of actual hardware registers. We leave details of this
/// distinction unspecified for now.
///
/// Some may prefer the terminology a "tile of HW vectors". In this case, one
/// should note that super-vectors implement an "always full tile" abstraction.
/// They guarantee no partial-tile separation is necessary by relying on a
/// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
/// transposition if necessary. In the general case this will require a scoped
/// allocation in some notional local memory.
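///
/// As a hedged sketch of the layout-transposition case (illustrative only;
/// the SSA names %A, %i, %j and the shapes are hypothetical, not output of
/// this pass), a vector.transfer_read with a permutation map is what realizes
/// the copy-reshape-transpose:
/// ```mlir
/// %pad = arith.constant 0.0 : f32
/// %v = vector.transfer_read %A[%i, %j], %pad
///     {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///     : memref<?x?xf32>, vector<32x256xf32>
/// ```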
///
/// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn into a single, compact, representation in the vector
/// types, information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
///   1. the associated loops have dependency semantics that do not prevent
///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization hampering transformations can be
///      applied to them anymore;
///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality;
/// In other words, super-vectorization provides a level of separation of
/// concern by way of opacity to subsequent passes. This has the effect of
/// encapsulating and propagating vectorization constraints down the list of
/// passes until we are ready to lower further.
///
/// For a particular target, a notion of minimal n-d vector size will be
/// specified and vectorization targets a multiple of those. In the following
/// paragraph, let "k ." represent "a multiple of", to be understood as a
/// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
/// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
///
/// Some non-exhaustive notable super-vector sizes of interest include:
///   - CPU: vector<k . HW_vector_size>,
///          vector<k' . core_count x k . HW_vector_size>,
///          vector<socket_count x k' . core_count x k . HW_vector_size>;
///   - GPU: vector<k . warp_size>,
///          vector<k . warp_size x float2>,
///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
///
/// Loops and operations are emitted that operate on those super-vector shapes.
/// Subsequent lowering passes will materialize to actual HW vector sizes.
/// These passes are expected to be (gradually) more target-specific.
///
/// At a high level, a vectorized load in a loop will resemble:
/// ```mlir
/// affine.for %i = ? to ? step ? {
///   %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
/// }
/// ```
/// It is the responsibility of the implementation of vector.transfer_read to
/// materialize vector registers from the original scalar memrefs. A later
/// (more target-dependent) lowering pass will materialize to actual HW vector
/// sizes. This lowering may occur at different times:
///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
///      DmaWaitOp + vectorized operations for data transformations and
///      shuffle; thus opening opportunities for unrolling and pipelining. This
///      is an instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library call;
///      achieving full separation of concerns. This is an instance of a
///      library call; or
///   3. a mix of both, e.g. based on a model.
/// In the future, these operations will expose a contract to constrain the
/// search on vectorization patterns and sizes.
///
/// Occurrence of super-vectorization in the compiler flow:
/// =======================================================
/// This is an active area of investigation. We start with 2 remarks to
/// position super-vectorization in the context of existing ongoing work: LLVM
/// VPLAN and LLVM SLP Vectorizer.
///
/// LLVM VPLAN:
/// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives to VPLAN;
/// for instance, in a traditional polyhedral compilation flow (e.g. the PPCG
/// project uses ISL to provide dependence analysis, multi-level scheduling +
/// tiling, lifting of footprints to fast memory, communication synthesis,
/// mapping and register optimizations), applied after all of the above and
/// before unrolling. When vectorization is applied at this *late* level in a
/// typical polyhedral flow, and is instantiated with actual hardware vector
/// sizes, super-vectorization is expected to match (or subsume) the type of
/// patterns that LLVM's VPLAN aims at targeting. The main difference here is
/// that MLIR is higher level and our implementation should be significantly
/// simpler. Also note that in this mode, recursive patterns are probably a
/// bit of an overkill although it is reasonable to expect that mixing a bit
/// of outer loop and inner loop vectorization + unrolling will provide
/// interesting choices to MLIR.
///
/// LLVM SLP Vectorizer:
/// --------------------
/// Super-vectorization however is not meant to be usable in a similar fashion
/// to the SLP vectorizer. The main difference lies in the information that
/// both vectorizers use: super-vectorization examines contiguity of memory
/// references along fastest varying dimensions and loops with recursive nested
/// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
/// the other hand, performs flat pattern matching inside a single unrolled
/// loop body and stitches together pieces of load and store operations into
/// full 1-D vectors. We envision that the SLP vectorizer is a good way to
/// capture innermost loop, control-flow dependent patterns that
/// super-vectorization may not be able to capture easily. In other words,
/// super-vectorization does not aim at replacing the SLP vectorizer and the
/// two solutions are complementary.
///
/// Ongoing investigations:
/// -----------------------
/// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
/// opportunities in the context of the traditional polyhedral compiler flow
/// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
/// we expect to experiment with super-vectorization:
///   1. Right after language lowering to MLIR: this is the earliest time where
///      super-vectorization is expected to be applied. At this level, all the
///      language/user/library-level annotations are available and can be fully
///      exploited. Examples include loop-type annotations (such as parallel,
///      reduction, scan, dependence distance vector, vectorizable) as well as
///      memory access annotations (such as non-aliasing writes guaranteed, or
///      indirect accesses that are permutations by construction), or the fact
///      that a particular operation is prescribed atomic by the user. At this
///      level, anything that enriches what dependence analysis can do should
///      be aggressively exploited. At this level we are close to having
///      explicit vector types in the language, except we do not impose that
///      burden on the programmer/library: we derive information from scalar
///      code + annotations.
///   2. After dependence analysis and before polyhedral scheduling: the
///      information that supports vectorization does not need to be supplied
///      by a higher level of abstraction. Traditional dependence analysis is
///      available in MLIR and will be used to drive vectorization and cost
///      models.
///
/// Let's pause here and remark that applying super-vectorization as described
/// in 1. and 2. presents clear opportunities and risks:
///   - the opportunity is that vectorization is burned in the type system and
///     is protected from the adverse effects of loop scheduling, tiling, loop
///     interchange and all passes downstream. Provided that subsequent passes
///     are able to operate on vector types, the vector shapes, associated loop
///     iterator properties, alignment, and contiguity of fastest varying
///     dimensions are preserved until we lower the super-vector types. We
///     expect this to significantly rein in the adverse effects of phase
///     ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///     on elemental vector types (note that this is always true, wherever
///     vectorization is applied) and b. that imposing vectorization
///     constraints too early may be overall detrimental to loop fusion, tiling
///     and other transformations because the dependence distances are
///     coarsened when operating on elemental vector types. For this reason,
///     the pattern profitability analysis should include a component that also
///     captures the maximal amount of fusion available under a particular
///     pattern. This is still at the stage of rough ideas but in this context,
///     search is our friend as the Tensor Comprehensions and auto-TVM
///     contributions demonstrated previously.
/// The bottom line is that we do not yet have good answers to the above, but
/// we aim at making it easy to answer such questions.
///
/// Back to our listing, the last places where early super-vectorization makes
/// sense are:
///   3. right after polyhedral-style scheduling: PLUTO-style algorithms are
///      known to improve locality and parallelism and to be configurable
///      (e.g. max-fuse, smart-fuse etc). They can also have adverse effects on
///      contiguity properties that are required for vectorization but the
///      vector.transfer copy-reshape-pad-transpose abstraction is expected to
///      help recapture these properties.
///   4. right after polyhedral-style scheduling+tiling;
///   5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
///      probably the most promising places because applying tiling achieves a
///      separation of concerns that allows rescheduling to worry less about
///      locality and more about parallelism and distribution (e.g. min-fuse).
///
/// At these levels the risk-reward looks different: on one hand we probably
/// lost a good deal of language/user/library-level annotation; on the other
/// hand we gained parallelism and locality through scheduling and tiling.
/// However we probably want to ensure tiling is compatible with the
/// full-tile-only abstraction used in super-vectorization or suffer the
/// consequences. It is too early to place bets on what will win but we expect
/// super-vectorization to be the right abstraction to allow exploring at all
/// these levels. And again, search is our friend.
///
/// Lastly, we mention it again here:
///   6. as an MLIR-based alternative to VPLAN.
///
/// Lowering, unrolling, pipelining:
/// ================================
/// TODO: point to the proper places.
///
/// Algorithm:
/// ==========
/// The algorithm proceeds in a few steps:
///   1. defining super-vectorization patterns and matching them on the tree of
///      AffineForOp. A super-vectorization pattern is defined as a recursive
///      data structure that matches and captures nested, imperfectly-nested
///      loops that have a. conformable loop annotations attached (e.g.
///      parallel, reduction, vectorizable, ...) as well as b. all contiguous
///      load/store operations along a specified minor dimension (not
///      necessarily the fastest varying);
///   2. analyzing those patterns for profitability (TODO: and interference);
///   3. then, for each pattern in order:
///     a. applying iterative rewriting of the loops and all their nested
///        operations in topological order. Rewriting is implemented by
///        coarsening the loops and converting operations and operands to
///        their vector forms. Processing operations in topological order is
///        relatively simple due to the structured nature of the control-flow
///        representation. This order ensures that all the operands of a given
///        operation have been vectorized before the operation itself in a
///        single traversal, except for operands defined outside of the loop
///        nest. The algorithm can convert the following operations to their
///        vector form (see the sketch after this list):
///        * Affine load and store operations are converted to opaque vector
///          transfer read and write operations.
///        * Scalar constant operations/operands are converted to vector
///          constant operations (splat).
///        * Uniform operands (only induction variables of loops not mapped to
///          a vector dimension, or operands defined outside of the loop nest
///          for now) are broadcasted to a vector.
///          TODO: Support more uniform cases.
///        * Affine for operations with 'iter_args' are vectorized by
///          vectorizing their 'iter_args' operands and results.
///          TODO: Support more complex loops with divergent lbs and/or ubs.
///        * The remaining operations in the loop nest are vectorized by
///          widening their scalar types to vector types.
///     b. if everything under the root AffineForOp in the current pattern is
///        vectorized properly, we commit that loop to the IR and remove the
///        scalar loop. Otherwise, we discard the vectorized loop and keep the
///        original scalar loop.
///     c. vectorization is applied on the next pattern in the list. Because
///        pattern interference avoidance is not yet implemented and we do not
///        support further vectorizing an already vectorized load, we need to
///        re-verify that the pattern is still vectorizable. This is expected
///        to make cost models more difficult to write and is subject to
///        improvement in the future.
///
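/// The following sketch illustrates the per-operation conversions of step 3.a
/// for a 1-D, 128-wide vectorization. It is schematic rather than actual pass
/// output, and the SSA names are hypothetical:
/// ```mlir
/// // affine.load becomes an opaque vector transfer read:
/// //   %a = affine.load %A[%i] : memref<?xf32>
/// %va = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
/// // a scalar constant becomes a splat vector constant:
/// //   %f = arith.constant 1.0 : f32
/// %vf = arith.constant dense<1.0> : vector<128xf32>
/// // a uniform value %u defined outside the loop nest is broadcast:
/// %vu = vector.broadcast %u : f32 to vector<128xf32>
/// ```
///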
/// Choice of loop transformation to support the algorithm:
/// =======================================================
/// The choice of loop transformation to apply for coarsening vectorized loops
/// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor 128, we want to transform the following input:
/// ```mlir
/// affine.for %i = %M to %N {
///   %a = affine.load %A[%i] : memref<?xf32>
/// }
/// ```
///
/// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion etc) say after stripmining (and potentially unrolling in
/// the case of LLVM's SLP vectorizer):
/// ```mlir
/// affine.for %i = floor(%M, 128) to ceil(%N, 128) {
///   affine.for %ii = max(%M, 128 * %i) to min(%N, 128 * %i + 127) {
///     %a = affine.load %A[%ii] : memref<?xf32>
///   }
/// }
/// ```
///
/// Instead, we seek to vectorize early and freeze vector types before
/// scheduling, so we want to generate a pattern that resembles:
/// ```mlir
/// affine.for %i = ? to ? step ? {
///   %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
/// }
/// ```
///
///   i. simply dividing the lower / upper bounds by 128 creates issues
///      when representing expressions such as ii + 1 because now we only
///      have access to original values that have been divided. Additional
///      information is needed to specify accesses at below-128 granularity;
///  ii. another alternative is to coarsen the loop step but this may have
///      consequences on dependence analysis and fusability of loops: fusable
///      loops probably need to have the same step (because we don't want to
///      stripmine/unroll to enable fusion).
/// As a consequence, we choose to represent the coarsening using the loop
/// step for now and reevaluate in the future. Note that we can renormalize
/// loop steps later if/when we have evidence that they are problematic.
///
/// For the simple strawman example above, vectorizing for a 1-D vector
/// abstraction of size 128 returns code similar to:
/// ```mlir
/// affine.for %i = %M to %N step 128 {
///   %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
/// }
/// ```
///
/// Unsupported cases, extensions, and work in progress (help welcome :-) ):
/// ========================================================================
///   1. lowering to concrete vector types for various HW;
///   2. reduction support for n-D vectorization and non-unit steps;
///   3. non-effecting padding during vector.transfer_read and filter during
///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
///      (hopefully without read-modify-writes);
///   5. control-flow support;
///   6. cost-models, heuristics and search;
///   7. Op implementation, extensions and implication on memref views;
///   8. many TODOs left around.
///
/// Examples:
/// =========
/// Consider the following Function:
/// ```mlir
/// func @vector_add_2d(%M : index, %N : index) -> f32 {
///   %A = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %B = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %C = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %f1 = arith.constant 1.0 : f32
///   %f2 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %M {
///     affine.for %i1 = 0 to %N {
///       // non-scoped %f1
///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i2 = 0 to %M {
///     affine.for %i3 = 0 to %N {
///       // non-scoped %f2
///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i4 = 0 to %M {
///     affine.for %i5 = 0 to %N {
///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
///       %s5 = arith.addf %a5, %b5 : f32
///       // non-scoped %f1
///       %s6 = arith.addf %s5, %f1 : f32
///       // non-scoped %f2
///       %s7 = arith.addf %s5, %f2 : f32
///       // diamond dependency.
///       %s8 = arith.addf %s7, %s6 : f32
///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %res = memref.load %C[%c7, %c42] : memref<?x?xf32, 0>
///   return %res : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=256 test-fastest-varying=0"
/// ```
///
/// produces this standard innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///         memref<?x?xf32>, vector<256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///         memref<?x?xf32>, vector<256xf32>
///       %5 = arith.addf %3, %4 : vector<256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<256xf32>
///       %8 = arith.addf %7, %6 : vector<256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///         vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=32,256 \
///                          test-fastest-varying=1,0"
/// ```
///
/// produces this more interesting mixed outer-innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 step 32 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<32x256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 step 32 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<32x256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 step 32 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///         memref<?x?xf32>, vector<32x256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///         memref<?x?xf32>, vector<32x256xf32>
///       %5 = arith.addf %3, %4 : vector<32x256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<32x256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<32x256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<32x256xf32>
///       %8 = arith.addf %7, %6 : vector<32x256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///         vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// Of course, much more intricate n-D imperfectly-nested patterns can be
/// vectorized too and specified in a fully declarative fashion.
///
/// Reduction:
/// ==========
/// Vectorizing reduction loops along the reduction dimension is supported if:
///   - the reduction kind is supported,
///   - the vectorization is 1-D, and
///   - the step size of the loop equals one.
///
/// Compared to the non-vector-dimension case, two additional things are done
/// during vectorization of such loops:
///   - The resulting vector returned from the loop is reduced to a scalar
///     using `vector.reduction`.
///   - In some cases a mask is applied to the vector yielded at the end of the
///     loop to prevent garbage values from being written to the accumulator.
///
/// Reduction vectorization is switched off by default; it can be enabled by
/// passing a map from loops to reductions to the utility functions, or by
/// passing `vectorize-reductions=true` to the vectorization pass.
///
/// Consider the following example:
/// ```mlir
/// func @vecred(%in: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
///     %ld = affine.load %in[%i] : memref<512xf32>
///     %cos = math.cos %ld : f32
///     %add = arith.addf %part_sum, %cos : f32
///     affine.yield %add : f32
///   }
///   return %sum : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
///                          vectorize-reductions=true"
/// ```
/// produces the following output:
/// ```mlir
/// #map = affine_map<(d0) -> (-d0 + 500)>
/// func @vecred(%arg0: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %cst_0 = arith.constant dense<0.000000e+00> : vector<128xf32>
///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
///       -> (vector<128xf32>) {
///     // %2 is the number of iterations left in the original loop.
///     %2 = affine.apply #map(%arg1)
///     %3 = vector.create_mask %2 : vector<128xi1>
///     %cst_1 = arith.constant 0.000000e+00 : f32
///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
///       memref<512xf32>, vector<128xf32>
///     %5 = math.cos %4 : vector<128xf32>
///     %6 = arith.addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
///     affine.yield %7 : vector<128xf32>
///   }
///   %1 = vector.reduction <add>, %0 : vector<128xf32> into f32
///   return %1 : f32
/// }
/// ```
///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result. The mask is full of
/// ones in every iteration except for the last one, in which it has the form
/// `11...100...0` with 116 ones and 12 zeros.

#define DEBUG_TYPE "early-vect"

using llvm::dbgs;

/// Forward declaration.
static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
                             int fastestVaryingMemRefDimension);

/// Creates a vectorization pattern from the command line arguments.
/// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// `llvm::None`, which conservatively results in no vectorization.
static Optional<NestedPattern>
makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
            ArrayRef<int64_t> fastestVaryingPattern) {
  using matcher::For;
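  // Determine the fastest-varying memref dimension requested at each nesting
  // depth; -1 encodes "unspecified" when fewer dimensions were provided.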
  int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
  int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
  int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
  switch (vectorRank) {
  case 1:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
  case 2:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
  case 3:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1),
                   For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
  default: {
    return llvm::None;
  }
  }
}

static NestedPattern &vectorTransferPattern() {
  static auto pattern = matcher::Op([](Operation &op) {
    return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
  });
  return pattern;
}

namespace {

/// Base state for the vectorize pass.
/// Command line arguments are preempted by non-empty pass arguments.
struct Vectorize : public AffineVectorizeBase<Vectorize> {
  Vectorize() = default;
  Vectorize(ArrayRef<int64_t> virtualVectorSize);
  void runOnOperation() override;
};

} // namespace

Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
  vectorSizes = virtualVectorSize;
}

static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
                                      unsigned patternDepth,
                                      VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth must be greater than depthInPattern");
  if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
    // This loop is nested deeper than the number of vector dimensions: don't
    // vectorize it.
    return;
  }
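  // Map the loop to a vector dimension, counting from the innermost loop of
  // the pattern: the innermost matched loop is assigned the last
  // (fastest-varying) vector dimension.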
  strategy->loopToVectorDim[loop] =
      strategy->vectorSizes.size() - (patternDepth - depthInPattern);
}

/// Implements a simple strawman strategy for vectorization.
/// Given a matched pattern `matches` of depth `patternDepth`, this strategy
/// greedily assigns the fastest varying dimension ** of the vector ** to the
/// innermost loop in the pattern.
/// When coupled with a pattern that looks for the fastest varying dimension
/// in load/store MemRefs, this creates a generic vectorization strategy that
/// works for any loop in a hierarchy (outermost, innermost or intermediate).
///
/// TODO: In the future we should additionally increase the power of the
/// profitability analysis along 3 directions:
///   1. account for loop extents (both static and parametric + annotations);
///   2. account for data layout permutations;
///   3. account for impact of vectorization on maximal loop fusion.
/// Then we can quantify the above to build a cost model and search over
/// strategies.
static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
                                          unsigned depthInPattern,
                                          unsigned patternDepth,
                                          VectorizationStrategy *strategy) {
  for (auto m : matches) {
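    // Recurse into the children of this match first, then decide whether the
    // matched loop itself should be assigned a vector dimension.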
    if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
                                    patternDepth, strategy))) {
      return failure();
    }
    vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
                              patternDepth, strategy);
  }
  return success();
}

///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////

namespace {

struct VectorizationState {

  VectorizationState(MLIRContext *context) : builder(context) {}

  /// Registers the vector replacement of a scalar operation and its result
  /// values. Both operations must have the same number of results.
  ///
  /// This utility is used to register the replacement for the vast majority
  /// of the vectorized operations.
  ///
  /// Example:
  ///   * 'replaced': %0 = arith.addf %1, %2 : f32
  ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
  void registerOpVectorReplacement(Operation *replaced, Operation *replacement);

  /// Registers the vector replacement of a scalar value. The replacement
  /// operation should have a single result, which replaces the scalar value.
  ///
  /// This utility is used to register the vector replacement of block
  /// arguments and operation results which are not directly vectorized (i.e.,
  /// their scalar version still exists after vectorization), like uniforms.
  ///
  /// Example:
  ///   * 'replaced': block argument or operation outside of the vectorized
  ///     loop.
  ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
  void registerValueVectorReplacement(Value replaced, Operation *replacement);

  /// Registers the vector replacement of a block argument (e.g., iter_args).
  ///
  /// Example:
  ///   * 'replaced': 'iter_arg' block argument.
  ///   * 'replacement': vectorized 'iter_arg' block argument.
  void registerBlockArgVectorReplacement(BlockArgument replaced,
                                         BlockArgument replacement);

  /// Registers the scalar replacement of a scalar value. 'replacement' must
  /// be scalar. Both values must be block arguments. Operation results should
  /// be replaced using the 'registerOp*' utilities.
  ///
  /// This utility is used to register the replacement of block arguments
  /// that are within the loop to be vectorized and will continue being scalar
  /// within the vector loop.
  ///
  /// Example:
  ///   * 'replaced': induction variable of a loop to be vectorized.
  ///   * 'replacement': new induction variable in the new vector loop.
  void registerValueScalarReplacement(BlockArgument replaced,
                                      BlockArgument replacement);

  /// Registers the scalar replacement of a scalar result returned from a
  /// reduction loop. 'replacement' must be scalar.
  ///
  /// This utility is used to register the replacement for scalar results of
  /// vectorized reduction loops with iter_args.
  ///
  /// Example:
  ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
  ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into
  ///     f32
  void registerLoopResultScalarReplacement(Value replaced, Value replacement);

  /// Returns in 'replacedVals' the scalar replacement for values in
  /// 'inputVals'.
  void getScalarValueReplacementsFor(ValueRange inputVals,
                                     SmallVectorImpl<Value> &replacedVals);

  /// Erases the scalar loop nest after its successful vectorization.
  void finishVectorizationPattern(AffineForOp rootLoop);

  // Used to build and insert all the new operations created. The insertion
  // point is preserved and updated along the vectorization process.
  OpBuilder builder;

  // Maps input scalar operations to their vector counterparts.
  DenseMap<Operation *, Operation *> opVectorReplacement;
  // Maps input scalar values to their vector counterparts.
  BlockAndValueMapping valueVectorReplacement;
  // Maps input scalar values to their new scalar counterparts in the vector
  // loop nest.
  BlockAndValueMapping valueScalarReplacement;
  // Maps results of reduction loops to their new scalar counterparts.
  DenseMap<Value, Value> loopResultScalarReplacement;

  // Maps the newly created vector loops to their vector dimension.
  DenseMap<Operation *, unsigned> vecLoopToVecDim;

  // Maps the new vectorized loops to the corresponding vector masks, if
  // required.
  DenseMap<Operation *, Value> vecLoopToMask;

  // The strategy drives which loop to vectorize by which amount.
  const VectorizationStrategy *strategy = nullptr;

private:
  /// Internal implementation to map input scalar values to new vector or
  /// scalar values.
  void registerValueVectorReplacementImpl(Value replaced, Value replacement);
  void registerValueScalarReplacementImpl(Value replaced, Value replacement);
};

} // namespace

/// Registers the vector replacement of a scalar operation and its result
/// values. Both operations must have the same number of results.
///
/// This utility is used to register the replacement for the vast majority of
/// the vectorized operations.
///
/// Example:
///   * 'replaced': %0 = arith.addf %1, %2 : f32
///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
void VectorizationState::registerOpVectorReplacement(Operation *replaced,
                                                     Operation *replacement) {
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
  LLVM_DEBUG(dbgs() << *replaced << "\n");
  LLVM_DEBUG(dbgs() << "into\n");
  LLVM_DEBUG(dbgs() << *replacement << "\n");

  assert(replaced->getNumResults() == replacement->getNumResults() &&
         "Unexpected replaced and replacement results");
  assert(opVectorReplacement.count(replaced) == 0 && "already registered");
  opVectorReplacement[replaced] = replacement;

7930fd0fb53SDiego Caballero for (auto resultTuple :
7940fd0fb53SDiego Caballero llvm::zip(replaced->getResults(), replacement->getResults()))
7950fd0fb53SDiego Caballero registerValueVectorReplacementImpl(std::get<0>(resultTuple),
7960fd0fb53SDiego Caballero std::get<1>(resultTuple));
797b8737614SUday Bondhugula }
798b8737614SUday Bondhugula
79996891f04SDiego Caballero /// Registers the vector replacement of a scalar value. The replacement
80096891f04SDiego Caballero /// operation should have a single result, which replaces the scalar value.
80196891f04SDiego Caballero ///
80296891f04SDiego Caballero /// This utility is used to register the vector replacement of block arguments
80396891f04SDiego Caballero /// and operation results which are not directly vectorized (i.e., their
80496891f04SDiego Caballero /// scalar version still exists after vectorization), like uniforms.
80596891f04SDiego Caballero ///
80696891f04SDiego Caballero /// Example:
80796891f04SDiego Caballero /// * 'replaced': block argument or operation outside of the vectorized loop.
80896891f04SDiego Caballero /// * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
80996891f04SDiego Caballero void VectorizationState::registerValueVectorReplacement(
81096891f04SDiego Caballero Value replaced, Operation *replacement) {
81196891f04SDiego Caballero assert(replacement->getNumResults() == 1 &&
81296891f04SDiego Caballero "Expected single-result replacement");
81396891f04SDiego Caballero if (Operation *defOp = replaced.getDefiningOp())
81496891f04SDiego Caballero registerOpVectorReplacement(defOp, replacement);
81596891f04SDiego Caballero else
81696891f04SDiego Caballero registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
817b8737614SUday Bondhugula }
818b8737614SUday Bondhugula
8190fd0fb53SDiego Caballero /// Registers the vector replacement of a block argument (e.g., iter_args).
8200fd0fb53SDiego Caballero ///
8210fd0fb53SDiego Caballero /// Example:
8220fd0fb53SDiego Caballero /// * 'replaced': 'iter_arg' block argument.
8230fd0fb53SDiego Caballero /// * 'replacement': vectorized 'iter_arg' block argument.
8240fd0fb53SDiego Caballero void VectorizationState::registerBlockArgVectorReplacement(
8250fd0fb53SDiego Caballero BlockArgument replaced, BlockArgument replacement) {
8260fd0fb53SDiego Caballero registerValueVectorReplacementImpl(replaced, replacement);
8270fd0fb53SDiego Caballero }
8280fd0fb53SDiego Caballero
82996891f04SDiego Caballero void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
83096891f04SDiego Caballero Value replacement) {
83196891f04SDiego Caballero assert(!valueVectorReplacement.contains(replaced) &&
83296891f04SDiego Caballero "Vector replacement already registered");
83396891f04SDiego Caballero assert(replacement.getType().isa<VectorType>() &&
83496891f04SDiego Caballero "Expected vector type in vector replacement");
83596891f04SDiego Caballero valueVectorReplacement.map(replaced, replacement);
836b8737614SUday Bondhugula }
837b8737614SUday Bondhugula
83896891f04SDiego Caballero /// Registers the scalar replacement of a scalar value. 'replacement' must be
83996891f04SDiego Caballero /// scalar. Both values must be block arguments. Operation results should be
84096891f04SDiego Caballero /// replaced using the 'registerOp*' utilities.
84196891f04SDiego Caballero ///
84296891f04SDiego Caballero /// This utility is used to register the replacement of block arguments
84396891f04SDiego Caballero /// that are within the loop to be vectorized and will continue being scalar
84496891f04SDiego Caballero /// within the vector loop.
84596891f04SDiego Caballero ///
84696891f04SDiego Caballero /// Example:
84796891f04SDiego Caballero /// * 'replaced': induction variable of a loop to be vectorized.
84896891f04SDiego Caballero /// * 'replacement': new induction variable in the new vector loop.
84996891f04SDiego Caballero void VectorizationState::registerValueScalarReplacement(
85096891f04SDiego Caballero BlockArgument replaced, BlockArgument replacement) {
85196891f04SDiego Caballero registerValueScalarReplacementImpl(replaced, replacement);
85296891f04SDiego Caballero }
85396891f04SDiego Caballero
854d80b04abSSergei Grechanik /// Registers the scalar replacement of a scalar result returned from a
855d80b04abSSergei Grechanik /// reduction loop. 'replacement' must be scalar.
856d80b04abSSergei Grechanik ///
857d80b04abSSergei Grechanik /// This utility is used to register the replacement for scalar results of
858d80b04abSSergei Grechanik /// vectorized reduction loops with iter_args.
859d80b04abSSergei Grechanik ///
860d80b04abSSergei Grechanik /// Example:
861d80b04abSSergei Grechanik /// * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
862fe0bf7d4SMatthias Springer /// * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into f32
863d80b04abSSergei Grechanik void VectorizationState::registerLoopResultScalarReplacement(
864d80b04abSSergei Grechanik Value replaced, Value replacement) {
865d80b04abSSergei Grechanik assert(isa<AffineForOp>(replaced.getDefiningOp()));
866d80b04abSSergei Grechanik assert(loopResultScalarReplacement.count(replaced) == 0 &&
867d80b04abSSergei Grechanik "already registered");
868d80b04abSSergei Grechanik LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
869d80b04abSSergei Grechanik "with scalar: "
870d80b04abSSergei Grechanik << replacement);
871d80b04abSSergei Grechanik loopResultScalarReplacement[replaced] = replacement;
872d80b04abSSergei Grechanik }
873d80b04abSSergei Grechanik
87496891f04SDiego Caballero void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
87596891f04SDiego Caballero Value replacement) {
87696891f04SDiego Caballero assert(!valueScalarReplacement.contains(replaced) &&
87796891f04SDiego Caballero "Scalar value replacement already registered");
87896891f04SDiego Caballero assert(!replacement.getType().isa<VectorType>() &&
87996891f04SDiego Caballero "Expected scalar type in scalar replacement");
88096891f04SDiego Caballero valueScalarReplacement.map(replaced, replacement);
88196891f04SDiego Caballero }
88296891f04SDiego Caballero
88396891f04SDiego Caballero /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
88496891f04SDiego Caballero void VectorizationState::getScalarValueReplacementsFor(
88596891f04SDiego Caballero ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
88696891f04SDiego Caballero for (Value inputVal : inputVals)
88796891f04SDiego Caballero replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
88896891f04SDiego Caballero }
88996891f04SDiego Caballero
89096891f04SDiego Caballero /// Erases a loop nest, including all its nested operations.
89196891f04SDiego Caballero static void eraseLoopNest(AffineForOp forOp) {
89296891f04SDiego Caballero LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
89396891f04SDiego Caballero forOp.erase();
89496891f04SDiego Caballero }
89596891f04SDiego Caballero
89696891f04SDiego Caballero /// Erases the scalar loop nest after its successful vectorization.
89796891f04SDiego Caballero void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
89896891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
89996891f04SDiego Caballero eraseLoopNest(rootLoop);
900b8737614SUday Bondhugula }
901b8737614SUday Bondhugula
902b8737614SUday Bondhugula // Applies 'map' to 'mapOperands', returning the resulting values in 'results'.
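// For illustration (a hedged sketch with a hypothetical map and operands
// %a, %b), the map affine_map<(d0, d1) -> (d0 + d1, d1)> is expanded into one
// single-result affine.apply per map result:
//   %r0 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%a, %b)
//   %r1 = affine.apply affine_map<(d0, d1) -> (d1)>(%a, %b)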
903b8737614SUday Bondhugula static void computeMemoryOpIndices(Operation *op, AffineMap map,
904b8737614SUday Bondhugula ValueRange mapOperands,
90596891f04SDiego Caballero VectorizationState &state,
906b8737614SUday Bondhugula SmallVectorImpl<Value> &results) {
907b8737614SUday Bondhugula for (auto resultExpr : map.getResults()) {
908b8737614SUday Bondhugula auto singleResMap =
909b8737614SUday Bondhugula AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
91096891f04SDiego Caballero auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
91196891f04SDiego Caballero mapOperands);
912b8737614SUday Bondhugula results.push_back(afOp);
913b8737614SUday Bondhugula }
914b8737614SUday Bondhugula }
915b8737614SUday Bondhugula
916b8737614SUday Bondhugula /// Returns a FilterFunctionType that can be used in NestedPattern to match a
917b8737614SUday Bondhugula /// loop whose underlying load/store accesses are either invariant or all
918b8737614SUday Bondhugula /// varying along the `fastestVaryingMemRefDimension`.
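/// For example (an illustrative sketch, assuming the convention that
/// dimension 0 denotes the fastest varying memref dimension), the loop
///   affine.for %i = 0 to 512 {
///     %0 = affine.load %A[%i] : memref<512xf32>
///   }
/// matches for fastestVaryingMemRefDimension == 0 since its only access varies
/// along that dimension; loops whose accesses are all invariant (memRefDim ==
/// -1) match as well.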
919b8737614SUday Bondhugula static FilterFunctionType
920b8737614SUday Bondhugula isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
921b8737614SUday Bondhugula int fastestVaryingMemRefDimension) {
922b8737614SUday Bondhugula return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
923b8737614SUday Bondhugula auto loop = cast<AffineForOp>(forOp);
924b8737614SUday Bondhugula auto parallelIt = parallelLoops.find(loop);
925b8737614SUday Bondhugula if (parallelIt == parallelLoops.end())
926b8737614SUday Bondhugula return false;
927b8737614SUday Bondhugula int memRefDim = -1;
928b8737614SUday Bondhugula auto vectorizableBody =
929b8737614SUday Bondhugula isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
930b8737614SUday Bondhugula if (!vectorizableBody)
931b8737614SUday Bondhugula return false;
932b8737614SUday Bondhugula return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
933b8737614SUday Bondhugula memRefDim == fastestVaryingMemRefDimension;
934b8737614SUday Bondhugula };
935b8737614SUday Bondhugula }
936b8737614SUday Bondhugula
93746781630SDiego Caballero /// Returns the vector type resulting from applying the provided vectorization
93846781630SDiego Caballero /// strategy on the scalar type.
93946781630SDiego Caballero static VectorType getVectorType(Type scalarTy,
94046781630SDiego Caballero const VectorizationStrategy *strategy) {
94146781630SDiego Caballero assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
94246781630SDiego Caballero return VectorType::get(strategy->vectorSizes, scalarTy);
94346781630SDiego Caballero }
94446781630SDiego Caballero
94596891f04SDiego Caballero /// Tries to transform a scalar constant into a vector constant. Returns the
94696891f04SDiego Caballero /// vector constant if the scalar type is a valid vector element type; returns
94796891f04SDiego Caballero /// nullptr otherwise.
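///
/// For illustration (a sketch assuming a 1-D strategy with vector size 128):
///   * scalar: %cst = arith.constant 1.0 : f32
///   * vector: %cst_v = arith.constant dense<1.0> : vector<128xf32>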
948a54f4eaeSMogball static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
94996891f04SDiego Caballero VectorizationState &state) {
95096891f04SDiego Caballero Type scalarTy = constOp.getType();
95196891f04SDiego Caballero if (!VectorType::isValidElementType(scalarTy))
95296891f04SDiego Caballero return nullptr;
95396891f04SDiego Caballero
95496891f04SDiego Caballero auto vecTy = getVectorType(scalarTy, state.strategy);
955cfb72fd3SJacques Pienaar auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
956a8b7e56fSAmy Zhuang
957a8b7e56fSAmy Zhuang OpBuilder::InsertionGuard guard(state.builder);
958a8b7e56fSAmy Zhuang Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
959a8b7e56fSAmy Zhuang // Find the innermost vectorized ancestor loop to insert the vector constant.
960a8b7e56fSAmy Zhuang while (parentOp && !state.vecLoopToVecDim.count(parentOp))
961a8b7e56fSAmy Zhuang parentOp = parentOp->getParentOp();
962a8b7e56fSAmy Zhuang assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
963a8b7e56fSAmy Zhuang isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
964a8b7e56fSAmy Zhuang auto vecForOp = cast<AffineForOp>(parentOp);
965a8b7e56fSAmy Zhuang state.builder.setInsertionPointToStart(vecForOp.getBody());
966a54f4eaeSMogball auto newConstOp =
967a54f4eaeSMogball state.builder.create<arith::ConstantOp>(constOp.getLoc(), vecAttr);
96896891f04SDiego Caballero
96996891f04SDiego Caballero // Register vector replacement for future uses in the scope.
97096891f04SDiego Caballero state.registerOpVectorReplacement(constOp, newConstOp);
97196891f04SDiego Caballero return newConstOp;
97296891f04SDiego Caballero }
97396891f04SDiego Caballero
974d80b04abSSergei Grechanik /// Creates a constant vector filled with the neutral element of the given
975d80b04abSSergei Grechanik /// reduction. The scalar type of vector elements will be taken from
976d80b04abSSergei Grechanik /// `oldOperand`.
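///
/// For example (a sketch), for an "addf" reduction over f32 with a 1-D
/// strategy of size 128, the neutral element is 0.0 and the created vector is:
///   %init = arith.constant dense<0.0> : vector<128xf32>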
977a6a583daSWilliam S. Moses static arith::ConstantOp createInitialVector(arith::AtomicRMWKind reductionKind,
978d80b04abSSergei Grechanik Value oldOperand,
979d80b04abSSergei Grechanik VectorizationState &state) {
980d80b04abSSergei Grechanik Type scalarTy = oldOperand.getType();
981d80b04abSSergei Grechanik if (!VectorType::isValidElementType(scalarTy))
982d80b04abSSergei Grechanik return nullptr;
983d80b04abSSergei Grechanik
984d80b04abSSergei Grechanik Attribute valueAttr = getIdentityValueAttr(
985d80b04abSSergei Grechanik reductionKind, scalarTy, state.builder, oldOperand.getLoc());
986d80b04abSSergei Grechanik auto vecTy = getVectorType(scalarTy, state.strategy);
987d80b04abSSergei Grechanik auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
988d80b04abSSergei Grechanik auto newConstOp =
989a54f4eaeSMogball state.builder.create<arith::ConstantOp>(oldOperand.getLoc(), vecAttr);
990d80b04abSSergei Grechanik
991d80b04abSSergei Grechanik return newConstOp;
992d80b04abSSergei Grechanik }
993d80b04abSSergei Grechanik
994d80b04abSSergei Grechanik /// Creates a mask used to filter out garbage elements in the last iteration
995d80b04abSSergei Grechanik /// of unaligned loops. If a mask is not required then `nullptr` is returned.
996d80b04abSSergei Grechanik /// The mask will be a vector of booleans representing meaningful vector
997d80b04abSSergei Grechanik /// elements in the current iteration. It is filled with ones for each iteration
998d80b04abSSergei Grechanik /// except for the last one, where it has the form `11...100...0` with the
999d80b04abSSergei Grechanik /// number of ones equal to the number of meaningful elements (i.e. the number
1000d80b04abSSergei Grechanik /// of iterations that would be left in the original loop).
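///
/// As an illustrative computation: vectorizing a 500-iteration loop by a
/// factor of 128 yields vector iterations covering elements [0, 128),
/// [128, 256), [256, 384) and [384, 500); in the last iteration only
/// 500 - 384 = 116 elements are meaningful, so the mask is a vector<128xi1>
/// holding 116 ones followed by 12 zeros.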
1001d80b04abSSergei Grechanik static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
1002d80b04abSSergei Grechanik assert(state.strategy->vectorSizes.size() == 1 &&
1003d80b04abSSergei Grechanik "Creating a mask non-1-D vectors is not supported.");
1004d80b04abSSergei Grechanik assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
1005d80b04abSSergei Grechanik "Creating a mask for loops with non-unit original step size is not "
1006d80b04abSSergei Grechanik "supported.");
1007d80b04abSSergei Grechanik
1008d80b04abSSergei Grechanik // Check if we have already created the mask.
1009d80b04abSSergei Grechanik if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1010d80b04abSSergei Grechanik return mask;
1011d80b04abSSergei Grechanik
1012d80b04abSSergei Grechanik // If the loop has constant bounds and the original number of iterations is
1013d80b04abSSergei Grechanik // divisible by the vector size then we don't need a mask.
1014d80b04abSSergei Grechanik if (vecForOp.hasConstantBounds()) {
1015d80b04abSSergei Grechanik int64_t originalTripCount =
1016d80b04abSSergei Grechanik vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1017d80b04abSSergei Grechanik if (originalTripCount % vecForOp.getStep() == 0)
1018d80b04abSSergei Grechanik return nullptr;
1019d80b04abSSergei Grechanik }
1020d80b04abSSergei Grechanik
1021d80b04abSSergei Grechanik OpBuilder::InsertionGuard guard(state.builder);
1022d80b04abSSergei Grechanik state.builder.setInsertionPointToStart(vecForOp.getBody());
1023d80b04abSSergei Grechanik
1024d80b04abSSergei Grechanik // We generate the mask using the `vector.create_mask` operation which accepts
1025b7cac864SDiego Caballero // the number of meaningful elements (i.e. the length of the prefix of 1s).
1026d80b04abSSergei Grechanik // To compute the number of meaningful elements we subtract the current value
1027d80b04abSSergei Grechanik // of the iteration variable from the upper bound of the loop. Example:
1028d80b04abSSergei Grechanik //
1029d80b04abSSergei Grechanik // // 500 is the upper bound of the loop
1030d80b04abSSergei Grechanik // #map = affine_map<(d0) -> (500 - d0)>
1031d80b04abSSergei Grechanik // %elems_left = affine.apply #map(%iv)
1032d80b04abSSergei Grechanik // %mask = vector.create_mask %elems_left : vector<128xi1>
1033d80b04abSSergei Grechanik
1034d80b04abSSergei Grechanik Location loc = vecForOp.getLoc();
1035d80b04abSSergei Grechanik
1036d80b04abSSergei Grechanik // First we get the upper bound of the loop using `affine.apply` or
1037d80b04abSSergei Grechanik // `affine.min`.
1038d80b04abSSergei Grechanik AffineMap ubMap = vecForOp.getUpperBoundMap();
1039d80b04abSSergei Grechanik Value ub;
1040d80b04abSSergei Grechanik if (ubMap.getNumResults() == 1)
1041d80b04abSSergei Grechanik ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1042d80b04abSSergei Grechanik vecForOp.getUpperBoundOperands());
1043d80b04abSSergei Grechanik else
1044d80b04abSSergei Grechanik ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1045d80b04abSSergei Grechanik vecForOp.getUpperBoundOperands());
1046d80b04abSSergei Grechanik // Then we compute the number of (original) iterations left in the loop.
1047d80b04abSSergei Grechanik AffineExpr subExpr =
1048d80b04abSSergei Grechanik state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1049d80b04abSSergei Grechanik Value itersLeft =
1050d80b04abSSergei Grechanik makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1051d80b04abSSergei Grechanik {ub, vecForOp.getInductionVar()});
1052d80b04abSSergei Grechanik // If the affine maps were successfully composed then `ub` is unneeded.
1053d80b04abSSergei Grechanik if (ub.use_empty())
1054d80b04abSSergei Grechanik ub.getDefiningOp()->erase();
1055d80b04abSSergei Grechanik // Finally we create the mask.
1056d80b04abSSergei Grechanik Type maskTy = VectorType::get(state.strategy->vectorSizes,
1057d80b04abSSergei Grechanik state.builder.getIntegerType(1));
1058d80b04abSSergei Grechanik Value mask =
1059d80b04abSSergei Grechanik state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1060d80b04abSSergei Grechanik
1061d80b04abSSergei Grechanik LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1062d80b04abSSergei Grechanik << itersLeft << "\n"
1063d80b04abSSergei Grechanik << mask << "\n");
1064d80b04abSSergei Grechanik
1065d80b04abSSergei Grechanik state.vecLoopToMask[vecForOp] = mask;
1066d80b04abSSergei Grechanik return mask;
1067d80b04abSSergei Grechanik }
1068d80b04abSSergei Grechanik
106946781630SDiego Caballero /// Returns true if the provided value is a vector uniform under the given
107046781630SDiego Caballero /// vectorization strategy.
10715ce368cfSAmy Zhuang // TODO: For now, only values that are induction variables of loops not in
10725ce368cfSAmy Zhuang // `loopToVectorDim` or invariants to all the loops in the vectorization
10735ce368cfSAmy Zhuang // strategy are considered vector uniforms.
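//
// For example (a sketch): when vectorizing along %j in
//   affine.for %i = 0 to 128 {
//     affine.for %j = 0 to 512 { ... }
//   }
// both %i (the induction variable of a non-vectorized loop) and any value
// defined above the %i loop are uniform: every vector lane observes the same
// scalar value.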
107446781630SDiego Caballero static bool isUniformDefinition(Value value,
107546781630SDiego Caballero const VectorizationStrategy *strategy) {
10765ce368cfSAmy Zhuang AffineForOp forOp = getForInductionVarOwner(value);
10775ce368cfSAmy Zhuang if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
10785ce368cfSAmy Zhuang return true;
10795ce368cfSAmy Zhuang
108046781630SDiego Caballero for (auto loopToDim : strategy->loopToVectorDim) {
108146781630SDiego Caballero auto loop = cast<AffineForOp>(loopToDim.first);
108246781630SDiego Caballero if (!loop.isDefinedOutsideOfLoop(value))
108346781630SDiego Caballero return false;
108446781630SDiego Caballero }
108546781630SDiego Caballero return true;
108646781630SDiego Caballero }
108746781630SDiego Caballero
108846781630SDiego Caballero /// Generates a broadcast op for the provided uniform value using the
108946781630SDiego Caballero /// vectorization strategy in 'state'.
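///
/// For illustration (assuming a uniform f32 value %s and a 1-D strategy with
/// vector size 128), the generated op would be:
///   %0 = vector.broadcast %s : f32 to vector<128xf32>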
109096891f04SDiego Caballero static Operation *vectorizeUniform(Value uniformVal,
109196891f04SDiego Caballero VectorizationState &state) {
109296891f04SDiego Caballero OpBuilder::InsertionGuard guard(state.builder);
10935ce368cfSAmy Zhuang Value uniformScalarRepl =
10945ce368cfSAmy Zhuang state.valueScalarReplacement.lookupOrDefault(uniformVal);
10955ce368cfSAmy Zhuang state.builder.setInsertionPointAfterValue(uniformScalarRepl);
109646781630SDiego Caballero
109796891f04SDiego Caballero auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
109896891f04SDiego Caballero auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
10995ce368cfSAmy Zhuang vectorTy, uniformScalarRepl);
110096891f04SDiego Caballero state.registerValueVectorReplacement(uniformVal, bcastOp);
110196891f04SDiego Caballero return bcastOp;
110246781630SDiego Caballero }
110346781630SDiego Caballero
110496891f04SDiego Caballero /// Tries to vectorize a given `operand` by applying the following logic:
110596891f04SDiego Caballero /// 1. if the defining operation has been already vectorized, `operand` is
110696891f04SDiego Caballero /// already in the proper vector form;
110796891f04SDiego Caballero /// 2. if the `operand` is a constant, returns the vectorized form of the
110896891f04SDiego Caballero /// constant;
110996891f04SDiego Caballero /// 3. if the `operand` is uniform, returns a vector broadcast of the `operand`;
111096891f04SDiego Caballero /// 4. otherwise, the vectorization of `operand` is not supported.
111196891f04SDiego Caballero /// Newly created vector operations are registered in `state` as replacement
111296891f04SDiego Caballero /// for their scalar counterparts.
1113b8737614SUday Bondhugula /// In particular this logic captures some of the use cases where definitions
1114b8737614SUday Bondhugula /// that are not scoped under the current pattern are needed to vectorize.
1115b8737614SUday Bondhugula /// One such example is top level function constants that need to be splatted.
1116b8737614SUday Bondhugula ///
1117b8737614SUday Bondhugula /// Returns an operand that has been vectorized to match `state`'s strategy if
1118b8737614SUday Bondhugula /// vectorization is possible with the above logic. Returns nullptr otherwise.
1119b8737614SUday Bondhugula ///
11209db53a18SRiver Riddle /// TODO: handle more complex cases.
112196891f04SDiego Caballero static Value vectorizeOperand(Value operand, VectorizationState &state) {
112296891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
112396891f04SDiego Caballero // If this value is already vectorized, we are done.
112496891f04SDiego Caballero if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
112596891f04SDiego Caballero LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
112696891f04SDiego Caballero return vecRepl;
1127b8737614SUday Bondhugula }
112895db7b4aSDiego Caballero
112996891f04SDiego Caballero // A vector operand that is not in the replacement map should never reach
113096891f04SDiego Caballero // this point. Reaching this point could mean that the code was already
113196891f04SDiego Caballero // vectorized and we shouldn't try to vectorize already vectorized code.
113296891f04SDiego Caballero assert(!operand.getType().isa<VectorType>() &&
113396891f04SDiego Caballero "Vector op not found in replacement map");
113495db7b4aSDiego Caballero
113596891f04SDiego Caballero // Vectorize constant.
1136a54f4eaeSMogball if (auto constOp = operand.getDefiningOp<arith::ConstantOp>()) {
1137a54f4eaeSMogball auto vecConstant = vectorizeConstant(constOp, state);
113896891f04SDiego Caballero LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
113996891f04SDiego Caballero return vecConstant.getResult();
114096891f04SDiego Caballero }
114196891f04SDiego Caballero
114296891f04SDiego Caballero // Vectorize uniform values.
114396891f04SDiego Caballero if (isUniformDefinition(operand, state.strategy)) {
114496891f04SDiego Caballero Operation *vecUniform = vectorizeUniform(operand, state);
114596891f04SDiego Caballero LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
114696891f04SDiego Caballero return vecUniform->getResult(0);
114796891f04SDiego Caballero }
114896891f04SDiego Caballero
114996891f04SDiego Caballero // Check for unsupported block argument scenarios. A supported block argument
115096891f04SDiego Caballero // should have been vectorized already.
115196891f04SDiego Caballero if (!operand.getDefiningOp())
115296891f04SDiego Caballero LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
115396891f04SDiego Caballero else
115496891f04SDiego Caballero // Generic unsupported case.
115596891f04SDiego Caballero LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
115696891f04SDiego Caballero
1157b8737614SUday Bondhugula return nullptr;
1158b8737614SUday Bondhugula }
1159b8737614SUday Bondhugula
116096891f04SDiego Caballero /// Vectorizes an affine load with the vectorization strategy in 'state' by
116196891f04SDiego Caballero /// generating a 'vector.transfer_read' op with the proper permutation map
116296891f04SDiego Caballero /// inferred from the indices of the load. The new 'vector.transfer_read' is
116396891f04SDiego Caballero /// registered as replacement of the scalar load. Returns the newly created
116496891f04SDiego Caballero /// 'vector.transfer_read' if vectorization was successful; returns nullptr
116596891f04SDiego Caballero /// otherwise.
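///
/// For example (a sketch assuming a vector size of 128 and a zero-padding
/// value %pad), vectorizing "%0 = affine.load %A[%iv] : memref<512xf32>"
/// along %iv produces something like:
///   %v = vector.transfer_read %A[%iv], %pad : memref<512xf32>, vector<128xf32>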
116696891f04SDiego Caballero static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
116796891f04SDiego Caballero VectorizationState &state) {
116896891f04SDiego Caballero MemRefType memRefType = loadOp.getMemRefType();
116996891f04SDiego Caballero Type elementType = memRefType.getElementType();
117096891f04SDiego Caballero auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1171b8737614SUday Bondhugula
117296891f04SDiego Caballero // Replace map operands with operands from the vector loop nest.
117396891f04SDiego Caballero SmallVector<Value, 8> mapOperands;
117496891f04SDiego Caballero state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
117596891f04SDiego Caballero
117696891f04SDiego Caballero // Compute indices for the transfer op. AffineApplyOps may be generated.
117796891f04SDiego Caballero SmallVector<Value, 8> indices;
117896891f04SDiego Caballero indices.reserve(memRefType.getRank());
117996891f04SDiego Caballero if (loadOp.getAffineMap() !=
118096891f04SDiego Caballero state.builder.getMultiDimIdentityMap(memRefType.getRank()))
118196891f04SDiego Caballero computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
118296891f04SDiego Caballero indices);
118396891f04SDiego Caballero else
118496891f04SDiego Caballero indices.append(mapOperands.begin(), mapOperands.end());
118596891f04SDiego Caballero
118696891f04SDiego Caballero // Compute permutation map using the information of new vector loops.
118796891f04SDiego Caballero auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
118896891f04SDiego Caballero indices, state.vecLoopToVecDim);
118996891f04SDiego Caballero if (!permutationMap) {
119096891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
119196891f04SDiego Caballero return nullptr;
119296891f04SDiego Caballero }
119396891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
119496891f04SDiego Caballero LLVM_DEBUG(permutationMap.print(dbgs()));
119596891f04SDiego Caballero
119696891f04SDiego Caballero auto transfer = state.builder.create<vector::TransferReadOp>(
119796891f04SDiego Caballero loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
119896891f04SDiego Caballero
119996891f04SDiego Caballero // Register replacement for future uses in the scope.
120096891f04SDiego Caballero state.registerOpVectorReplacement(loadOp, transfer);
120196891f04SDiego Caballero return transfer;
120296891f04SDiego Caballero }
120396891f04SDiego Caballero
120496891f04SDiego Caballero /// Vectorizes an affine store with the vectorization strategy in 'state' by
120596891f04SDiego Caballero /// generating a 'vector.transfer_write' op with the proper permutation map
120696891f04SDiego Caballero /// inferred from the indices of the store. The new 'vector.transfer_write' is
120796891f04SDiego Caballero /// registered as the replacement of the scalar store. Returns the newly
120896891f04SDiego Caballero /// created 'vector.transfer_write' if vectorization was successful; returns
120996891f04SDiego Caballero /// nullptr otherwise.
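///
/// For example (a sketch with vector size 128), vectorizing
/// "affine.store %val, %A[%iv] : memref<512xf32>" produces something like:
///   vector.transfer_write %v, %A[%iv] : vector<128xf32>, memref<512xf32>
/// where %v is the vector replacement of %val.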
121096891f04SDiego Caballero static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
121196891f04SDiego Caballero VectorizationState &state) {
121296891f04SDiego Caballero MemRefType memRefType = storeOp.getMemRefType();
121396891f04SDiego Caballero Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1214b8737614SUday Bondhugula if (!vectorValue)
1215b8737614SUday Bondhugula return nullptr;
1216b8737614SUday Bondhugula
121796891f04SDiego Caballero // Replace map operands with operands from the vector loop nest.
121896891f04SDiego Caballero SmallVector<Value, 8> mapOperands;
121996891f04SDiego Caballero state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
122095db7b4aSDiego Caballero
122196891f04SDiego Caballero // Compute indices for the transfer op. AffineApplyOps may be generated.
122296891f04SDiego Caballero SmallVector<Value, 8> indices;
122396891f04SDiego Caballero indices.reserve(memRefType.getRank());
122496891f04SDiego Caballero if (storeOp.getAffineMap() !=
122596891f04SDiego Caballero state.builder.getMultiDimIdentityMap(memRefType.getRank()))
122696891f04SDiego Caballero computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
122796891f04SDiego Caballero indices);
122896891f04SDiego Caballero else
122996891f04SDiego Caballero indices.append(mapOperands.begin(), mapOperands.end());
123096891f04SDiego Caballero
123196891f04SDiego Caballero // Compute permutation map using the information of new vector loops.
123296891f04SDiego Caballero auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
123396891f04SDiego Caballero indices, state.vecLoopToVecDim);
1234b8737614SUday Bondhugula if (!permutationMap)
1235b8737614SUday Bondhugula return nullptr;
1236b8737614SUday Bondhugula LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1237b8737614SUday Bondhugula LLVM_DEBUG(permutationMap.print(dbgs()));
123896891f04SDiego Caballero
123996891f04SDiego Caballero auto transfer = state.builder.create<vector::TransferWriteOp>(
124096891f04SDiego Caballero storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
124196891f04SDiego Caballero permutationMap);
124296891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
124396891f04SDiego Caballero
124496891f04SDiego Caballero // Register replacement for future uses in the scope.
124596891f04SDiego Caballero state.registerOpVectorReplacement(storeOp, transfer);
124696891f04SDiego Caballero return transfer;
1247b8737614SUday Bondhugula }
124896891f04SDiego Caballero
1249d80b04abSSergei Grechanik /// Returns true if `value` is a constant equal to the neutral element of the
1250d80b04abSSergei Grechanik /// given vectorizable reduction.
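///
/// For example (a sketch), for an "addf" reduction this returns true when
/// `value` is defined by "arith.constant 0.0 : f32", the neutral element of
/// floating-point addition.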
1251a6a583daSWilliam S. Moses static bool isNeutralElementConst(arith::AtomicRMWKind reductionKind,
1252a6a583daSWilliam S. Moses Value value, VectorizationState &state) {
1253d80b04abSSergei Grechanik Type scalarTy = value.getType();
1254d80b04abSSergei Grechanik if (!VectorType::isValidElementType(scalarTy))
1255d80b04abSSergei Grechanik return false;
1256d80b04abSSergei Grechanik Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1257d80b04abSSergei Grechanik state.builder, value.getLoc());
1258a54f4eaeSMogball if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
1259cfb72fd3SJacques Pienaar return constOp.getValue() == valueAttr;
1260d80b04abSSergei Grechanik return false;
1261d80b04abSSergei Grechanik }
1262d80b04abSSergei Grechanik
126396891f04SDiego Caballero /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
126496891f04SDiego Caballero /// created and registered as replacement for the scalar loop. The builder's
126596891f04SDiego Caballero /// insertion point is set to the new loop's body so that subsequent vectorized
126696891f04SDiego Caballero /// operations are inserted into the new loop. If the loop is a vector
126796891f04SDiego Caballero /// dimension, the step of the newly created loop will reflect the vectorization
126896891f04SDiego Caballero /// factor used to vectorize that dimension.
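///
/// For example (a sketch), vectorizing the following loop along its dimension
/// with a factor of 128:
///   affine.for %i = 0 to 512 step 1 { ... }
/// creates a loop with a scaled step:
///   affine.for %i = 0 to 512 step 128 { ... }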
126996891f04SDiego Caballero static Operation *vectorizeAffineForOp(AffineForOp forOp,
127096891f04SDiego Caballero VectorizationState &state) {
12710fd0fb53SDiego Caballero const VectorizationStrategy &strategy = *state.strategy;
12720fd0fb53SDiego Caballero auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
12730fd0fb53SDiego Caballero bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
12740fd0fb53SDiego Caballero
1275d80b04abSSergei Grechanik // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1276d80b04abSSergei Grechanik if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1277d80b04abSSergei Grechanik LLVM_DEBUG(
1278d80b04abSSergei Grechanik dbgs()
1279d80b04abSSergei Grechanik << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1280d80b04abSSergei Grechanik << forOp.getStep() << "\n");
1281b8737614SUday Bondhugula return nullptr;
1282d80b04abSSergei Grechanik }
1283b8737614SUday Bondhugula
128496891f04SDiego Caballero // If we are vectorizing a vector dimension, compute a new step for the new
128596891f04SDiego Caballero // vectorized loop using the vectorization factor for the vector dimension.
128696891f04SDiego Caballero // Otherwise, propagate the step of the scalar loop.
128796891f04SDiego Caballero unsigned newStep;
128896891f04SDiego Caballero if (isLoopVecDim) {
128996891f04SDiego Caballero unsigned vectorDim = loopToVecDimIt->second;
129096891f04SDiego Caballero assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
129196891f04SDiego Caballero int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
129296891f04SDiego Caballero newStep = forOp.getStep() * forOpVecFactor;
129396891f04SDiego Caballero } else {
129496891f04SDiego Caballero newStep = forOp.getStep();
129596891f04SDiego Caballero }
129696891f04SDiego Caballero
1297d80b04abSSergei Grechanik // Get information about reduction kinds.
1298d80b04abSSergei Grechanik ArrayRef<LoopReduction> reductions;
1299d80b04abSSergei Grechanik if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1300d80b04abSSergei Grechanik auto it = strategy.reductionLoops.find(forOp);
1301d80b04abSSergei Grechanik assert(it != strategy.reductionLoops.end() &&
1302d80b04abSSergei Grechanik "Reduction descriptors not found when vectorizing a reduction loop");
1303d80b04abSSergei Grechanik reductions = it->second;
1304d80b04abSSergei Grechanik assert(reductions.size() == forOp.getNumIterOperands() &&
1305d80b04abSSergei Grechanik "The size of reductions array must match the number of iter_args");
1306d80b04abSSergei Grechanik }
1307d80b04abSSergei Grechanik
13080fd0fb53SDiego Caballero // Vectorize 'iter_args'.
13090fd0fb53SDiego Caballero SmallVector<Value, 8> vecIterOperands;
1310d80b04abSSergei Grechanik if (!isLoopVecDim) {
13110fd0fb53SDiego Caballero for (auto operand : forOp.getIterOperands())
13120fd0fb53SDiego Caballero vecIterOperands.push_back(vectorizeOperand(operand, state));
1313d80b04abSSergei Grechanik } else {
1314d80b04abSSergei Grechanik // For reduction loops we need to pass a vector of neutral elements as an
1315d80b04abSSergei Grechanik // initial value of the accumulator. We will add the original initial value
1316d80b04abSSergei Grechanik // later.
1317d80b04abSSergei Grechanik for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
1318d80b04abSSergei Grechanik vecIterOperands.push_back(createInitialVector(
1319d80b04abSSergei Grechanik std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1320d80b04abSSergei Grechanik }
1321d80b04abSSergei Grechanik }
13220fd0fb53SDiego Caballero
132396891f04SDiego Caballero auto vecForOp = state.builder.create<AffineForOp>(
132496891f04SDiego Caballero forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
132596891f04SDiego Caballero forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
13260fd0fb53SDiego Caballero vecIterOperands,
132796891f04SDiego Caballero /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
132896891f04SDiego Caballero // Make sure we don't create a default terminator in the loop body as
132996891f04SDiego Caballero // the proper terminator will be added during vectorization.
133096891f04SDiego Caballero });
133196891f04SDiego Caballero
133296891f04SDiego Caballero // Register loop-related replacements:
133396891f04SDiego Caballero // 1) The new vectorized loop is registered as vector replacement of the
133496891f04SDiego Caballero // scalar loop.
133596891f04SDiego Caballero // 2) The new iv of the vectorized loop is registered as scalar replacement
133696891f04SDiego Caballero // since a scalar copy of the iv will prevail in the vectorized loop.
133796891f04SDiego Caballero // TODO: A vector replacement will also be added in the future when
133896891f04SDiego Caballero // vectorization of linear ops is supported.
13390fd0fb53SDiego Caballero // 3) The new 'iter_args' region arguments are registered as vector
13400fd0fb53SDiego Caballero // replacements since they have been vectorized.
1341d80b04abSSergei Grechanik // 4) If the loop performs a reduction along the vector dimension, a
1342d80b04abSSergei Grechanik // `vector.reduction` or similar op is inserted for each resulting value
1343d80b04abSSergei Grechanik // of the loop and its scalar value replaces the corresponding scalar
1344d80b04abSSergei Grechanik // result of the loop.
134596891f04SDiego Caballero state.registerOpVectorReplacement(forOp, vecForOp);
134696891f04SDiego Caballero state.registerValueScalarReplacement(forOp.getInductionVar(),
134796891f04SDiego Caballero vecForOp.getInductionVar());
13480fd0fb53SDiego Caballero for (auto iterTuple :
13490fd0fb53SDiego Caballero llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
13500fd0fb53SDiego Caballero state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
13510fd0fb53SDiego Caballero std::get<1>(iterTuple));
13520fd0fb53SDiego Caballero
1353d80b04abSSergei Grechanik if (isLoopVecDim) {
1354d80b04abSSergei Grechanik for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1355d80b04abSSergei Grechanik // First, we reduce the vector returned from the loop into a scalar.
1356d80b04abSSergei Grechanik Value reducedRes =
1357d80b04abSSergei Grechanik getVectorReductionOp(reductions[i].kind, state.builder,
1358d80b04abSSergei Grechanik vecForOp.getLoc(), vecForOp.getResult(i));
1359d80b04abSSergei Grechanik LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1360d80b04abSSergei Grechanik << reducedRes);
1361d80b04abSSergei Grechanik // Then we combine it with the original (scalar) initial value unless it
1362d80b04abSSergei Grechanik // is equal to the neutral element of the reduction.
1363d80b04abSSergei Grechanik Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1364d80b04abSSergei Grechanik Value finalRes = reducedRes;
1365d80b04abSSergei Grechanik if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1366a6a583daSWilliam S. Moses finalRes =
1367a6a583daSWilliam S. Moses arith::getReductionOp(reductions[i].kind, state.builder,
1368d80b04abSSergei Grechanik reducedRes.getLoc(), reducedRes, origInit);
1369d80b04abSSergei Grechanik state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1370d80b04abSSergei Grechanik }
1371d80b04abSSergei Grechanik }
1372d80b04abSSergei Grechanik
137396891f04SDiego Caballero if (isLoopVecDim)
137496891f04SDiego Caballero state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
137596891f04SDiego Caballero
137696891f04SDiego Caballero // Change insertion point so that upcoming vectorized instructions are
137796891f04SDiego Caballero // inserted into the vectorized loop's body.
137896891f04SDiego Caballero state.builder.setInsertionPointToStart(vecForOp.getBody());
1379d80b04abSSergei Grechanik
1380d80b04abSSergei Grechanik // If this is a reduction loop then we may need to create a mask to filter out
1381d80b04abSSergei Grechanik // garbage in the last iteration.
1382d80b04abSSergei Grechanik if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1383d80b04abSSergei Grechanik createMask(vecForOp, state);
1384d80b04abSSergei Grechanik
138596891f04SDiego Caballero return vecForOp;
138696891f04SDiego Caballero }
138796891f04SDiego Caballero
138896891f04SDiego Caballero /// Vectorizes an arbitrary operation by plain widening. We apply generic type
138996891f04SDiego Caballero /// widening of all its results and retrieve the vector counterparts for all its
139096891f04SDiego Caballero /// operands.
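///
/// For illustration (a sketch with vector size 128), widening
/// "%0 = arith.mulf %a, %b : f32" produces
/// "%1 = arith.mulf %va, %vb : vector<128xf32>", where %va and %vb are the
/// previously registered vector replacements of %a and %b.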
139196891f04SDiego Caballero static Operation *widenOp(Operation *op, VectorizationState &state) {
1392b8737614SUday Bondhugula SmallVector<Type, 8> vectorTypes;
139396891f04SDiego Caballero for (Value result : op->getResults())
1394b8737614SUday Bondhugula vectorTypes.push_back(
139596891f04SDiego Caballero VectorType::get(state.strategy->vectorSizes, result.getType()));
139696891f04SDiego Caballero
139779da91c5SAlex Zinenko SmallVector<Value, 8> vectorOperands;
139896891f04SDiego Caballero for (Value operand : op->getOperands()) {
139996891f04SDiego Caballero Value vecOperand = vectorizeOperand(operand, state);
140096891f04SDiego Caballero if (!vecOperand) {
140196891f04SDiego Caballero LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
140279da91c5SAlex Zinenko return nullptr;
140395db7b4aSDiego Caballero }
140496891f04SDiego Caballero vectorOperands.push_back(vecOperand);
140596891f04SDiego Caballero }
1406b8737614SUday Bondhugula
1407b8737614SUday Bondhugula // Create a clone of the op with the proper operands and return types.
14089db53a18SRiver Riddle // TODO: The following assumes there is always an op with a fixed
1409b8737614SUday Bondhugula // name that works both in scalar mode and vector mode.
14109db53a18SRiver Riddle // TODO: Is it worth considering an Operation.clone operation which
1411b8737614SUday Bondhugula // changes the type so we can promote an Operation with less boilerplate?
141214ecafd0SChia-hung Duan Operation *vecOp =
141314ecafd0SChia-hung Duan state.builder.create(op->getLoc(), op->getName().getIdentifier(),
141414ecafd0SChia-hung Duan vectorOperands, vectorTypes, op->getAttrs());
141596891f04SDiego Caballero state.registerOpVectorReplacement(op, vecOp);
141696891f04SDiego Caballero return vecOp;
1417b8737614SUday Bondhugula }
1418b8737614SUday Bondhugula
141996891f04SDiego Caballero /// Vectorizes a yield operation by widening its types. The builder's insertion
142096891f04SDiego Caballero /// point is set after the vectorized parent op to continue vectorizing the
1421d80b04abSSergei Grechanik /// operations after the parent op. When vectorizing a reduction loop, a mask may
1422d80b04abSSergei Grechanik /// be used to prevent adding garbage values to the accumulator.
142396891f04SDiego Caballero static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
142496891f04SDiego Caballero VectorizationState &state) {
142596891f04SDiego Caballero Operation *newYieldOp = widenOp(yieldOp, state);
142696891f04SDiego Caballero Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1427d80b04abSSergei Grechanik
1428d80b04abSSergei Grechanik // If there is a mask for this loop then we must prevent garbage values from
1429d80b04abSSergei Grechanik // being added to the accumulator by inserting `select` operations, for
1430d80b04abSSergei Grechanik // example:
1431d80b04abSSergei Grechanik //
14325bd4bcfcSAmy Zhuang // %val_masked = select %mask, %val, %neutralCst : vector<128xi1>,
14335bd4bcfcSAmy Zhuang // vector<128xf32>
14345bd4bcfcSAmy Zhuang // %res = arith.addf %acc, %val_masked : vector<128xf32>
14355bd4bcfcSAmy Zhuang // affine.yield %res : vector<128xf32>
1436d80b04abSSergei Grechanik //
1437d80b04abSSergei Grechanik if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1438d80b04abSSergei Grechanik state.builder.setInsertionPoint(newYieldOp);
1439d80b04abSSergei Grechanik for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
14405bd4bcfcSAmy Zhuang SmallVector<Operation *> combinerOps;
14415bd4bcfcSAmy Zhuang Value reducedVal = matchReduction(
14425bd4bcfcSAmy Zhuang cast<AffineForOp>(newParentOp).getRegionIterArgs(), i, combinerOps);
14435bd4bcfcSAmy Zhuang assert(reducedVal && "expect non-null value for parallel reduction loop");
14445bd4bcfcSAmy Zhuang assert(combinerOps.size() == 1 && "expect only one combiner op");
14455bd4bcfcSAmy Zhuang // IterOperands are neutral element vectors.
14465bd4bcfcSAmy Zhuang Value neutralVal = cast<AffineForOp>(newParentOp).getIterOperands()[i];
14475bd4bcfcSAmy Zhuang state.builder.setInsertionPoint(combinerOps.back());
14485bd4bcfcSAmy Zhuang Value maskedReducedVal = state.builder.create<arith::SelectOp>(
14495bd4bcfcSAmy Zhuang reducedVal.getLoc(), mask, reducedVal, neutralVal);
1450d80b04abSSergei Grechanik LLVM_DEBUG(
14515bd4bcfcSAmy Zhuang dbgs() << "\n[early-vect]+++++ masking an input to a binary op that"
14525bd4bcfcSAmy Zhuang "produces value for a yield Op: "
14535bd4bcfcSAmy Zhuang << maskedReducedVal);
14545bd4bcfcSAmy Zhuang combinerOps.back()->replaceUsesOfWith(reducedVal, maskedReducedVal);
1455d80b04abSSergei Grechanik }
1456d80b04abSSergei Grechanik }
1457d80b04abSSergei Grechanik
145896891f04SDiego Caballero state.builder.setInsertionPointAfter(newParentOp);
145996891f04SDiego Caballero return newYieldOp;
1460b8737614SUday Bondhugula }
1461b8737614SUday Bondhugula
146296891f04SDiego Caballero /// Encodes Operation-specific behavior for vectorization. In general we
146396891f04SDiego Caballero /// assume that all operands of an op must be vectorized but this is not
146496891f04SDiego Caballero /// always true. In the future, it would be nice to have a trait that
146596891f04SDiego Caballero /// describes how a particular operation vectorizes. For now we implement the
146696891f04SDiego Caballero /// case distinction here. Returns a vectorized form of an operation or
146796891f04SDiego Caballero /// nullptr if vectorization fails.
146896891f04SDiego Caballero // TODO: consider adding a trait to Op to describe how it gets vectorized.
146996891f04SDiego Caballero // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
147096891f04SDiego Caballero // do one-off logic here; ideally it would be TableGen'd.
147196891f04SDiego Caballero static Operation *vectorizeOneOperation(Operation *op,
147296891f04SDiego Caballero VectorizationState &state) {
147396891f04SDiego Caballero // Sanity checks.
147496891f04SDiego Caballero assert(!isa<vector::TransferReadOp>(op) &&
147596891f04SDiego Caballero "vector.transfer_read cannot be further vectorized");
147696891f04SDiego Caballero assert(!isa<vector::TransferWriteOp>(op) &&
147796891f04SDiego Caballero "vector.transfer_write cannot be further vectorized");
147896891f04SDiego Caballero
147996891f04SDiego Caballero if (auto loadOp = dyn_cast<AffineLoadOp>(op))
148096891f04SDiego Caballero return vectorizeAffineLoad(loadOp, state);
148196891f04SDiego Caballero if (auto storeOp = dyn_cast<AffineStoreOp>(op))
148296891f04SDiego Caballero return vectorizeAffineStore(storeOp, state);
148396891f04SDiego Caballero if (auto forOp = dyn_cast<AffineForOp>(op))
148496891f04SDiego Caballero return vectorizeAffineForOp(forOp, state);
148596891f04SDiego Caballero if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
148696891f04SDiego Caballero return vectorizeAffineYieldOp(yieldOp, state);
1487a54f4eaeSMogball if (auto constant = dyn_cast<arith::ConstantOp>(op))
148896891f04SDiego Caballero return vectorizeConstant(constant, state);
148996891f04SDiego Caballero
149096891f04SDiego Caballero // Other ops with regions are not supported.
149196891f04SDiego Caballero if (op->getNumRegions() != 0)
149296891f04SDiego Caballero return nullptr;
149396891f04SDiego Caballero
149496891f04SDiego Caballero return widenOp(op, state);
1495b8737614SUday Bondhugula }
1496b8737614SUday Bondhugula
149714d0735dSDiego Caballero /// Recursive implementation to convert all the nested loops in 'match' to a 2D
149814d0735dSDiego Caballero /// vector container that preserves the relative nesting level of each loop with
149914d0735dSDiego Caballero /// respect to the others in 'match'. 'currentLevel' is the nesting level that
150014d0735dSDiego Caballero /// will be assigned to the loop in the current 'match'.
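///
/// For example (illustrative), a match rooted at loop %a with children %b and
/// %c, where %c in turn encloses %d, is flattened to:
///   loops[0] = {%a}, loops[1] = {%b, %c}, loops[2] = {%d}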
150114d0735dSDiego Caballero static void
150214d0735dSDiego Caballero getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
150314d0735dSDiego Caballero std::vector<SmallVector<AffineForOp, 2>> &loops) {
150414d0735dSDiego Caballero // Add a new empty level to the output if it doesn't exist already.
150514d0735dSDiego Caballero assert(currentLevel <= loops.size() && "Unexpected currentLevel");
150614d0735dSDiego Caballero if (currentLevel == loops.size())
1507e5639b3fSMehdi Amini loops.emplace_back();
150814d0735dSDiego Caballero
150914d0735dSDiego Caballero // Add current match and recursively visit its children.
151014d0735dSDiego Caballero loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
151114d0735dSDiego Caballero for (auto childMatch : match.getMatchedChildren()) {
151214d0735dSDiego Caballero getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
151314d0735dSDiego Caballero }
151414d0735dSDiego Caballero }
151514d0735dSDiego Caballero
151614d0735dSDiego Caballero /// Converts all the nested loops in 'match' to a 2D vector container that
151714d0735dSDiego Caballero /// preserves the relative nesting level of each loop with respect to the others
151814d0735dSDiego Caballero /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
151914d0735dSDiego Caballero /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
152014d0735dSDiego Caballero /// 'loops[i+1]'.
152114d0735dSDiego Caballero static void
152214d0735dSDiego Caballero getMatchedAffineLoops(NestedMatch match,
152314d0735dSDiego Caballero std::vector<SmallVector<AffineForOp, 2>> &loops) {
152414d0735dSDiego Caballero getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
152514d0735dSDiego Caballero }
152614d0735dSDiego Caballero
152714d0735dSDiego Caballero /// Internal implementation to vectorize affine loops from a single loop nest
152814d0735dSDiego Caballero /// using an n-D vectorization strategy.
152914d0735dSDiego Caballero static LogicalResult
153014d0735dSDiego Caballero vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
153114d0735dSDiego Caballero const VectorizationStrategy &strategy) {
153214d0735dSDiego Caballero assert(loops[0].size() == 1 && "Expected single root loop");
153314d0735dSDiego Caballero AffineForOp rootLoop = loops[0][0];
153496891f04SDiego Caballero VectorizationState state(rootLoop.getContext());
153596891f04SDiego Caballero state.builder.setInsertionPointAfter(rootLoop);
153614d0735dSDiego Caballero state.strategy = &strategy;
1537b8737614SUday Bondhugula
1538b8737614SUday Bondhugula // Since patterns are recursive, they can very well intersect.
1539b8737614SUday Bondhugula // Since we do not want a fully greedy strategy in general, we decouple
1540b8737614SUday Bondhugula // pattern matching, from profitability analysis, from application.
1541b8737614SUday Bondhugula // As a consequence we must check that each root pattern is still
1542b8737614SUday Bondhugula // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
15439db53a18SRiver Riddle // TODO: implement a non-greedy profitability analysis that keeps only
1544b8737614SUday Bondhugula // non-intersecting patterns.
  if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
    return failure();
  }

  //////////////////////////////////////////////////////////////////////////////
  // Vectorize the scalar loop nest following a topological order. A new vector
  // loop nest with the vectorized operations is created along the process. If
  // vectorization succeeds, the scalar loop nest is erased. If vectorization
  // fails, the vector loop nest is erased and the scalar loop nest is not
  // modified.
  //////////////////////////////////////////////////////////////////////////////

  auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
    Operation *vectorOp = vectorizeOneOperation(op, state);
    if (!vectorOp) {
      LLVM_DEBUG(
          dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
                 << *op << "\n");
      return WalkResult::interrupt();
    }

    return WalkResult::advance();
  });

  if (opVecResult.wasInterrupted()) {
    LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
                      << rootLoop << "\n");
    // Erase vector loop nest if it was created.
    auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
    if (vecRootLoopIt != state.opVectorReplacement.end())
      eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));

    return failure();
  }

  // Replace results of reduction loops with the scalar values computed using
  // `vector.reduction` or similar ops.
  for (auto resPair : state.loopResultScalarReplacement)
    resPair.first.replaceAllUsesWith(resPair.second);

  assert(state.opVectorReplacement.count(rootLoop) == 1 &&
         "Expected vector replacement for loop nest");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
  LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
                    << *state.opVectorReplacement[rootLoop]);

  // Finish this vectorization pattern.
  state.finishVectorizationPattern(rootLoop);
  return success();
}

/// Extracts the matched loops and vectorizes them following a topological
/// order. A new vector loop nest will be created if vectorization succeeds. On
/// success, the original scalar loop nest is erased; on failure, it is left
/// unmodified.
static LogicalResult vectorizeRootMatch(NestedMatch m,
                                        const VectorizationStrategy &strategy) {
  std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
  getMatchedAffineLoops(m, loopsToVectorize);
  return vectorizeLoopNest(loopsToVectorize, strategy);
}

/// Traverses all the loop matches and classifies them into intersection
/// buckets. Two matches intersect if either of them encloses the other. A
/// match intersects with a bucket if the match intersects with the root
/// (outermost) loop in that bucket.
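///
/// For example (illustrative), given matches rooted at loops %i0, %i1 and
/// %i2, where %i1 is nested inside %i0 and %i2 is a sibling of %i0, the
/// matches rooted at %i0 and %i1 land in one bucket (with root %i0) and the
/// match rooted at %i2 in a separate one.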
static void computeIntersectionBuckets(
    ArrayRef<NestedMatch> matches,
    std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
  assert(intersectionBuckets.empty() && "Expected empty output");
  // Keeps track of the root (outermost) loop of each bucket.
  SmallVector<AffineForOp, 8> bucketRoots;

  for (const NestedMatch &match : matches) {
    AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
    bool intersects = false;
    for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
      AffineForOp bucketRoot = bucketRoots[i];
      // Add match to the bucket if the bucket root encloses the match root.
      if (bucketRoot->isAncestor(matchRoot)) {
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
      // Add match to the bucket if the match root encloses the bucket root.
      // The match root becomes the new bucket root.
      if (matchRoot->isAncestor(bucketRoot)) {
        bucketRoots[i] = matchRoot;
        intersectionBuckets[i].push_back(match);
        intersects = true;
        break;
      }
    }

    // Match doesn't intersect with any existing bucket. Create a new bucket
    // for it.
    if (!intersects) {
      bucketRoots.push_back(matchRoot);
      intersectionBuckets.emplace_back();
      intersectionBuckets.back().push_back(match);
    }
  }
}

/// Internal implementation to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order. `reductionLoops` can be provided to specify loops which
/// can be vectorized along the reduction dimension.
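///
/// For instance (illustrative), 'vectorSizes' = {32, 8} requests a 2-D
/// pattern in which, with the default inner-to-outer assignment, the
/// innermost matched loop is vectorized by a factor of 8 and its parent loop
/// by a factor of 32, i.e. vector<32x8xf32>-shaped super-vectors for f32
/// memrefs.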
static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                           ArrayRef<int64_t> vectorSizes,
                           ArrayRef<int64_t> fastestVaryingPattern,
                           const ReductionLoopMap &reductionLoops) {
  assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
         "Vectorizing reductions is supported only for 1-D vectors");

  // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
  Optional<NestedPattern> pattern =
      makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
  if (!pattern) {
    LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
    return;
  }

  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n******************************************");
  LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
  LLVM_DEBUG(dbgs() << *parentOp << "\n");

  unsigned patternDepth = pattern->getDepth();

  // Compute all the pattern matches and classify them into buckets of
  // intersecting matches.
  SmallVector<NestedMatch, 32> allMatches;
  pattern->match(parentOp, &allMatches);
  std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
  computeIntersectionBuckets(allMatches, intersectionBuckets);

  // Iterate over all buckets and vectorize the matches eagerly. We can only
  // vectorize one match from each bucket since all the matches within a bucket
  // intersect.
  for (auto &intersectingMatches : intersectionBuckets) {
    for (NestedMatch &match : intersectingMatches) {
      VectorizationStrategy strategy;
      // TODO: depending on profitability, elect to reduce the vector size.
      strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
      strategy.reductionLoops = reductionLoops;
      if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
                                      patternDepth, &strategy))) {
        continue;
      }
      vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
                                &strategy);
      // Vectorize match. Skip the rest of intersecting matches in the bucket
      // if vectorization succeeded.
      // TODO: if pattern does not apply, report it; alter the cost/benefit.
      // TODO: some diagnostics if failure to vectorize occurs.
      if (succeeded(vectorizeRootMatch(match, strategy)))
        break;
    }
  }

  LLVM_DEBUG(dbgs() << "\n");
}
/// Applies vectorization to the current function by searching over a set of
/// predetermined patterns.
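///
/// For example, the pass is registered upstream under the name
/// 'affine-super-vectorize'; mirroring the upstream tests, it can be driven
/// with:
///
///   mlir-opt -affine-super-vectorize="virtual-vector-size=128 \
///       test-fastest-varying=0 vectorize-reductions=true" input.mlir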
void Vectorize::runOnOperation() {
  func::FuncOp f = getOperation();
  if (!fastestVaryingPattern.empty() &&
      fastestVaryingPattern.size() != vectorSizes.size()) {
    f.emitRemark("Fastest varying pattern specified with different size than "
                 "the vector size.");
    return signalPassFailure();
  }

  if (vectorizeReductions && vectorSizes.size() != 1) {
    f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
    return signalPassFailure();
  }

  DenseSet<Operation *> parallelLoops;
  ReductionLoopMap reductionLoops;

  // If 'vectorize-reductions=true' is provided, we also populate the
  // `reductionLoops` map.
  if (vectorizeReductions) {
    f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
      SmallVector<LoopReduction, 2> reductions;
      if (isLoopParallel(loop, &reductions)) {
        parallelLoops.insert(loop);
        // If it's not a reduction loop, adding it to the map is not necessary.
        if (!reductions.empty())
          reductionLoops[loop] = reductions;
      }
    });
  } else {
    f.walk([&parallelLoops](AffineForOp loop) {
      if (isLoopParallel(loop))
        parallelLoops.insert(loop);
    });
  }

  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// Verify that affine loops in 'loops' meet the nesting criteria expected by
/// SuperVectorizer:
/// * There must be at least one loop.
/// * There must be a single root loop (nesting level 0).
/// * Each loop at a given nesting level must be nested in a loop from a
///   previous nesting level.
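///
/// For example (illustrative), for the nest %i0 { %i1 { %i2 } }:
/// * loops = {{%i0}, {%i1}, {%i2}} meets the criteria;
/// * loops = {{%i0, %i1}} does not (two loops at the root level);
/// * loops = {{%i1}, {%i0}} does not (%i0 is not nested in %i1).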
static LogicalResult
verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
  // Expected at least one loop.
  if (loops.empty())
    return failure();

  // Expected only one root loop.
  if (loops[0].size() != 1)
    return failure();

  // Traverse loops outer-to-inner to check some invariants.
  for (int i = 1, end = loops.size(); i < end; ++i) {
    for (AffineForOp loop : loops[i]) {
      // Check that each loop at this level is nested in one of the loops from
      // the previous level.
      if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
            return maybeParent->isProperAncestor(loop);
          }))
        return failure();

      // Check that each loop at this level is not nested in another loop from
      // this level.
      for (AffineForOp sibling : loops[i]) {
        if (sibling->isProperAncestor(loop))
          return failure();
      }
    }
  }

  return success();
}

namespace mlir {

/// External utility to vectorize affine loops in 'loops' using the n-D
/// vectorization factors in 'vectorSizes'. By default, each vectorization
/// factor is applied inner-to-outer to the loops of each loop nest.
/// 'fastestVaryingPattern' can be optionally used to provide a different loop
/// vectorization order.
/// If `reductionLoops` is not empty, the given reduction loops may be
/// vectorized along the reduction dimension.
/// TODO: Vectorizing reductions is supported only for 1-D vectorization.
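///
/// A minimal usage sketch (hypothetical client code; 'funcOp' and the chosen
/// factor are assumptions): collect the parallel loops of a function and
/// vectorize them 1-D by a factor of 8.
///
///   DenseSet<Operation *> loops;
///   funcOp.walk([&](AffineForOp forOp) {
///     if (isLoopParallel(forOp))
///       loops.insert(forOp);
///   });
///   vectorizeAffineLoops(funcOp, loops, /*vectorSizes=*/{8},
///                        /*fastestVaryingPattern=*/{},
///                        /*reductionLoops=*/ReductionLoopMap());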
void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
                          ArrayRef<int64_t> vectorSizes,
                          ArrayRef<int64_t> fastestVaryingPattern,
                          const ReductionLoopMap &reductionLoops) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
                 reductionLoops);
}

/// External utility to vectorize affine loops from a single loop nest using an
/// n-D vectorization strategy (see doc in VectorizationStrategy definition).
/// Loops are provided in a 2D vector container. The first dimension represents
/// the nesting level relative to the loops to be vectorized. The second
/// dimension contains the loops. This means that:
/// a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
/// b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
///
/// For example, for the following loop nest:
///
///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
///               %out0: memref<64x128x512xf32>,
///               %out1: memref<64x128x128xf32>) {
///     affine.for %i0 = 0 to 64 {
///       affine.for %i1 = 0 to 128 {
///         affine.for %i2 = 0 to 512 {
///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
///         }
///         affine.for %i3 = 0 to 128 {
///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
///         }
///       }
///     }
///     return
///   }
///
/// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
/// innermost loops;
/// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
/// loops;
/// loops = {{%i2}}, to vectorize only the first innermost loop;
/// loops = {{%i3}}, to vectorize only the second innermost loop;
/// loops = {{%i1}}, to vectorize only the middle loop.
LogicalResult
vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
                        const VectorizationStrategy &strategy) {
  // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
  NestedPatternContext mlContext;
  if (failed(verifyLoopNesting(loops)))
    return failure();
  return vectorizeLoopNest(loops, strategy);
}

std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
  return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
  return std::make_unique<Vectorize>();
}

} // namespace mlir