//===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements vectorization of loops, operations and data types to
// a target-independent, n-D super-vector abstraction.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Passes.h"

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <optional>

namespace mlir {
namespace affine {
#define GEN_PASS_DEF_AFFINEVECTORIZE
#include "mlir/Dialect/Affine/Passes.h.inc"
} // namespace affine
} // namespace mlir

using namespace mlir;
using namespace affine;
using namespace vector;

///
/// Implements a high-level vectorization strategy on a Function.
/// The abstraction used is that of super-vectors, which encode, in a single,
/// compact representation within the vector types, information that is
/// expected to reduce the impact of the phase ordering problem.
///
/// Vector granularity:
/// ===================
/// This pass is designed to perform vectorization at a super-vector
/// granularity. A super-vector is loosely defined as a vector type that is a
/// multiple of a "good" vector size so the HW can efficiently implement a set
/// of high-level primitives. Multiple is understood along any dimension; e.g.
/// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
/// vector<8xf32> HW vector. Note that a "good vector size so the HW can
/// efficiently implement a set of high-level primitives" is not necessarily an
/// integer multiple of actual hardware registers. We leave details of this
/// distinction unspecified for now.
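///
/// For instance, as a rough sketch (the SSA names are hypothetical and the
/// unrolling itself is performed by later materialization passes, not by this
/// one), an op on a vector<2x8xf32> super-vector targeting a vector<8xf32> HW
/// vector may eventually be unrolled into two HW-sized ops:
/// ```mlir
///   %s = arith.addf %a, %b : vector<2x8xf32>
///   // ... may later materialize into something like:
///   %s0 = arith.addf %a0, %b0 : vector<8xf32>
///   %s1 = arith.addf %a1, %b1 : vector<8xf32>
/// ```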
///
/// Some may prefer the terminology a "tile of HW vectors". In this case, one
/// should note that super-vectors implement an "always full tile" abstraction.
/// They guarantee no partial-tile separation is necessary by relying on a
/// high-level copy-reshape abstraction that we call vector.transfer. This
/// copy-reshape operation is also responsible for performing layout
/// transposition if necessary. In the general case this will require a scoped
/// allocation in some notional local memory.
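///
/// As a minimal sketch (hypothetical values; the exact permutation map depends
/// on the access pattern), such a layout-transposing copy-reshape could be
/// expressed as:
/// ```mlir
///   %f0 = arith.constant 0.0 : f32
///   %v = vector.transfer_read %A[%i, %j], %f0
///          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///        : memref<?x?xf32>, vector<32x256xf32>
/// ```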
///
/// Whatever the mental model one prefers to use for this abstraction, the key
/// point is that we burn into a single, compact representation in the vector
/// types, information that is expected to reduce the impact of the phase
/// ordering problem. Indeed, a vector type conveys information that:
///   1. the associated loops have dependency semantics that do not prevent
///      vectorization;
///   2. the associated loops have been sliced in chunks of static sizes that
///      are compatible with vector sizes (i.e. similar to unroll-and-jam);
///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
///      the vector type and no vectorization-hampering transformations can be
///      applied to them anymore;
///   4. the underlying memrefs are accessed in some notional contiguous way
///      that allows loading into vectors with some amount of spatial locality.
/// In other words, super-vectorization provides a level of separation of
/// concern by way of opacity to subsequent passes. This has the effect of
/// encapsulating and propagating vectorization constraints down the list of
/// passes until we are ready to lower further.
///
/// For a particular target, a notion of minimal n-d vector size will be
/// specified and vectorization targets a multiple of those. In the following
/// paragraph, let "k ." represent "a multiple of", to be understood as a
/// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
/// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
///
/// Some non-exhaustive notable super-vector sizes of interest include:
///   - CPU: vector<k . HW_vector_size>,
///          vector<k' . core_count x k . HW_vector_size>,
///          vector<socket_count x k' . core_count x k . HW_vector_size>;
///   - GPU: vector<k . warp_size>,
///          vector<k . warp_size x float2>,
///          vector<k . warp_size x float4>,
///          vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
///
/// Loops and operations are emitted that operate on those super-vector shapes.
/// Subsequent lowering passes will materialize to actual HW vector sizes. These
/// passes are expected to be (gradually) more target-specific.
///
/// At a high level, a vectorized load in a loop will resemble:
/// ```mlir
///   affine.for %i = ? to ? step ? {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
/// It is the responsibility of the implementation of vector.transfer_read to
/// materialize vector registers from the original scalar memrefs. A later (more
/// target-dependent) lowering pass will materialize to actual HW vector sizes.
/// This lowering may occur at different times:
///   1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
///      DmaWaitOp + vectorized operations for data transformations and shuffle;
///      thus opening opportunities for unrolling and pipelining. This is an
///      instance of library call "whiteboxing"; or
///   2. later in a target-specific lowering pass or hand-written library
///      call; achieving full separation of concerns. This is an instance of a
///      library call; or
///   3. a mix of both, e.g. based on a model.
/// In the future, these operations will expose a contract to constrain the
/// search on vectorization patterns and sizes.
///
/// Occurrence of super-vectorization in the compiler flow:
/// =======================================================
/// This is an active area of investigation. We start with 2 remarks to position
/// super-vectorization in the context of existing ongoing work: LLVM VPLAN
/// and LLVM SLP Vectorizer.
///
/// LLVM VPLAN:
/// -----------
/// The astute reader may have noticed that in the limit, super-vectorization
/// can be applied at a similar time and with similar objectives as VPLAN.
/// Consider, for instance, a traditional polyhedral compilation flow (the PPCG
/// project, for example, uses ISL to provide dependence analysis, multi-level
/// scheduling + tiling, lifting of footprints to fast memory, communication
/// synthesis, mapping, and register optimizations), with vectorization applied
/// just before unrolling. When vectorization is applied at this *late* level
/// in a typical polyhedral flow, and is instantiated with actual hardware
/// vector sizes, super-vectorization is expected to match (or subsume) the
/// type of patterns that LLVM's VPLAN aims at targeting. The main difference
/// here is that MLIR is higher level and our implementation should be
/// significantly simpler. Also note that in this mode, recursive patterns are
/// probably a bit of overkill although it is reasonable to expect that mixing
/// a bit of outer loop and inner loop vectorization + unrolling will provide
/// interesting choices to MLIR.
///
/// LLVM SLP Vectorizer:
/// --------------------
/// Super-vectorization, however, is not meant to be usable in a similar
/// fashion to the SLP vectorizer. The main difference lies in the information
/// that both vectorizers use: super-vectorization examines contiguity of
/// memory references along fastest varying dimensions and loops with recursive
/// nested patterns capturing imperfectly-nested loop nests; the SLP vectorizer,
/// on the other hand, performs flat pattern matching inside a single unrolled
/// loop body and stitches together pieces of load and store operations into
/// full 1-D vectors. We envision that the SLP vectorizer is a good way to
/// capture innermost loop, control-flow dependent patterns that
/// super-vectorization may not be able to capture easily. In other words,
/// super-vectorization does not aim at replacing the SLP vectorizer and the
/// two solutions are complementary.
///
/// Ongoing investigations:
/// -----------------------
/// We discuss the following *early* places where super-vectorization is
/// applicable and touch on the expected benefits and risks. We list the
/// opportunities in the context of the traditional polyhedral compiler flow
/// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
/// we expect to experiment with super-vectorization:
/// 1. Right after language lowering to MLIR: this is the earliest time where
///    super-vectorization is expected to be applied. At this level, all the
///    language/user/library-level annotations are available and can be fully
///    exploited. Examples include loop-type annotations (such as parallel,
///    reduction, scan, dependence distance vector, vectorizable) as well as
///    memory access annotations (such as non-aliasing writes guaranteed,
///    indirect accesses that are permutations by construction), or that a
///    particular operation is prescribed atomic by the user. At this level,
///    anything that enriches what dependence analysis can do should be
///    aggressively exploited. At this level we are close to having explicit
///    vector types in the language, except we do not impose that burden on the
///    programmer/library: we derive information from scalar code + annotations.
/// 2. After dependence analysis and before polyhedral scheduling: the
///    information that supports vectorization does not need to be supplied by a
///    higher level of abstraction. Traditional dependence analysis is available
///    in MLIR and will be used to drive vectorization and cost models.
///
/// Let's pause here and remark that applying super-vectorization as described
/// in 1. and 2. presents clear opportunities and risks:
///   - the opportunity is that vectorization is burned in the type system and
///   is protected from the adverse effect of loop scheduling, tiling, loop
///   interchange and all passes downstream. Provided that subsequent passes are
///   able to operate on vector types, the vector shapes, associated loop
///   iterator properties, alignment, and contiguity of fastest varying
///   dimensions are preserved until we lower the super-vector types. We expect
///   this to significantly rein in the adverse effects of phase ordering.
///   - the risks are that a. all passes after super-vectorization have to work
///   on elemental vector types (note that this is always true, wherever
///   vectorization is applied) and b. that imposing vectorization constraints
///   too early may be overall detrimental to loop fusion, tiling and other
///   transformations because the dependence distances are coarsened when
///   operating on elemental vector types. For this reason, the pattern
///   profitability analysis should include a component that also captures the
///   maximal amount of fusion available under a particular pattern. This is
///   still at the stage of rough ideas but in this context, search is our
///   friend as the Tensor Comprehensions and auto-TVM contributions
///   demonstrated previously.
/// The bottom line is that we do not yet have good answers for the above, but
/// we aim at making it easy to answer such questions.
///
/// Back to our listing, the last places where early super-vectorization makes
/// sense are:
/// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
///    to improve locality and parallelism, and to be configurable (e.g.
///    max-fuse, smart-fuse etc). They can also have adverse effects on
///    contiguity properties that are required for vectorization but the
///    vector.transfer copy-reshape-pad-transpose abstraction is expected to
///    help recapture these properties.
/// 4. right after polyhedral-style scheduling+tiling;
/// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
///    probably the most promising places because applying tiling achieves a
///    separation of concerns that allows rescheduling to worry less about
///    locality and more about parallelism and distribution (e.g. min-fuse).
///
/// At these levels the risk-reward looks different: on one hand we probably
/// lost a good deal of language/user/library-level annotation; on the other
/// hand we gained parallelism and locality through scheduling and tiling.
/// However we probably want to ensure tiling is compatible with the
/// full-tile-only abstraction used in super-vectorization or suffer the
/// consequences. It is too early to place bets on what will win but we expect
/// super-vectorization to be the right abstraction to allow exploring at all
/// these levels. And again, search is our friend.
///
/// Lastly, we mention it again here:
/// 6. as an MLIR-based alternative to VPLAN.
///
/// Lowering, unrolling, pipelining:
/// ================================
/// TODO: point to the proper places.
///
/// Algorithm:
/// ==========
/// The algorithm proceeds in a few steps:
///  1. defining super-vectorization patterns and matching them on the tree of
///     AffineForOp. A super-vectorization pattern is defined as a recursive
///     data structure that matches and captures nested, imperfectly-nested
///     loops that have a. conformable loop annotations attached (e.g. parallel,
///     reduction, vectorizable, ...) as well as b. all contiguous load/store
///     operations along a specified minor dimension (not necessarily the
///     fastest varying);
///  2. analyzing those patterns for profitability (TODO: and
///     interference);
///  3. then, for each pattern in order:
///    a. applying iterative rewriting of the loops and all their nested
///       operations in topological order. Rewriting is implemented by
///       coarsening the loops and converting operations and operands to their
///       vector forms. Processing operations in topological order is relatively
///       simple due to the structured nature of the control-flow
///       representation. This order ensures that all the operands of a given
///       operation have been vectorized before the operation itself in a single
///       traversal, except for operands defined outside of the loop nest. The
///       algorithm can convert the following operations to their vector form
///       (see the sketch after this list):
///         * Affine load and store operations are converted to opaque vector
///           transfer read and write operations.
///         * Scalar constant operations/operands are converted to vector
///           constant operations (splat).
///         * Uniform operands (only induction variables of loops not mapped to
///           a vector dimension, or operands defined outside of the loop nest
///           for now) are broadcast to a vector.
///           TODO: Support more uniform cases.
///         * Affine for operations with 'iter_args' are vectorized by
///           vectorizing their 'iter_args' operands and results.
///           TODO: Support more complex loops with divergent lbs and/or ubs.
///         * The remaining operations in the loop nest are vectorized by
///           widening their scalar types to vector types.
///    b. if everything under the root AffineForOp in the current pattern
///       is vectorized properly, we commit that loop to the IR and remove the
///       scalar loop. Otherwise, we discard the vectorized loop and keep the
///       original scalar loop.
///    c. vectorization is applied on the next pattern in the list. Because
///       pattern interference avoidance is not yet implemented and we do not
///       support further vectorizing an already vectorized load, we need to
///       re-verify that the pattern is still vectorizable. This is expected to
///       make cost models more difficult to write and is subject to improvement
///       in the future.
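///
/// As a rough sketch of step 3.a (hypothetical names; padding and the exact
/// broadcast placement are elided), a scalar loop such as:
/// ```mlir
///   affine.for %i = 0 to 512 {
///     %a = affine.load %A[%i] : memref<512xf32>
///     %s = arith.addf %a, %u : f32  // %u is defined outside the nest.
///     affine.store %s, %B[%i] : memref<512xf32>
///   }
/// ```
/// is rewritten, for a 128-wide 1-D strategy, into something like:
/// ```mlir
///   %vu = vector.broadcast %u : f32 to vector<128xf32>
///   affine.for %i = 0 to 512 step 128 {
///     %va = vector.transfer_read %A[%i] : memref<512xf32>, vector<128xf32>
///     %vs = arith.addf %va, %vu : vector<128xf32>
///     vector.transfer_write %vs, %B[%i] : vector<128xf32>, memref<512xf32>
///   }
/// ```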
///
/// Choice of loop transformation to support the algorithm:
/// =======================================================
/// The choice of loop transformation to apply for coarsening vectorized loops
/// is still subject to exploratory tradeoffs. In particular, say we want to
/// vectorize by a factor of 128; we want to transform the following input:
/// ```mlir
///   affine.for %i = %M to %N {
///     %a = affine.load %A[%i] : memref<?xf32>
///   }
/// ```
///
/// Traditionally, one would vectorize late (after scheduling, tiling,
/// memory promotion etc), say after stripmining (and potentially unrolling in
/// the case of LLVM's SLP vectorizer):
/// ```mlir
///   affine.for %i = floor(%M, 128) to ceil(%N, 128) {
///     affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
///       %a = affine.load %A[%ii] : memref<?xf32>
///     }
///   }
/// ```
///
/// Instead, we seek to vectorize early and freeze vector types before
/// scheduling, so we want to generate a pattern that resembles:
/// ```mlir
///   affine.for %i = ? to ? step ? {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
///
/// Two ways of materializing this coarsening come to mind:
/// i. simply dividing the lower / upper bounds by 128 creates issues
///    when representing expressions such as ii + 1 because now we only
///    have access to original values that have been divided. Additional
///    information is needed to specify accesses at below-128 granularity;
/// ii. another alternative is to coarsen the loop step but this may have
///    consequences on dependence analysis and fusability of loops: fusable
///    loops probably need to have the same step (because we don't want to
///    stripmine/unroll to enable fusion).
/// As a consequence, we choose to represent the coarsening using the loop
/// step for now and reevaluate in the future. Note that we can renormalize
/// loop steps later if/when we have evidence that they are problematic.
///
/// For the simple strawman example above, vectorizing for a 1-D vector
/// abstraction of size 128 returns code similar to:
/// ```mlir
///   affine.for %i = %M to %N step 128 {
///     %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///   }
/// ```
///
/// Unsupported cases, extensions, and work in progress (help welcome :-) ):
/// ========================================================================
///   1. lowering to concrete vector types for various HW;
///   2. reduction support for n-D vectorization and non-unit steps;
///   3. non-effecting padding during vector.transfer_read and filter during
///      vector.transfer_write;
///   4. misalignment support for vector.transfer_read / vector.transfer_write
///      (hopefully without read-modify-writes);
///   5. control-flow support;
///   6. cost-models, heuristics and search;
///   7. Op implementation, extensions and implication on memref views;
///   8. many TODOs left around.
///
/// Examples:
/// =========
/// Consider the following Function:
/// ```mlir
/// func @vector_add_2d(%M : index, %N : index) -> f32 {
///   %A = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %B = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %C = memref.alloc(%M, %N) : memref<?x?xf32, 0>
///   %f1 = arith.constant 1.0 : f32
///   %f2 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %M {
///     affine.for %i1 = 0 to %N {
///       // non-scoped %f1
///       affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i2 = 0 to %M {
///     affine.for %i3 = 0 to %N {
///       // non-scoped %f2
///       affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
///     }
///   }
///   affine.for %i4 = 0 to %M {
///     affine.for %i5 = 0 to %N {
///       %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
///       %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
///       %s5 = arith.addf %a5, %b5 : f32
///       // non-scoped %f1
///       %s6 = arith.addf %s5, %f1 : f32
///       // non-scoped %f2
///       %s7 = arith.addf %s5, %f2 : f32
///       // diamond dependency.
///       %s8 = arith.addf %s7, %s6 : f32
///       affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %res = memref.load %C[%c7, %c42] : memref<?x?xf32, 0>
///   return %res : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=256 test-fastest-varying=0"
/// ```
///
/// produces this standard innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///                vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///                vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///            memref<?x?xf32>, vector<256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///            memref<?x?xf32>, vector<256xf32>
///       %5 = arith.addf %3, %4 : vector<256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<256xf32>
///       %8 = arith.addf %7, %6 : vector<256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///                vector<256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=32,256 \
///                          test-fastest-varying=1,0"
/// ```
///
/// produces this more interesting mixed outer-innermost-loop vectorized code:
/// ```mlir
/// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
///   %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %1 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %2 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
///   %cst = arith.constant 1.0 : f32
///   %cst_0 = arith.constant 2.0 : f32
///   affine.for %i0 = 0 to %arg0 step 32 {
///     affine.for %i1 = 0 to %arg1 step 256 {
///       %cst_1 = arith.constant dense<1.0> : vector<32x256xf32>
///       vector.transfer_write %cst_1, %0[%i0, %i1] :
///                vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i2 = 0 to %arg0 step 32 {
///     affine.for %i3 = 0 to %arg1 step 256 {
///       %cst_2 = arith.constant dense<2.0> : vector<32x256xf32>
///       vector.transfer_write %cst_2, %1[%i2, %i3] :
///                vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   affine.for %i4 = 0 to %arg0 step 32 {
///     affine.for %i5 = 0 to %arg1 step 256 {
///       %3 = vector.transfer_read %0[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
///       %4 = vector.transfer_read %1[%i4, %i5] :
///                memref<?x?xf32>, vector<32x256xf32>
///       %5 = arith.addf %3, %4 : vector<32x256xf32>
///       %cst_3 = arith.constant dense<1.0> : vector<32x256xf32>
///       %6 = arith.addf %5, %cst_3 : vector<32x256xf32>
///       %cst_4 = arith.constant dense<2.0> : vector<32x256xf32>
///       %7 = arith.addf %5, %cst_4 : vector<32x256xf32>
///       %8 = arith.addf %7, %6 : vector<32x256xf32>
///       vector.transfer_write %8, %2[%i4, %i5] :
///                vector<32x256xf32>, memref<?x?xf32>
///     }
///   }
///   %c7 = arith.constant 7 : index
///   %c42 = arith.constant 42 : index
///   %9 = memref.load %2[%c7, %c42] : memref<?x?xf32>
///   return %9 : f32
/// }
/// ```
///
/// Of course, much more intricate n-D imperfectly-nested patterns can be
/// vectorized too and specified in a fully declarative fashion.
///
/// Reduction:
/// ==========
/// Vectorizing reduction loops along the reduction dimension is supported if:
/// - the reduction kind is supported,
/// - the vectorization is 1-D, and
/// - the step size of the loop equals one.
///
/// Compared to the non-vector-dimension case, two additional things are done
/// during vectorization of such loops:
/// - The resulting vector returned from the loop is reduced to a scalar using
///   `vector.reduction`.
/// - In some cases a mask is applied to the vector yielded at the end of the
///   loop to prevent garbage values from being written to the accumulator.
///
/// Reduction vectorization is switched off by default; it can be enabled by
/// passing a map from loops to reductions to utility functions, or by passing
/// `vectorize-reductions=true` to the vectorization pass.
///
/// Consider the following example:
/// ```mlir
/// func @vecred(%in: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
///     %ld = affine.load %in[%i] : memref<512xf32>
///     %cos = math.cos %ld : f32
///     %add = arith.addf %part_sum, %cos : f32
///     affine.yield %add : f32
///   }
///   return %sum : f32
/// }
/// ```
///
/// The -affine-super-vectorize pass with the following arguments:
/// ```
/// -affine-super-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
///                          vectorize-reductions=true"
/// ```
/// produces the following output:
/// ```mlir
/// #map = affine_map<(d0) -> (-d0 + 500)>
/// func @vecred(%arg0: memref<512xf32>) -> f32 {
///   %cst = arith.constant 0.000000e+00 : f32
///   %cst_0 = arith.constant dense<0.000000e+00> : vector<128xf32>
///   %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
///           -> (vector<128xf32>) {
///     // %2 is the number of iterations left in the original loop.
///     %2 = affine.apply #map(%arg1)
///     %3 = vector.create_mask %2 : vector<128xi1>
///     %cst_1 = arith.constant 0.000000e+00 : f32
///     %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
///                     memref<512xf32>, vector<128xf32>
///     %5 = math.cos %4 : vector<128xf32>
///     %6 = arith.addf %arg2, %5 : vector<128xf32>
///     // We filter out the effect of the last 12 elements using the mask.
///     %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
///     affine.yield %7 : vector<128xf32>
///   }
///   %1 = vector.reduction <add>, %0 : vector<128xf32> into f32
///   return %1 : f32
/// }
/// ```
///
/// Note that because of loop misalignment we needed to apply a mask to prevent
/// the last 12 elements from affecting the final result. The mask is full of
/// ones in every iteration except for the last one, which starts at %arg1 =
/// 384 and thus covers only 500 - 384 = 116 of the 128 lanes: there the mask
/// has the form `11...100...0`, with 116 ones and 12 zeros.

#define DEBUG_TYPE "early-vect"

using llvm::dbgs;

/// Forward declaration.
static FilterFunctionType
isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
                             int fastestVaryingMemRefDimension);

/// Creates a vectorization pattern from the command line arguments.
/// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns
/// std::nullopt, which conservatively results in no vectorization.
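///
/// For example (an illustrative sketch with hypothetical bounds), for
/// `vectorRank == 2` the pattern built below matches two-deep nests of the
/// form:
/// ```mlir
///   affine.for %i = 0 to %M {   // must pass the (parallelLoops, d0) filter
///     affine.for %j = 0 to %N { // must pass the (parallelLoops, d1) filter
///       ...
///     }
///   }
/// ```
/// where, loosely, each filter accepts loops from `parallelLoops` that are
/// vectorizable along the requested fastest-varying memref dimension.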
static std::optional<NestedPattern>
makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
            ArrayRef<int64_t> fastestVaryingPattern) {
  using affine::matcher::For;
  int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
  int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
  int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
  switch (vectorRank) {
  case 1:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
  case 2:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
  case 3:
    return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
               For(isVectorizableLoopPtrFactory(parallelLoops, d1),
                   For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
  default: {
    return std::nullopt;
  }
  }
}

static NestedPattern &vectorTransferPattern() {
  static auto pattern = affine::matcher::Op(
      llvm::IsaPred<vector::TransferReadOp, vector::TransferWriteOp>);
  return pattern;
}

namespace {

/// Base state for the vectorize pass.
/// Command line arguments are preempted by non-empty pass arguments.
struct Vectorize : public affine::impl::AffineVectorizeBase<Vectorize> {
  using Base::Base;

  void runOnOperation() override;
};

} // namespace

static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
                                      unsigned patternDepth,
                                      VectorizationStrategy *strategy) {
  assert(patternDepth > depthInPattern &&
         "patternDepth is greater than depthInPattern");
  if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
    // Don't vectorize this loop
    return;
  }
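  // Worked example (illustrative numbers, not from the source): with
  // patternDepth = 3 and a 2-D vector shape (vectorSizes.size() == 2), the
  // loop at depthInPattern = 0 is rejected above (3 - 0 > 2), while the loops
  // at depths 1 and 2 map to vector dimensions 2 - (3 - 1) = 0 and
  // 2 - (3 - 2) = 1, respectively.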
  strategy->loopToVectorDim[loop] =
      strategy->vectorSizes.size() - (patternDepth - depthInPattern);
}

/// Implements a simple strawman strategy for vectorization.
/// Given a matched pattern `matches` of depth `patternDepth`, this strategy
/// greedily assigns the fastest varying dimension ** of the vector ** to the
/// innermost loop in the pattern.
/// When coupled with a pattern that looks for the fastest varying dimension in
/// load/store MemRefs, this creates a generic vectorization strategy that works
/// for any loop in a hierarchy (outermost, innermost or intermediate).
///
/// TODO: In the future we should additionally increase the power of the
/// profitability analysis along 3 directions:
///   1. account for loop extents (both static and parametric + annotations);
///   2. account for data layout permutations;
///   3. account for impact of vectorization on maximal loop fusion.
/// Then we can quantify the above to build a cost model and search over
/// strategies.
static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
                                          unsigned depthInPattern,
                                          unsigned patternDepth,
                                          VectorizationStrategy *strategy) {
  for (auto m : matches) {
    if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
                                    patternDepth, strategy))) {
      return failure();
    }
    vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
                              patternDepth, strategy);
  }
  return success();
}

///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////

namespace {

struct VectorizationState {

  VectorizationState(MLIRContext *context) : builder(context) {}

  /// Registers the vector replacement of a scalar operation and its result
  /// values. Both operations must have the same number of results.
  ///
  /// This utility is used to register the replacement for the vast majority of
  /// the vectorized operations.
  ///
  /// Example:
  ///   * 'replaced': %0 = arith.addf %1, %2 : f32
  ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
  void registerOpVectorReplacement(Operation *replaced, Operation *replacement);

  /// Registers the vector replacement of a scalar value. The replacement
  /// operation should have a single result, which replaces the scalar value.
  ///
  /// This utility is used to register the vector replacement of block arguments
  /// and operation results which are not directly vectorized (i.e., their
  /// scalar version still exists after vectorization), like uniforms.
  ///
  /// Example:
  ///   * 'replaced': block argument or operation outside of the vectorized
  ///     loop.
  ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
  void registerValueVectorReplacement(Value replaced, Operation *replacement);

  /// Registers the vector replacement of a block argument (e.g., iter_args).
  ///
  /// Example:
  ///   * 'replaced': 'iter_arg' block argument.
  ///   * 'replacement': vectorized 'iter_arg' block argument.
  void registerBlockArgVectorReplacement(BlockArgument replaced,
                                         BlockArgument replacement);

  /// Registers the scalar replacement of a scalar value. 'replacement' must be
  /// scalar.
  ///
  /// This utility is used to register the replacement of block arguments
  /// or affine.apply results that are within the loop to be vectorized and
  /// will continue being scalar within the vector loop.
  ///
  /// Example:
  ///   * 'replaced': induction variable of a loop to be vectorized.
  ///   * 'replacement': new induction variable in the new vector loop.
  void registerValueScalarReplacement(Value replaced, Value replacement);

  /// Registers the scalar replacement of a scalar result returned from a
  /// reduction loop. 'replacement' must be scalar.
  ///
  /// This utility is used to register the replacement for scalar results of
  /// vectorized reduction loops with iter_args.
  ///
  /// Example:
  ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
  ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into
  ///     f32
  void registerLoopResultScalarReplacement(Value replaced, Value replacement);

73696891f04SDiego Caballero   /// Returns in 'replacedVals' the scalar replacement for values in
73796891f04SDiego Caballero   /// 'inputVals'.
73896891f04SDiego Caballero   void getScalarValueReplacementsFor(ValueRange inputVals,
73996891f04SDiego Caballero                                      SmallVectorImpl<Value> &replacedVals);
74096891f04SDiego Caballero 
74196891f04SDiego Caballero   /// Erases the scalar loop nest after its successful vectorization.
74296891f04SDiego Caballero   void finishVectorizationPattern(AffineForOp rootLoop);
74396891f04SDiego Caballero 
74496891f04SDiego Caballero   // Used to build and insert all the new operations created. The insertion
74596891f04SDiego Caballero   // point is preserved and updated along the vectorization process.
74696891f04SDiego Caballero   OpBuilder builder;
74796891f04SDiego Caballero 
74896891f04SDiego Caballero   // Maps input scalar operations to their vector counterparts.
74996891f04SDiego Caballero   DenseMap<Operation *, Operation *> opVectorReplacement;
75096891f04SDiego Caballero   // Maps input scalar values to their vector counterparts.
7514d67b278SJeff Niu   IRMapping valueVectorReplacement;
75296891f04SDiego Caballero   // Maps input scalar values to their new scalar counterparts in the vector
75396891f04SDiego Caballero   // loop nest.
7544d67b278SJeff Niu   IRMapping valueScalarReplacement;
755d80b04abSSergei Grechanik   // Maps results of reduction loops to their new scalar counterparts.
756d80b04abSSergei Grechanik   DenseMap<Value, Value> loopResultScalarReplacement;
75796891f04SDiego Caballero 
75896891f04SDiego Caballero   // Maps the newly created vector loops to their vector dimension.
75996891f04SDiego Caballero   DenseMap<Operation *, unsigned> vecLoopToVecDim;
76096891f04SDiego Caballero 
761d80b04abSSergei Grechanik   // Maps the new vectorized loops to their corresponding vector masks, if
762d80b04abSSergei Grechanik   // one is required.
763d80b04abSSergei Grechanik   DenseMap<Operation *, Value> vecLoopToMask;
764d80b04abSSergei Grechanik 
765b8737614SUday Bondhugula   // The strategy drives which loops to vectorize and by which amount.
766a9f13f80SMehdi Amini   const VectorizationStrategy *strategy = nullptr;
767b8737614SUday Bondhugula 
768b8737614SUday Bondhugula private:
76996891f04SDiego Caballero   /// Internal implementation to map input scalar values to their new vector
77096891f04SDiego Caballero   /// counterparts.
77196891f04SDiego Caballero   void registerValueVectorReplacementImpl(Value replaced, Value replacement);
772b8737614SUday Bondhugula };
773b8737614SUday Bondhugula 
774be0a7e9fSMehdi Amini } // namespace
775b8737614SUday Bondhugula 
77696891f04SDiego Caballero /// Registers the vector replacement of a scalar operation and its result
77796891f04SDiego Caballero /// values. Both operations must have the same number of results.
77896891f04SDiego Caballero ///
77996891f04SDiego Caballero /// This utility is used to register the replacement for the vast majority of
78096891f04SDiego Caballero /// the vectorized operations.
78196891f04SDiego Caballero ///
78296891f04SDiego Caballero /// Example:
783a54f4eaeSMogball ///   * 'replaced': %0 = arith.addf %1, %2 : f32
784a54f4eaeSMogball ///   * 'replacement': %0 = arith.addf %1, %2 : vector<128xf32>
78596891f04SDiego Caballero void VectorizationState::registerOpVectorReplacement(Operation *replaced,
78696891f04SDiego Caballero                                                      Operation *replacement) {
78796891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
78896891f04SDiego Caballero   LLVM_DEBUG(dbgs() << *replaced << "\n");
78996891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "into\n");
79096891f04SDiego Caballero   LLVM_DEBUG(dbgs() << *replacement << "\n");
79196891f04SDiego Caballero 
79296891f04SDiego Caballero   assert(replaced->getNumResults() == replacement->getNumResults() &&
79396891f04SDiego Caballero          "Unexpected replaced and replacement results");
79496891f04SDiego Caballero   assert(opVectorReplacement.count(replaced) == 0 && "already registered");
79596891f04SDiego Caballero   opVectorReplacement[replaced] = replacement;
79696891f04SDiego Caballero 
7970fd0fb53SDiego Caballero   for (auto resultTuple :
7980fd0fb53SDiego Caballero        llvm::zip(replaced->getResults(), replacement->getResults()))
7990fd0fb53SDiego Caballero     registerValueVectorReplacementImpl(std::get<0>(resultTuple),
8000fd0fb53SDiego Caballero                                        std::get<1>(resultTuple));
801b8737614SUday Bondhugula }
802b8737614SUday Bondhugula 
80396891f04SDiego Caballero /// Registers the vector replacement of a scalar value. The replacement
80496891f04SDiego Caballero /// operation should have a single result, which replaces the scalar value.
80596891f04SDiego Caballero ///
80696891f04SDiego Caballero /// This utility is used to register the vector replacement of block arguments
80796891f04SDiego Caballero /// and operation results which are not directly vectorized (i.e., their
80896891f04SDiego Caballero /// scalar version still exists after vectorization), like uniforms.
80996891f04SDiego Caballero ///
81096891f04SDiego Caballero /// Example:
81196891f04SDiego Caballero ///   * 'replaced': block argument or operation outside of the vectorized loop.
81296891f04SDiego Caballero ///   * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
81396891f04SDiego Caballero void VectorizationState::registerValueVectorReplacement(
81496891f04SDiego Caballero     Value replaced, Operation *replacement) {
81596891f04SDiego Caballero   assert(replacement->getNumResults() == 1 &&
81696891f04SDiego Caballero          "Expected single-result replacement");
81796891f04SDiego Caballero   if (Operation *defOp = replaced.getDefiningOp())
81896891f04SDiego Caballero     registerOpVectorReplacement(defOp, replacement);
81996891f04SDiego Caballero   else
82096891f04SDiego Caballero     registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
821b8737614SUday Bondhugula }
822b8737614SUday Bondhugula 
8230fd0fb53SDiego Caballero /// Registers the vector replacement of a block argument (e.g., iter_args).
8240fd0fb53SDiego Caballero ///
8250fd0fb53SDiego Caballero /// Example:
8260fd0fb53SDiego Caballero ///   * 'replaced': 'iter_arg' block argument.
8270fd0fb53SDiego Caballero ///   * 'replacement': vectorized 'iter_arg' block argument.
8280fd0fb53SDiego Caballero void VectorizationState::registerBlockArgVectorReplacement(
8290fd0fb53SDiego Caballero     BlockArgument replaced, BlockArgument replacement) {
8300fd0fb53SDiego Caballero   registerValueVectorReplacementImpl(replaced, replacement);
8310fd0fb53SDiego Caballero }
8320fd0fb53SDiego Caballero 
83396891f04SDiego Caballero void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
83496891f04SDiego Caballero                                                             Value replacement) {
83596891f04SDiego Caballero   assert(!valueVectorReplacement.contains(replaced) &&
83696891f04SDiego Caballero          "Vector replacement already registered");
8375550c821STres Popp   assert(isa<VectorType>(replacement.getType()) &&
83896891f04SDiego Caballero          "Expected vector type in vector replacement");
83996891f04SDiego Caballero   valueVectorReplacement.map(replaced, replacement);
840b8737614SUday Bondhugula }
841b8737614SUday Bondhugula 
84296891f04SDiego Caballero /// Registers the scalar replacement of a scalar value. 'replacement' must be
843181d9602SHsiangkai Wang /// scalar.
84496891f04SDiego Caballero ///
84596891f04SDiego Caballero /// This utility is used to register the replacement of block arguments
846181d9602SHsiangkai Wang /// or affine.apply results that are within the loop to be vectorized and will
847181d9602SHsiangkai Wang /// remain scalar within the vector loop.
84896891f04SDiego Caballero ///
84996891f04SDiego Caballero /// Example:
85096891f04SDiego Caballero ///   * 'replaced': induction variable of a loop to be vectorized.
85196891f04SDiego Caballero ///   * 'replacement': new induction variable in the new vector loop.
852181d9602SHsiangkai Wang void VectorizationState::registerValueScalarReplacement(Value replaced,
853181d9602SHsiangkai Wang                                                         Value replacement) {
854181d9602SHsiangkai Wang   assert(!valueScalarReplacement.contains(replaced) &&
855181d9602SHsiangkai Wang          "Scalar value replacement already registered");
856181d9602SHsiangkai Wang   assert(!isa<VectorType>(replacement.getType()) &&
857181d9602SHsiangkai Wang          "Expected scalar type in scalar replacement");
858181d9602SHsiangkai Wang   valueScalarReplacement.map(replaced, replacement);
85996891f04SDiego Caballero }
86096891f04SDiego Caballero 
861d80b04abSSergei Grechanik /// Registers the scalar replacement of a scalar result returned from a
862d80b04abSSergei Grechanik /// reduction loop. 'replacement' must be scalar.
863d80b04abSSergei Grechanik ///
864d80b04abSSergei Grechanik /// This utility is used to register the replacement for scalar results of
865d80b04abSSergei Grechanik /// vectorized reduction loops with iter_args.
866d80b04abSSergei Grechanik ///
867d80b04abSSergei Grechanik /// Example:
868d80b04abSSergei Grechanik ///   * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
869fe0bf7d4SMatthias Springer ///   * 'replacement': %1 = vector.reduction <add>, %0 : vector<4xf32> into f32
870d80b04abSSergei Grechanik void VectorizationState::registerLoopResultScalarReplacement(
871d80b04abSSergei Grechanik     Value replaced, Value replacement) {
872d80b04abSSergei Grechanik   assert(isa<AffineForOp>(replaced.getDefiningOp()));
873d80b04abSSergei Grechanik   assert(loopResultScalarReplacement.count(replaced) == 0 &&
874d80b04abSSergei Grechanik          "already registered");
875d80b04abSSergei Grechanik   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
876d80b04abSSergei Grechanik                        "with scalar: "
877d80b04abSSergei Grechanik                     << replacement);
878d80b04abSSergei Grechanik   loopResultScalarReplacement[replaced] = replacement;
879d80b04abSSergei Grechanik }
880d80b04abSSergei Grechanik 
88196891f04SDiego Caballero /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
88296891f04SDiego Caballero void VectorizationState::getScalarValueReplacementsFor(
88396891f04SDiego Caballero     ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
88496891f04SDiego Caballero   for (Value inputVal : inputVals)
88596891f04SDiego Caballero     replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
88696891f04SDiego Caballero }
88796891f04SDiego Caballero 
88896891f04SDiego Caballero /// Erases a loop nest, including all its nested operations.
88996891f04SDiego Caballero static void eraseLoopNest(AffineForOp forOp) {
89096891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
89196891f04SDiego Caballero   forOp.erase();
89296891f04SDiego Caballero }
89396891f04SDiego Caballero 
89496891f04SDiego Caballero /// Erases the scalar loop nest after its successful vectorization.
89596891f04SDiego Caballero void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
89696891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
89796891f04SDiego Caballero   eraseLoopNest(rootLoop);
898b8737614SUday Bondhugula }
899b8737614SUday Bondhugula 
900b8737614SUday Bondhugula // Applies 'map' to 'mapOperands', returning the resulting values in 'results'.
901b8737614SUday Bondhugula static void computeMemoryOpIndices(Operation *op, AffineMap map,
902b8737614SUday Bondhugula                                    ValueRange mapOperands,
90396891f04SDiego Caballero                                    VectorizationState &state,
904b8737614SUday Bondhugula                                    SmallVectorImpl<Value> &results) {
905b8737614SUday Bondhugula   for (auto resultExpr : map.getResults()) {
906b8737614SUday Bondhugula     auto singleResMap =
907b8737614SUday Bondhugula         AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
90896891f04SDiego Caballero     auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
90996891f04SDiego Caballero                                                     mapOperands);
910b8737614SUday Bondhugula     results.push_back(afOp);
911b8737614SUday Bondhugula   }
912b8737614SUday Bondhugula }
913b8737614SUday Bondhugula 
914b8737614SUday Bondhugula /// Returns a FilterFunctionType that can be used in NestedPattern to match a
915b8737614SUday Bondhugula /// loop whose underlying load/store accesses are either invariant or all
916b8737614SUday Bondhugula /// varying along the `fastestVaryingMemRefDimension`.
917b8737614SUday Bondhugula static FilterFunctionType
918b8737614SUday Bondhugula isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
919b8737614SUday Bondhugula                              int fastestVaryingMemRefDimension) {
920b8737614SUday Bondhugula   return [&parallelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
921b8737614SUday Bondhugula     auto loop = cast<AffineForOp>(forOp);
92269ffd49cSKazu Hirata     if (!parallelLoops.contains(loop))
923b8737614SUday Bondhugula       return false;
924b8737614SUday Bondhugula     int memRefDim = -1;
925b8737614SUday Bondhugula     auto vectorizableBody =
926b8737614SUday Bondhugula         isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
927b8737614SUday Bondhugula     if (!vectorizableBody)
928b8737614SUday Bondhugula       return false;
929b8737614SUday Bondhugula     return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
930b8737614SUday Bondhugula            memRefDim == fastestVaryingMemRefDimension;
931b8737614SUday Bondhugula   };
932b8737614SUday Bondhugula }
933b8737614SUday Bondhugula 
93446781630SDiego Caballero /// Returns the vector type resulting from applying the provided vectorization
93546781630SDiego Caballero /// strategy on the scalar type.
93646781630SDiego Caballero static VectorType getVectorType(Type scalarTy,
93746781630SDiego Caballero                                 const VectorizationStrategy *strategy) {
9385550c821STres Popp   assert(!isa<VectorType>(scalarTy) && "Expected scalar type");
93946781630SDiego Caballero   return VectorType::get(strategy->vectorSizes, scalarTy);
94046781630SDiego Caballero }
94146781630SDiego Caballero 
94296891f04SDiego Caballero /// Tries to transform a scalar constant into a vector constant. Returns the
94396891f04SDiego Caballero /// vector constant if the scalar type is a valid vector element type. Returns
94496891f04SDiego Caballero /// nullptr otherwise.
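///
/// Illustrative example (hypothetical IR; assumes a 1-D strategy with vector
/// size 128):
///   * scalar: %cst = arith.constant 1.0 : f32
///   * vector: %cst_v = arith.constant dense<1.0> : vector<128xf32>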
945a54f4eaeSMogball static arith::ConstantOp vectorizeConstant(arith::ConstantOp constOp,
94696891f04SDiego Caballero                                            VectorizationState &state) {
94796891f04SDiego Caballero   Type scalarTy = constOp.getType();
94896891f04SDiego Caballero   if (!VectorType::isValidElementType(scalarTy))
94996891f04SDiego Caballero     return nullptr;
95096891f04SDiego Caballero 
95196891f04SDiego Caballero   auto vecTy = getVectorType(scalarTy, state.strategy);
952cfb72fd3SJacques Pienaar   auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
953a8b7e56fSAmy Zhuang 
954a8b7e56fSAmy Zhuang   OpBuilder::InsertionGuard guard(state.builder);
955a8b7e56fSAmy Zhuang   Operation *parentOp = state.builder.getInsertionBlock()->getParentOp();
956a8b7e56fSAmy Zhuang   // Find the innermost vectorized ancestor loop to insert the vector constant.
957a8b7e56fSAmy Zhuang   while (parentOp && !state.vecLoopToVecDim.count(parentOp))
958a8b7e56fSAmy Zhuang     parentOp = parentOp->getParentOp();
959a8b7e56fSAmy Zhuang   assert(parentOp && state.vecLoopToVecDim.count(parentOp) &&
960a8b7e56fSAmy Zhuang          isa<AffineForOp>(parentOp) && "Expected a vectorized for op");
961a8b7e56fSAmy Zhuang   auto vecForOp = cast<AffineForOp>(parentOp);
962a8b7e56fSAmy Zhuang   state.builder.setInsertionPointToStart(vecForOp.getBody());
963a54f4eaeSMogball   auto newConstOp =
964a54f4eaeSMogball       state.builder.create<arith::ConstantOp>(constOp.getLoc(), vecAttr);
96596891f04SDiego Caballero 
96696891f04SDiego Caballero   // Register vector replacement for future uses in the scope.
96796891f04SDiego Caballero   state.registerOpVectorReplacement(constOp, newConstOp);
96896891f04SDiego Caballero   return newConstOp;
96996891f04SDiego Caballero }
97096891f04SDiego Caballero 
971181d9602SHsiangkai Wang /// There is no need to vectorize affine.apply. However, we still need to
972181d9602SHsiangkai Wang /// regenerate it and replace its operands using valueScalarReplacement.
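///
/// Illustrative example (hypothetical IR; %iv is assumed to be the registered
/// scalar replacement of the original induction variable %i):
///   * original: %0 = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
///   * rebuilt:  %1 = affine.apply affine_map<(d0) -> (d0 + 1)>(%iv)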
973181d9602SHsiangkai Wang static Operation *vectorizeAffineApplyOp(AffineApplyOp applyOp,
974181d9602SHsiangkai Wang                                          VectorizationState &state) {
975181d9602SHsiangkai Wang   SmallVector<Value, 8> updatedOperands;
976181d9602SHsiangkai Wang   for (Value operand : applyOp.getOperands()) {
977181d9602SHsiangkai Wang     if (state.valueVectorReplacement.contains(operand)) {
978181d9602SHsiangkai Wang       LLVM_DEBUG(
979181d9602SHsiangkai Wang           dbgs() << "\n[early-vect]+++++ affine.apply on vector operand\n");
980181d9602SHsiangkai Wang       return nullptr;
981181d9602SHsiangkai Wang     } else {
982181d9602SHsiangkai Wang       Value updatedOperand = state.valueScalarReplacement.lookupOrNull(operand);
983181d9602SHsiangkai Wang       if (!updatedOperand)
984181d9602SHsiangkai Wang         updatedOperand = operand;
985181d9602SHsiangkai Wang       updatedOperands.push_back(updatedOperand);
986181d9602SHsiangkai Wang     }
987181d9602SHsiangkai Wang   }
988181d9602SHsiangkai Wang 
989181d9602SHsiangkai Wang   auto newApplyOp = state.builder.create<AffineApplyOp>(
990181d9602SHsiangkai Wang       applyOp.getLoc(), applyOp.getAffineMap(), updatedOperands);
991181d9602SHsiangkai Wang 
992181d9602SHsiangkai Wang   // Register the new affine.apply result.
993181d9602SHsiangkai Wang   state.registerValueScalarReplacement(applyOp.getResult(),
994181d9602SHsiangkai Wang                                        newApplyOp.getResult());
995181d9602SHsiangkai Wang   return newApplyOp;
996181d9602SHsiangkai Wang }
997181d9602SHsiangkai Wang 
998d80b04abSSergei Grechanik /// Creates a constant vector filled with the neutral elements of the given
999d80b04abSSergei Grechanik /// reduction. The scalar type of vector elements will be taken from
1000d80b04abSSergei Grechanik /// `oldOperand`.
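///
/// Illustrative example (assumes an 'addf' reduction, whose neutral element is
/// 0.0, and a 1-D strategy with vector size 128):
///   %init = arith.constant dense<0.0> : vector<128xf32>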
1001a6a583daSWilliam S. Moses static arith::ConstantOp createInitialVector(arith::AtomicRMWKind reductionKind,
1002d80b04abSSergei Grechanik                                              Value oldOperand,
1003d80b04abSSergei Grechanik                                              VectorizationState &state) {
1004d80b04abSSergei Grechanik   Type scalarTy = oldOperand.getType();
1005d80b04abSSergei Grechanik   if (!VectorType::isValidElementType(scalarTy))
1006d80b04abSSergei Grechanik     return nullptr;
1007d80b04abSSergei Grechanik 
1008d80b04abSSergei Grechanik   Attribute valueAttr = getIdentityValueAttr(
1009d80b04abSSergei Grechanik       reductionKind, scalarTy, state.builder, oldOperand.getLoc());
1010d80b04abSSergei Grechanik   auto vecTy = getVectorType(scalarTy, state.strategy);
1011d80b04abSSergei Grechanik   auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
1012d80b04abSSergei Grechanik   auto newConstOp =
1013a54f4eaeSMogball       state.builder.create<arith::ConstantOp>(oldOperand.getLoc(), vecAttr);
1014d80b04abSSergei Grechanik 
1015d80b04abSSergei Grechanik   return newConstOp;
1016d80b04abSSergei Grechanik }
1017d80b04abSSergei Grechanik 
1018d80b04abSSergei Grechanik /// Creates a mask used to filter out garbage elements in the last iteration
1019d80b04abSSergei Grechanik /// of unaligned loops. If a mask is not required then `nullptr` is returned.
1020d80b04abSSergei Grechanik /// The mask will be a vector of booleans representing meaningful vector
1021d80b04abSSergei Grechanik /// elements in the current iteration. It is filled with ones for each iteration
1022d80b04abSSergei Grechanik /// except for the last one, where it has the form `11...100...0` with the
1023d80b04abSSergei Grechanik /// number of ones equal to the number of meaningful elements (i.e. the number
1024d80b04abSSergei Grechanik /// of iterations that would be left in the original loop).
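///
/// Illustrative example (hypothetical bounds): for `affine.for %i = 0 to 500`
/// vectorized by 128, the last iteration starts at %i = 384 and covers
/// 500 - 384 = 116 elements, so its mask is a vector<128xi1> holding 116 ones
/// followed by 12 zeros.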
1025d80b04abSSergei Grechanik static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
1026d80b04abSSergei Grechanik   assert(state.strategy->vectorSizes.size() == 1 &&
1027d80b04abSSergei Grechanik          "Creating a mask for non-1-D vectors is not supported.");
1028d80b04abSSergei Grechanik   assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
1029d80b04abSSergei Grechanik          "Creating a mask for loops with non-unit original step size is not "
1030d80b04abSSergei Grechanik          "supported.");
1031d80b04abSSergei Grechanik 
1032d80b04abSSergei Grechanik   // Check if we have already created the mask.
1033d80b04abSSergei Grechanik   if (Value mask = state.vecLoopToMask.lookup(vecForOp))
1034d80b04abSSergei Grechanik     return mask;
1035d80b04abSSergei Grechanik 
1036d80b04abSSergei Grechanik   // If the loop has constant bounds and the original number of iterations is
1037d80b04abSSergei Grechanik   // divisible by the vector size, then we don't need a mask.
1038d80b04abSSergei Grechanik   if (vecForOp.hasConstantBounds()) {
1039d80b04abSSergei Grechanik     int64_t originalTripCount =
1040d80b04abSSergei Grechanik         vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
104123b794f7SMatthias Springer     if (originalTripCount % vecForOp.getStepAsInt() == 0)
1042d80b04abSSergei Grechanik       return nullptr;
1043d80b04abSSergei Grechanik   }
1044d80b04abSSergei Grechanik 
1045d80b04abSSergei Grechanik   OpBuilder::InsertionGuard guard(state.builder);
1046d80b04abSSergei Grechanik   state.builder.setInsertionPointToStart(vecForOp.getBody());
1047d80b04abSSergei Grechanik 
1048d80b04abSSergei Grechanik   // We generate the mask using the `vector.create_mask` operation which accepts
1049b7cac864SDiego Caballero   // the number of meaningful elements (i.e. the length of the prefix of 1s).
1050d80b04abSSergei Grechanik   // To compute the number of meaningful elements we subtract the current value
1051d80b04abSSergei Grechanik   // of the iteration variable from the upper bound of the loop. Example:
1052d80b04abSSergei Grechanik   //
1053d80b04abSSergei Grechanik   //     // 500 is the upper bound of the loop
1054d80b04abSSergei Grechanik   //     #map = affine_map<(d0) -> (500 - d0)>
1055d80b04abSSergei Grechanik   //     %elems_left = affine.apply #map(%iv)
1056d80b04abSSergei Grechanik   //     %mask = vector.create_mask %elems_left : vector<128xi1>
1057d80b04abSSergei Grechanik 
1058d80b04abSSergei Grechanik   Location loc = vecForOp.getLoc();
1059d80b04abSSergei Grechanik 
1060d80b04abSSergei Grechanik   // First we get the upper bound of the loop using `affine.apply` or
1061d80b04abSSergei Grechanik   // `affine.min`.
1062d80b04abSSergei Grechanik   AffineMap ubMap = vecForOp.getUpperBoundMap();
1063d80b04abSSergei Grechanik   Value ub;
1064d80b04abSSergei Grechanik   if (ubMap.getNumResults() == 1)
1065d80b04abSSergei Grechanik     ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1066d80b04abSSergei Grechanik                                              vecForOp.getUpperBoundOperands());
1067d80b04abSSergei Grechanik   else
1068d80b04abSSergei Grechanik     ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1069d80b04abSSergei Grechanik                                            vecForOp.getUpperBoundOperands());
1070d80b04abSSergei Grechanik   // Then we compute the number of (original) iterations left in the loop.
1071d80b04abSSergei Grechanik   AffineExpr subExpr =
1072d80b04abSSergei Grechanik       state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1073d80b04abSSergei Grechanik   Value itersLeft =
1074d80b04abSSergei Grechanik       makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1075d80b04abSSergei Grechanik                               {ub, vecForOp.getInductionVar()});
1076d80b04abSSergei Grechanik   // If the affine maps were successfully composed then `ub` is unneeded.
1077d80b04abSSergei Grechanik   if (ub.use_empty())
1078d80b04abSSergei Grechanik     ub.getDefiningOp()->erase();
1079d80b04abSSergei Grechanik   // Finally we create the mask.
1080d80b04abSSergei Grechanik   Type maskTy = VectorType::get(state.strategy->vectorSizes,
1081d80b04abSSergei Grechanik                                 state.builder.getIntegerType(1));
1082d80b04abSSergei Grechanik   Value mask =
1083d80b04abSSergei Grechanik       state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1084d80b04abSSergei Grechanik 
1085d80b04abSSergei Grechanik   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1086d80b04abSSergei Grechanik                     << itersLeft << "\n"
1087d80b04abSSergei Grechanik                     << mask << "\n");
1088d80b04abSSergei Grechanik 
1089d80b04abSSergei Grechanik   state.vecLoopToMask[vecForOp] = mask;
1090d80b04abSSergei Grechanik   return mask;
1091d80b04abSSergei Grechanik }
1092d80b04abSSergei Grechanik 
109346781630SDiego Caballero /// Returns true if the provided value is a vector uniform given the
109446781630SDiego Caballero /// vectorization strategy.
10955ce368cfSAmy Zhuang // TODO: For now, only values that are induction variables of loops not in
10965ce368cfSAmy Zhuang // `loopToVectorDim` or invariants to all the loops in the vectorization
10975ce368cfSAmy Zhuang // strategy are considered vector uniforms.
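///
/// Illustrative example (hypothetical IR): with a strategy that vectorizes
/// only the %j loop, both %s (defined outside the nest) and %i (the induction
/// variable of a loop not being vectorized) are uniform:
///   %s = memref.load %A[] : memref<f32>
///   affine.for %i = 0 to 128 {
///     affine.for %j = 0 to 256 { ... uses of %s and %i ... }
///   }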
109846781630SDiego Caballero static bool isUniformDefinition(Value value,
109946781630SDiego Caballero                                 const VectorizationStrategy *strategy) {
11005ce368cfSAmy Zhuang   AffineForOp forOp = getForInductionVarOwner(value);
11015ce368cfSAmy Zhuang   if (forOp && strategy->loopToVectorDim.count(forOp) == 0)
11025ce368cfSAmy Zhuang     return true;
11035ce368cfSAmy Zhuang 
110446781630SDiego Caballero   for (auto loopToDim : strategy->loopToVectorDim) {
110546781630SDiego Caballero     auto loop = cast<AffineForOp>(loopToDim.first);
110646781630SDiego Caballero     if (!loop.isDefinedOutsideOfLoop(value))
110746781630SDiego Caballero       return false;
110846781630SDiego Caballero   }
110946781630SDiego Caballero   return true;
111046781630SDiego Caballero }
111146781630SDiego Caballero 
111246781630SDiego Caballero /// Generates a broadcast op for the provided uniform value using the
111346781630SDiego Caballero /// vectorization strategy in 'state'.
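///
/// Illustrative example (assumes a 1-D strategy with vector size 128; the
/// broadcast is inserted right after the definition of the uniform's scalar
/// replacement):
///   %bcast = vector.broadcast %uniform : f32 to vector<128xf32>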
111496891f04SDiego Caballero static Operation *vectorizeUniform(Value uniformVal,
111596891f04SDiego Caballero                                    VectorizationState &state) {
111696891f04SDiego Caballero   OpBuilder::InsertionGuard guard(state.builder);
11175ce368cfSAmy Zhuang   Value uniformScalarRepl =
11185ce368cfSAmy Zhuang       state.valueScalarReplacement.lookupOrDefault(uniformVal);
11195ce368cfSAmy Zhuang   state.builder.setInsertionPointAfterValue(uniformScalarRepl);
112046781630SDiego Caballero 
112196891f04SDiego Caballero   auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
112296891f04SDiego Caballero   auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
11235ce368cfSAmy Zhuang                                                    vectorTy, uniformScalarRepl);
112496891f04SDiego Caballero   state.registerValueVectorReplacement(uniformVal, bcastOp);
112596891f04SDiego Caballero   return bcastOp;
112646781630SDiego Caballero }
112746781630SDiego Caballero 
112896891f04SDiego Caballero /// Tries to vectorize a given `operand` by applying the following logic:
112996891f04SDiego Caballero /// 1. if the defining operation has been already vectorized, `operand` is
113096891f04SDiego Caballero ///    already in the proper vector form;
113196891f04SDiego Caballero /// 2. if the `operand` is a constant, returns the vectorized form of the
113296891f04SDiego Caballero ///    constant;
113396891f04SDiego Caballero /// 3. if the `operand` is uniform, returns a vector broadcast of the `op`;
113496891f04SDiego Caballero /// 4. otherwise, the vectorization of `operand` is not supported.
113596891f04SDiego Caballero /// Newly created vector operations are registered in `state` as replacement
113696891f04SDiego Caballero /// for their scalar counterparts.
1137b8737614SUday Bondhugula /// In particular, this logic captures some of the use cases where definitions
1138b8737614SUday Bondhugula /// that are not scoped under the current pattern are needed for vectorization.
1139b8737614SUday Bondhugula /// One such example is top-level function constants that need to be splatted.
1140b8737614SUday Bondhugula ///
1141b8737614SUday Bondhugula /// Returns an operand that has been vectorized to match `state`'s strategy if
1142b8737614SUday Bondhugula /// vectorization is possible with the above logic. Returns nullptr otherwise.
1143b8737614SUday Bondhugula ///
11449db53a18SRiver Riddle /// TODO: handle more complex cases.
114596891f04SDiego Caballero static Value vectorizeOperand(Value operand, VectorizationState &state) {
114696891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
114796891f04SDiego Caballero   // If this value is already vectorized, we are done.
114896891f04SDiego Caballero   if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
114996891f04SDiego Caballero     LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
115096891f04SDiego Caballero     return vecRepl;
1151b8737614SUday Bondhugula   }
115295db7b4aSDiego Caballero 
115396891f04SDiego Caballero   // A vector operand that is not in the replacement map should never reach
115496891f04SDiego Caballero   // this point. Reaching it could mean that the code was already vectorized
115596891f04SDiego Caballero   // and we shouldn't try to vectorize already-vectorized code.
11565550c821STres Popp   assert(!isa<VectorType>(operand.getType()) &&
115796891f04SDiego Caballero          "Vector op not found in replacement map");
115895db7b4aSDiego Caballero 
115996891f04SDiego Caballero   // Vectorize constant.
1160a54f4eaeSMogball   if (auto constOp = operand.getDefiningOp<arith::ConstantOp>()) {
1161a54f4eaeSMogball     auto vecConstant = vectorizeConstant(constOp, state);
116296891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
116396891f04SDiego Caballero     return vecConstant.getResult();
116496891f04SDiego Caballero   }
116596891f04SDiego Caballero 
116696891f04SDiego Caballero   // Vectorize uniform values.
116796891f04SDiego Caballero   if (isUniformDefinition(operand, state.strategy)) {
116896891f04SDiego Caballero     Operation *vecUniform = vectorizeUniform(operand, state);
116996891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
117096891f04SDiego Caballero     return vecUniform->getResult(0);
117196891f04SDiego Caballero   }
117296891f04SDiego Caballero 
117396891f04SDiego Caballero   // Check for unsupported block argument scenarios. A supported block argument
117496891f04SDiego Caballero   // should have been vectorized already.
117596891f04SDiego Caballero   if (!operand.getDefiningOp())
117696891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
117796891f04SDiego Caballero   else
117896891f04SDiego Caballero     // Generic unsupported case.
117996891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
118096891f04SDiego Caballero 
1181b8737614SUday Bondhugula   return nullptr;
1182b8737614SUday Bondhugula }
1183b8737614SUday Bondhugula 
118496891f04SDiego Caballero /// Vectorizes an affine load with the vectorization strategy in 'state' by
118596891f04SDiego Caballero /// generating a 'vector.transfer_read' op with the proper permutation map
118696891f04SDiego Caballero /// inferred from the indices of the load. The new 'vector.transfer_read' is
118796891f04SDiego Caballero /// registered as replacement of the scalar load. Returns the newly created
118896891f04SDiego Caballero /// 'vector.transfer_read' if vectorization was successful. Returns nullptr
118996891f04SDiego Caballero /// otherwise.
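///
/// Illustrative example (hypothetical IR; assumes a 1-D strategy with vector
/// size 128 along the %j loop; %pad is the padding value implicitly created
/// by the builder):
///   * scalar: %0 = affine.load %A[%i, %j] : memref<?x?xf32>
///   * vector: %1 = vector.transfer_read %A[%i, %j], %pad
///               : memref<?x?xf32>, vector<128xf32>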
119096891f04SDiego Caballero static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
119196891f04SDiego Caballero                                       VectorizationState &state) {
119296891f04SDiego Caballero   MemRefType memRefType = loadOp.getMemRefType();
119396891f04SDiego Caballero   Type elementType = memRefType.getElementType();
119496891f04SDiego Caballero   auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1195b8737614SUday Bondhugula 
119696891f04SDiego Caballero   // Replace map operands with operands from the vector loop nest.
119796891f04SDiego Caballero   SmallVector<Value, 8> mapOperands;
119896891f04SDiego Caballero   state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
119996891f04SDiego Caballero 
120096891f04SDiego Caballero   // Compute indices for the transfer op. AffineApplyOps may be generated.
120196891f04SDiego Caballero   SmallVector<Value, 8> indices;
120296891f04SDiego Caballero   indices.reserve(memRefType.getRank());
120396891f04SDiego Caballero   if (loadOp.getAffineMap() !=
1204181d9602SHsiangkai Wang       state.builder.getMultiDimIdentityMap(memRefType.getRank())) {
1205181d9602SHsiangkai Wang     // Check that no operand of the load's affine map comes from an AffineApplyOp.
1206181d9602SHsiangkai Wang     for (auto op : mapOperands) {
1207181d9602SHsiangkai Wang       if (op.getDefiningOp<AffineApplyOp>())
1208181d9602SHsiangkai Wang         return nullptr;
1209181d9602SHsiangkai Wang     }
121096891f04SDiego Caballero     computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
121196891f04SDiego Caballero                            indices);
1212181d9602SHsiangkai Wang   } else {
121396891f04SDiego Caballero     indices.append(mapOperands.begin(), mapOperands.end());
1214181d9602SHsiangkai Wang   }
121596891f04SDiego Caballero 
121696891f04SDiego Caballero   // Compute permutation map using the information of new vector loops.
121796891f04SDiego Caballero   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
121896891f04SDiego Caballero                                            indices, state.vecLoopToVecDim);
121996891f04SDiego Caballero   if (!permutationMap) {
122096891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
122196891f04SDiego Caballero     return nullptr;
122296891f04SDiego Caballero   }
122396891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
122496891f04SDiego Caballero   LLVM_DEBUG(permutationMap.print(dbgs()));
122596891f04SDiego Caballero 
122696891f04SDiego Caballero   auto transfer = state.builder.create<vector::TransferReadOp>(
1227*56d6b567SAndrzej Warzyński       loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
122896891f04SDiego Caballero 
122996891f04SDiego Caballero   // Register replacement for future uses in the scope.
123096891f04SDiego Caballero   state.registerOpVectorReplacement(loadOp, transfer);
123196891f04SDiego Caballero   return transfer;
123296891f04SDiego Caballero }
123396891f04SDiego Caballero 
123496891f04SDiego Caballero /// Vectorizes an affine store with the vectorization strategy in 'state' by
123596891f04SDiego Caballero /// generating a 'vector.transfer_write' op with the proper permutation map
123696891f04SDiego Caballero /// inferred from the indices of the store. The new 'vector.transfer_write' is
123796891f04SDiego Caballero /// registered as replacement of the scalar store. Returns the newly created
123896891f04SDiego Caballero /// 'vector.transfer_write' if vectorization was successful. Returns nullptr
123996891f04SDiego Caballero /// otherwise.
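///
/// Illustrative example (hypothetical IR; assumes a 1-D strategy with vector
/// size 128 along the %j loop, with %v_vec the vectorized value to store):
///   * scalar: affine.store %v, %A[%i, %j] : memref<?x?xf32>
///   * vector: vector.transfer_write %v_vec, %A[%i, %j]
///               : vector<128xf32>, memref<?x?xf32>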
124096891f04SDiego Caballero static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
124196891f04SDiego Caballero                                        VectorizationState &state) {
124296891f04SDiego Caballero   MemRefType memRefType = storeOp.getMemRefType();
124396891f04SDiego Caballero   Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1244b8737614SUday Bondhugula   if (!vectorValue)
1245b8737614SUday Bondhugula     return nullptr;
1246b8737614SUday Bondhugula 
124796891f04SDiego Caballero   // Replace map operands with operands from the vector loop nest.
124896891f04SDiego Caballero   SmallVector<Value, 8> mapOperands;
124996891f04SDiego Caballero   state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
125095db7b4aSDiego Caballero 
125196891f04SDiego Caballero   // Compute indices for the transfer op. AffineApplyOps may be generated.
125296891f04SDiego Caballero   SmallVector<Value, 8> indices;
125396891f04SDiego Caballero   indices.reserve(memRefType.getRank());
125496891f04SDiego Caballero   if (storeOp.getAffineMap() !=
125596891f04SDiego Caballero       state.builder.getMultiDimIdentityMap(memRefType.getRank()))
125696891f04SDiego Caballero     computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
125796891f04SDiego Caballero                            indices);
125896891f04SDiego Caballero   else
125996891f04SDiego Caballero     indices.append(mapOperands.begin(), mapOperands.end());
126096891f04SDiego Caballero 
126196891f04SDiego Caballero   // Compute permutation map using the information of new vector loops.
126296891f04SDiego Caballero   auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
126396891f04SDiego Caballero                                            indices, state.vecLoopToVecDim);
1264b8737614SUday Bondhugula   if (!permutationMap)
1265b8737614SUday Bondhugula     return nullptr;
1266b8737614SUday Bondhugula   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1267b8737614SUday Bondhugula   LLVM_DEBUG(permutationMap.print(dbgs()));
126896891f04SDiego Caballero 
126996891f04SDiego Caballero   auto transfer = state.builder.create<vector::TransferWriteOp>(
127096891f04SDiego Caballero       storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
127196891f04SDiego Caballero       permutationMap);
127296891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
127396891f04SDiego Caballero 
127496891f04SDiego Caballero   // Register replacement for future uses in the scope.
127596891f04SDiego Caballero   state.registerOpVectorReplacement(storeOp, transfer);
127696891f04SDiego Caballero   return transfer;
1277b8737614SUday Bondhugula }
127896891f04SDiego Caballero 
1279d80b04abSSergei Grechanik /// Returns true if `value` is a constant equal to the neutral element of the
1280d80b04abSSergei Grechanik /// given vectorizable reduction.
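///
/// Illustrative example: for an 'addf' reduction, an initial value defined by
/// `%cst = arith.constant 0.0 : f32` is the neutral element, so no extra
/// combining with the reduced vector is needed.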
1281a6a583daSWilliam S. Moses static bool isNeutralElementConst(arith::AtomicRMWKind reductionKind,
1282a6a583daSWilliam S. Moses                                   Value value, VectorizationState &state) {
1283d80b04abSSergei Grechanik   Type scalarTy = value.getType();
1284d80b04abSSergei Grechanik   if (!VectorType::isValidElementType(scalarTy))
1285d80b04abSSergei Grechanik     return false;
1286d80b04abSSergei Grechanik   Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1287d80b04abSSergei Grechanik                                              state.builder, value.getLoc());
1288a54f4eaeSMogball   if (auto constOp = dyn_cast_or_null<arith::ConstantOp>(value.getDefiningOp()))
1289cfb72fd3SJacques Pienaar     return constOp.getValue() == valueAttr;
1290d80b04abSSergei Grechanik   return false;
1291d80b04abSSergei Grechanik }
1292d80b04abSSergei Grechanik 
129396891f04SDiego Caballero /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
129496891f04SDiego Caballero /// created and registered as replacement for the scalar loop. The builder's
129596891f04SDiego Caballero /// insertion point is set to the new loop's body so that subsequent vectorized
129696891f04SDiego Caballero /// operations are inserted into the new loop. If the loop is a vector
129796891f04SDiego Caballero /// dimension, the step of the newly created loop will reflect the vectorization
129896891f04SDiego Caballero /// factor used to vectorize that dimension.
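///
/// Illustrative example (assumes this loop is a vector dimension with
/// vectorization factor 128 and unit original step):
///   * scalar: affine.for %i = 0 to 512 { ... }
///   * vector: affine.for %i = 0 to 512 step 128 { ... }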
129996891f04SDiego Caballero static Operation *vectorizeAffineForOp(AffineForOp forOp,
130096891f04SDiego Caballero                                        VectorizationState &state) {
13010fd0fb53SDiego Caballero   const VectorizationStrategy &strategy = *state.strategy;
13020fd0fb53SDiego Caballero   auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
13030fd0fb53SDiego Caballero   bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
13040fd0fb53SDiego Caballero 
1305d80b04abSSergei Grechanik   // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1306d80b04abSSergei Grechanik   if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1307d80b04abSSergei Grechanik     LLVM_DEBUG(
1308d80b04abSSergei Grechanik         dbgs()
1309d80b04abSSergei Grechanik         << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1310d80b04abSSergei Grechanik         << forOp.getStep() << "\n");
1311b8737614SUday Bondhugula     return nullptr;
1312d80b04abSSergei Grechanik   }
1313b8737614SUday Bondhugula 
131496891f04SDiego Caballero   // If we are vectorizing a vector dimension, compute a new step for the new
131596891f04SDiego Caballero   // vectorized loop using the vectorization factor for the vector dimension.
131696891f04SDiego Caballero   // Otherwise, propagate the step of the scalar loop.
131796891f04SDiego Caballero   unsigned newStep;
131896891f04SDiego Caballero   if (isLoopVecDim) {
131996891f04SDiego Caballero     unsigned vectorDim = loopToVecDimIt->second;
132096891f04SDiego Caballero     assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
132196891f04SDiego Caballero     int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
132223b794f7SMatthias Springer     newStep = forOp.getStepAsInt() * forOpVecFactor;
132396891f04SDiego Caballero   } else {
132423b794f7SMatthias Springer     newStep = forOp.getStepAsInt();
132596891f04SDiego Caballero   }
132696891f04SDiego Caballero 
1327d80b04abSSergei Grechanik   // Get information about reduction kinds.
1328d80b04abSSergei Grechanik   ArrayRef<LoopReduction> reductions;
1329d80b04abSSergei Grechanik   if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1330d80b04abSSergei Grechanik     auto it = strategy.reductionLoops.find(forOp);
1331d80b04abSSergei Grechanik     assert(it != strategy.reductionLoops.end() &&
1332d80b04abSSergei Grechanik            "Reduction descriptors not found when vectorizing a reduction loop");
1333d80b04abSSergei Grechanik     reductions = it->second;
1334d80b04abSSergei Grechanik     assert(reductions.size() == forOp.getNumIterOperands() &&
1335d80b04abSSergei Grechanik            "The size of reductions array must match the number of iter_args");
1336d80b04abSSergei Grechanik   }
1337d80b04abSSergei Grechanik 
13380fd0fb53SDiego Caballero   // Vectorize 'iter_args'.
13390fd0fb53SDiego Caballero   SmallVector<Value, 8> vecIterOperands;
1340d80b04abSSergei Grechanik   if (!isLoopVecDim) {
13413bd7a9b4SMatthias Springer     for (auto operand : forOp.getInits())
13420fd0fb53SDiego Caballero       vecIterOperands.push_back(vectorizeOperand(operand, state));
1343d80b04abSSergei Grechanik   } else {
1344d80b04abSSergei Grechanik     // For reduction loops we need to pass a vector of neutral elements as an
1345d80b04abSSergei Grechanik     // initial value of the accumulator. We will add the original initial value
1346d80b04abSSergei Grechanik     // later.
13473bd7a9b4SMatthias Springer     for (auto redAndOperand : llvm::zip(reductions, forOp.getInits())) {
1348d80b04abSSergei Grechanik       vecIterOperands.push_back(createInitialVector(
1349d80b04abSSergei Grechanik           std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1350d80b04abSSergei Grechanik     }
1351d80b04abSSergei Grechanik   }
13520fd0fb53SDiego Caballero 
135396891f04SDiego Caballero   auto vecForOp = state.builder.create<AffineForOp>(
135496891f04SDiego Caballero       forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
135596891f04SDiego Caballero       forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
13560fd0fb53SDiego Caballero       vecIterOperands,
135796891f04SDiego Caballero       /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
135896891f04SDiego Caballero         // Make sure we don't create a default terminator in the loop body as
135996891f04SDiego Caballero         // the proper terminator will be added during vectorization.
136096891f04SDiego Caballero       });
136196891f04SDiego Caballero 
136296891f04SDiego Caballero   // Register loop-related replacements:
136396891f04SDiego Caballero   //   1) The new vectorized loop is registered as vector replacement of the
136496891f04SDiego Caballero   //      scalar loop.
136596891f04SDiego Caballero   //   2) The new iv of the vectorized loop is registered as scalar replacement
136696891f04SDiego Caballero   //      since a scalar copy of the iv will prevail in the vectorized loop.
136796891f04SDiego Caballero   //      TODO: A vector replacement will also be added in the future when
136896891f04SDiego Caballero   //      vectorization of linear ops is supported.
13690fd0fb53SDiego Caballero   //   3) The new 'iter_args' region arguments are registered as vector
13700fd0fb53SDiego Caballero   //      replacements since they have been vectorized.
1371d80b04abSSergei Grechanik   //   4) If the loop performs a reduction along the vector dimension, a
1372d80b04abSSergei Grechanik   //      `vector.reduction` or similar op is inserted for each resulting value
1373d80b04abSSergei Grechanik   //      of the loop and its scalar value replaces the corresponding scalar
1374d80b04abSSergei Grechanik   //      result of the loop.
137596891f04SDiego Caballero   state.registerOpVectorReplacement(forOp, vecForOp);
137696891f04SDiego Caballero   state.registerValueScalarReplacement(forOp.getInductionVar(),
137796891f04SDiego Caballero                                        vecForOp.getInductionVar());
13780fd0fb53SDiego Caballero   for (auto iterTuple :
13790fd0fb53SDiego Caballero        llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
13800fd0fb53SDiego Caballero     state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
13810fd0fb53SDiego Caballero                                             std::get<1>(iterTuple));
13820fd0fb53SDiego Caballero 
1383d80b04abSSergei Grechanik   if (isLoopVecDim) {
1384d80b04abSSergei Grechanik     for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1385d80b04abSSergei Grechanik       // First, we reduce the vector returned from the loop into a scalar.
1386d80b04abSSergei Grechanik       Value reducedRes =
1387d80b04abSSergei Grechanik           getVectorReductionOp(reductions[i].kind, state.builder,
1388d80b04abSSergei Grechanik                                vecForOp.getLoc(), vecForOp.getResult(i));
1389d80b04abSSergei Grechanik       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1390d80b04abSSergei Grechanik                         << reducedRes);
1391d80b04abSSergei Grechanik       // Then we combine it with the original (scalar) initial value unless it
1392d80b04abSSergei Grechanik       // is equal to the neutral element of the reduction.
1393d80b04abSSergei Grechanik       Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1394d80b04abSSergei Grechanik       Value finalRes = reducedRes;
1395d80b04abSSergei Grechanik       if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1396a6a583daSWilliam S. Moses         finalRes =
1397a6a583daSWilliam S. Moses             arith::getReductionOp(reductions[i].kind, state.builder,
1398d80b04abSSergei Grechanik                                   reducedRes.getLoc(), reducedRes, origInit);
1399d80b04abSSergei Grechanik       state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1400d80b04abSSergei Grechanik     }
1401d80b04abSSergei Grechanik   }
1402d80b04abSSergei Grechanik 
140396891f04SDiego Caballero   if (isLoopVecDim)
140496891f04SDiego Caballero     state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
140596891f04SDiego Caballero 
140696891f04SDiego Caballero   // Change insertion point so that upcoming vectorized instructions are
140796891f04SDiego Caballero   // inserted into the vectorized loop's body.
140896891f04SDiego Caballero   state.builder.setInsertionPointToStart(vecForOp.getBody());
1409d80b04abSSergei Grechanik 
1410d80b04abSSergei Grechanik   // If this is a reduction loop then we may need to create a mask to filter out
1411d80b04abSSergei Grechanik   // garbage in the last iteration.
1412d80b04abSSergei Grechanik   if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1413d80b04abSSergei Grechanik     createMask(vecForOp, state);
1414d80b04abSSergei Grechanik 
141596891f04SDiego Caballero   return vecForOp;
141696891f04SDiego Caballero }
141796891f04SDiego Caballero 
141896891f04SDiego Caballero /// Vectorizes an arbitrary operation by plain widening. We apply generic type
141996891f04SDiego Caballero /// widening to all its results and retrieve the vector counterparts of all its
142096891f04SDiego Caballero /// operands.
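///
/// Illustrative example (assumes a 1-D strategy with vector size 128):
///   * scalar: %0 = arith.mulf %a, %b : f32
///   * vector: %0 = arith.mulf %a_vec, %b_vec : vector<128xf32>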
142196891f04SDiego Caballero static Operation *widenOp(Operation *op, VectorizationState &state) {
1422b8737614SUday Bondhugula   SmallVector<Type, 8> vectorTypes;
142396891f04SDiego Caballero   for (Value result : op->getResults())
1424b8737614SUday Bondhugula     vectorTypes.push_back(
142596891f04SDiego Caballero         VectorType::get(state.strategy->vectorSizes, result.getType()));
142696891f04SDiego Caballero 
142779da91c5SAlex Zinenko   SmallVector<Value, 8> vectorOperands;
142896891f04SDiego Caballero   for (Value operand : op->getOperands()) {
142996891f04SDiego Caballero     Value vecOperand = vectorizeOperand(operand, state);
143096891f04SDiego Caballero     if (!vecOperand) {
143196891f04SDiego Caballero       LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
143279da91c5SAlex Zinenko       return nullptr;
143395db7b4aSDiego Caballero     }
143496891f04SDiego Caballero     vectorOperands.push_back(vecOperand);
143596891f04SDiego Caballero   }
1436b8737614SUday Bondhugula 
1437b8737614SUday Bondhugula   // Create a clone of the op with the proper operands and return types.
14389db53a18SRiver Riddle   // TODO: The following assumes there is always an op with a fixed
1439b8737614SUday Bondhugula   // name that works both in scalar mode and vector mode.
14409db53a18SRiver Riddle   // TODO: Is it worth considering an Operation.clone operation which
1441b8737614SUday Bondhugula   // changes the type so we can promote an Operation with less boilerplate?
144214ecafd0SChia-hung Duan   Operation *vecOp =
144314ecafd0SChia-hung Duan       state.builder.create(op->getLoc(), op->getName().getIdentifier(),
144414ecafd0SChia-hung Duan                            vectorOperands, vectorTypes, op->getAttrs());
144596891f04SDiego Caballero   state.registerOpVectorReplacement(op, vecOp);
144696891f04SDiego Caballero   return vecOp;
1447b8737614SUday Bondhugula }
1448b8737614SUday Bondhugula 
144996891f04SDiego Caballero /// Vectorizes a yield operation by widening its types. The builder's insertion
145096891f04SDiego Caballero /// point is set after the vectorized parent op to continue vectorizing the
1451d80b04abSSergei Grechanik /// operations after the parent op. When vectorizing a reduction loop, a mask
1452d80b04abSSergei Grechanik /// may be used to prevent adding garbage values to the accumulator.
145396891f04SDiego Caballero static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
145496891f04SDiego Caballero                                          VectorizationState &state) {
145596891f04SDiego Caballero   Operation *newYieldOp = widenOp(yieldOp, state);
145696891f04SDiego Caballero   Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1457d80b04abSSergei Grechanik 
1458d80b04abSSergei Grechanik   // If there is a mask for this loop then we must prevent garbage values from
1459d80b04abSSergei Grechanik   // being added to the accumulator by inserting `select` operations, for
1460d80b04abSSergei Grechanik   // example:
1461d80b04abSSergei Grechanik   //
14625bd4bcfcSAmy Zhuang   //   %val_masked = select %mask, %val, %neutralCst : vector<128xi1>,
14635bd4bcfcSAmy Zhuang   //   vector<128xf32>
14645bd4bcfcSAmy Zhuang   //   %res = arith.addf %acc, %val_masked : vector<128xf32>
14655bd4bcfcSAmy Zhuang   //   affine.yield %res : vector<128xf32>
1466d80b04abSSergei Grechanik   //
1467d80b04abSSergei Grechanik   if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1468d80b04abSSergei Grechanik     state.builder.setInsertionPoint(newYieldOp);
1469d80b04abSSergei Grechanik     for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
14705bd4bcfcSAmy Zhuang       SmallVector<Operation *> combinerOps;
14715bd4bcfcSAmy Zhuang       Value reducedVal = matchReduction(
14725bd4bcfcSAmy Zhuang           cast<AffineForOp>(newParentOp).getRegionIterArgs(), i, combinerOps);
14735bd4bcfcSAmy Zhuang       assert(reducedVal && "expect non-null value for parallel reduction loop");
14745bd4bcfcSAmy Zhuang       assert(combinerOps.size() == 1 && "expect only one combiner op");
14755bd4bcfcSAmy Zhuang       // The iter operands of the vectorized loop are neutral-element vectors.
14763bd7a9b4SMatthias Springer       Value neutralVal = cast<AffineForOp>(newParentOp).getInits()[i];
14775bd4bcfcSAmy Zhuang       state.builder.setInsertionPoint(combinerOps.back());
14785bd4bcfcSAmy Zhuang       Value maskedReducedVal = state.builder.create<arith::SelectOp>(
14795bd4bcfcSAmy Zhuang           reducedVal.getLoc(), mask, reducedVal, neutralVal);
1480d80b04abSSergei Grechanik       LLVM_DEBUG(
14815bd4bcfcSAmy Zhuang           dbgs() << "\n[early-vect]+++++ masking an input to a binary op that"
14825bd4bcfcSAmy Zhuang                     "produces value for a yield Op: "
14835bd4bcfcSAmy Zhuang                     " produces the value for a yield op: "
14845bd4bcfcSAmy Zhuang       combinerOps.back()->replaceUsesOfWith(reducedVal, maskedReducedVal);
1485d80b04abSSergei Grechanik     }
1486d80b04abSSergei Grechanik   }
1487d80b04abSSergei Grechanik 
148896891f04SDiego Caballero   state.builder.setInsertionPointAfter(newParentOp);
148996891f04SDiego Caballero   return newYieldOp;
1490b8737614SUday Bondhugula }
1491b8737614SUday Bondhugula 
149296891f04SDiego Caballero /// Encodes Operation-specific behavior for vectorization. In general we
149396891f04SDiego Caballero /// assume that all operands of an op must be vectorized but this is not
149496891f04SDiego Caballero /// always true. In the future, it would be nice to have a trait that
149596891f04SDiego Caballero /// describes how a particular operation vectorizes. For now we implement the
149696891f04SDiego Caballero /// case distinction here. Returns a vectorized form of an operation or
149796891f04SDiego Caballero /// nullptr if vectorization fails.
149896891f04SDiego Caballero // TODO: consider adding a trait to Op to describe how it gets vectorized.
149996891f04SDiego Caballero // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
150096891f04SDiego Caballero // do one-off logic here; ideally it would be TableGen'd.
150196891f04SDiego Caballero static Operation *vectorizeOneOperation(Operation *op,
150296891f04SDiego Caballero                                         VectorizationState &state) {
150396891f04SDiego Caballero   // Sanity checks.
150496891f04SDiego Caballero   assert(!isa<vector::TransferReadOp>(op) &&
150596891f04SDiego Caballero          "vector.transfer_read cannot be further vectorized");
150696891f04SDiego Caballero   assert(!isa<vector::TransferWriteOp>(op) &&
150796891f04SDiego Caballero          "vector.transfer_write cannot be further vectorized");
150896891f04SDiego Caballero 
150996891f04SDiego Caballero   if (auto loadOp = dyn_cast<AffineLoadOp>(op))
151096891f04SDiego Caballero     return vectorizeAffineLoad(loadOp, state);
151196891f04SDiego Caballero   if (auto storeOp = dyn_cast<AffineStoreOp>(op))
151296891f04SDiego Caballero     return vectorizeAffineStore(storeOp, state);
151396891f04SDiego Caballero   if (auto forOp = dyn_cast<AffineForOp>(op))
151496891f04SDiego Caballero     return vectorizeAffineForOp(forOp, state);
151596891f04SDiego Caballero   if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
151696891f04SDiego Caballero     return vectorizeAffineYieldOp(yieldOp, state);
1517a54f4eaeSMogball   if (auto constant = dyn_cast<arith::ConstantOp>(op))
151896891f04SDiego Caballero     return vectorizeConstant(constant, state);
1519181d9602SHsiangkai Wang   if (auto applyOp = dyn_cast<AffineApplyOp>(op))
1520181d9602SHsiangkai Wang     return vectorizeAffineApplyOp(applyOp, state);
152196891f04SDiego Caballero 
152296891f04SDiego Caballero   // Other ops with regions are not supported.
152396891f04SDiego Caballero   if (op->getNumRegions() != 0)
152496891f04SDiego Caballero     return nullptr;
152596891f04SDiego Caballero 
152696891f04SDiego Caballero   return widenOp(op, state);
1527b8737614SUday Bondhugula }
1528b8737614SUday Bondhugula 
152914d0735dSDiego Caballero /// Recursive implementation to convert all the nested loops in 'match' to a 2D
153014d0735dSDiego Caballero /// vector container that preserves the relative nesting level of each loop with
153114d0735dSDiego Caballero /// respect to the others in 'match'. 'currentLevel' is the nesting level that
153214d0735dSDiego Caballero /// will be assigned to the loop in the current 'match'.
153314d0735dSDiego Caballero static void
153414d0735dSDiego Caballero getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
153514d0735dSDiego Caballero                          std::vector<SmallVector<AffineForOp, 2>> &loops) {
153614d0735dSDiego Caballero   // Add a new empty level to the output if it doesn't exist already.
153714d0735dSDiego Caballero   assert(currentLevel <= loops.size() && "Unexpected currentLevel");
153814d0735dSDiego Caballero   if (currentLevel == loops.size())
1539e5639b3fSMehdi Amini     loops.emplace_back();
154014d0735dSDiego Caballero 
154114d0735dSDiego Caballero   // Add current match and recursively visit its children.
154214d0735dSDiego Caballero   loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
154314d0735dSDiego Caballero   for (auto childMatch : match.getMatchedChildren()) {
154414d0735dSDiego Caballero     getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
154514d0735dSDiego Caballero   }
154614d0735dSDiego Caballero }
154714d0735dSDiego Caballero 
154814d0735dSDiego Caballero /// Converts all the nested loops in 'match' to a 2D vector container that
154914d0735dSDiego Caballero /// preserves the relative nesting level of each loop with respect to the others
155014d0735dSDiego Caballero /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
155114d0735dSDiego Caballero /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
155214d0735dSDiego Caballero /// 'loops[i+1]'.
155314d0735dSDiego Caballero static void
155414d0735dSDiego Caballero getMatchedAffineLoops(NestedMatch match,
155514d0735dSDiego Caballero                       std::vector<SmallVector<AffineForOp, 2>> &loops) {
155614d0735dSDiego Caballero   getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
155714d0735dSDiego Caballero }
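
// For illustration (hypothetical loop names): for a match rooted at a nest of
// the form
//
//   affine.for %i0 {
//     affine.for %i1 { ... }
//     affine.for %i2 { ... }
//   }
//
// the resulting container is loops = {{%i0}, {%i1, %i2}}: %i0 sits at nesting
// level 0 and both of its children sit at level 1.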
155814d0735dSDiego Caballero 
155914d0735dSDiego Caballero /// Internal implementation to vectorize affine loops from a single loop nest
156014d0735dSDiego Caballero /// using an n-D vectorization strategy.
156114d0735dSDiego Caballero static LogicalResult
156214d0735dSDiego Caballero vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
156314d0735dSDiego Caballero                   const VectorizationStrategy &strategy) {
156414d0735dSDiego Caballero   assert(loops[0].size() == 1 && "Expected single root loop");
156514d0735dSDiego Caballero   AffineForOp rootLoop = loops[0][0];
156696891f04SDiego Caballero   VectorizationState state(rootLoop.getContext());
156796891f04SDiego Caballero   state.builder.setInsertionPointAfter(rootLoop);
156814d0735dSDiego Caballero   state.strategy = &strategy;
1569b8737614SUday Bondhugula 
1570b8737614SUday Bondhugula   // Since patterns are recursive, they can very well intersect.
1571b8737614SUday Bondhugula   // Since we do not want a fully greedy strategy in general, we decouple
1572b8737614SUday Bondhugula   // pattern matching from profitability analysis and from application.
1573b8737614SUday Bondhugula   // As a consequence we must check that each root pattern is still
1574b8737614SUday Bondhugula   // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
15759db53a18SRiver Riddle   // TODO: implement a non-greedy profitability analysis that keeps only
1576b8737614SUday Bondhugula   // non-intersecting patterns.
157714d0735dSDiego Caballero   if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1578b8737614SUday Bondhugula     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1579b8737614SUday Bondhugula     return failure();
1580b8737614SUday Bondhugula   }
1581b8737614SUday Bondhugula 
1582b8737614SUday Bondhugula   //////////////////////////////////////////////////////////////////////////////
158396891f04SDiego Caballero   // Vectorize the scalar loop nest following a topological order. A new vector
158496891f04SDiego Caballero   // loop nest with the vectorized operations is created along the process. If
158596891f04SDiego Caballero   // vectorization succeeds, the scalar loop nest is erased. If vectorization
158696891f04SDiego Caballero   // fails, the vector loop nest is erased and the scalar loop nest is not
158796891f04SDiego Caballero   // modified.
1588b8737614SUday Bondhugula   //////////////////////////////////////////////////////////////////////////////
158996891f04SDiego Caballero 
159096891f04SDiego Caballero   auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
159196891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op << "\n");
159296891f04SDiego Caballero     Operation *vectorOp = vectorizeOneOperation(op, state);
1593d80b04abSSergei Grechanik     if (!vectorOp) {
1594d80b04abSSergei Grechanik       LLVM_DEBUG(
1595d80b04abSSergei Grechanik           dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
1596d80b04abSSergei Grechanik                  << *op << "\n");
159796891f04SDiego Caballero       return WalkResult::interrupt();
1598d80b04abSSergei Grechanik     }
159996891f04SDiego Caballero 
160096891f04SDiego Caballero     return WalkResult::advance();
160196891f04SDiego Caballero   });
160296891f04SDiego Caballero 
160396891f04SDiego Caballero   if (opVecResult.wasInterrupted()) {
160496891f04SDiego Caballero     LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
160596891f04SDiego Caballero                       << rootLoop << "\n");
160696891f04SDiego Caballero     // Erase vector loop nest if it was created.
160796891f04SDiego Caballero     auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
160896891f04SDiego Caballero     if (vecRootLoopIt != state.opVectorReplacement.end())
160996891f04SDiego Caballero       eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
161096891f04SDiego Caballero 
161196891f04SDiego Caballero     return failure();
1612b8737614SUday Bondhugula   }
1613b8737614SUday Bondhugula 
1614d80b04abSSergei Grechanik   // Replace results of reduction loops with the scalar values computed using
1615d80b04abSSergei Grechanik   // `vector.reduce` or similar ops.
1616d80b04abSSergei Grechanik   for (auto resPair : state.loopResultScalarReplacement)
1617d80b04abSSergei Grechanik     resPair.first.replaceAllUsesWith(resPair.second);
1618d80b04abSSergei Grechanik 
161996891f04SDiego Caballero   assert(state.opVectorReplacement.count(rootLoop) == 1 &&
162096891f04SDiego Caballero          "Expected vector replacement for loop nest");
1621b8737614SUday Bondhugula   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
162296891f04SDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
162396891f04SDiego Caballero                     << *state.opVectorReplacement[rootLoop]);
162496891f04SDiego Caballero 
162596891f04SDiego Caballero   // Finish this vectorization pattern.
162696891f04SDiego Caballero   state.finishVectorizationPattern(rootLoop);
162796891f04SDiego Caballero   return success();
1628b8737614SUday Bondhugula }
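
// A sketch of the end-to-end rewrite performed above, assuming
// strategy.vectorSizes = {128}; names are hypothetical and the transfer
// padding value definition is elided for brevity:
//
//   affine.for %i = 0 to 1024 {                      // scalar nest (erased
//     %l = affine.load %A[%i] : memref<1024xf32>     // on success)
//     affine.store %l, %B[%i] : memref<1024xf32>
//   }
//
// becomes
//
//   affine.for %i = 0 to 1024 step 128 {             // new vector nest
//     %v = vector.transfer_read %A[%i], %pad
//       : memref<1024xf32>, vector<128xf32>
//     vector.transfer_write %v, %B[%i] : vector<128xf32>, memref<1024xf32>
//   }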
1629b8737614SUday Bondhugula 
163096891f04SDiego Caballero /// Extracts the matched loops and vectorizes them following a topological
163196891f04SDiego Caballero /// order. On success, a new vector loop nest replaces the original scalar
163296891f04SDiego Caballero /// loop nest, which is erased; on failure, the original is left unmodified.
163314d0735dSDiego Caballero static LogicalResult vectorizeRootMatch(NestedMatch m,
163414d0735dSDiego Caballero                                         const VectorizationStrategy &strategy) {
163514d0735dSDiego Caballero   std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
163614d0735dSDiego Caballero   getMatchedAffineLoops(m, loopsToVectorize);
163714d0735dSDiego Caballero   return vectorizeLoopNest(loopsToVectorize, strategy);
1638b8737614SUday Bondhugula }
1639b8737614SUday Bondhugula 
1640ed193bceSDiego Caballero /// Traverses all the loop matches and classifies them into intersection
1641ed193bceSDiego Caballero /// buckets. Two matches intersect if any of them encloses the other one. A
1642ed193bceSDiego Caballero /// match intersects with a bucket if the match intersects with the root
1643ed193bceSDiego Caballero /// (outermost) loop in that bucket.
1644ed193bceSDiego Caballero static void computeIntersectionBuckets(
1645ed193bceSDiego Caballero     ArrayRef<NestedMatch> matches,
1646ed193bceSDiego Caballero     std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1647ed193bceSDiego Caballero   assert(intersectionBuckets.empty() && "Expected empty output");
1648ed193bceSDiego Caballero   // Keeps track of the root (outermost) loop of each bucket.
1649ed193bceSDiego Caballero   SmallVector<AffineForOp, 8> bucketRoots;
1650ed193bceSDiego Caballero 
1651ed193bceSDiego Caballero   for (const NestedMatch &match : matches) {
1652ed193bceSDiego Caballero     AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1653ed193bceSDiego Caballero     bool intersects = false;
1654ed193bceSDiego Caballero     for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1655ed193bceSDiego Caballero       AffineForOp bucketRoot = bucketRoots[i];
1656ed193bceSDiego Caballero       // Add match to the bucket if the bucket root encloses the match root.
1657ed193bceSDiego Caballero       if (bucketRoot->isAncestor(matchRoot)) {
1658ed193bceSDiego Caballero         intersectionBuckets[i].push_back(match);
1659ed193bceSDiego Caballero         intersects = true;
1660ed193bceSDiego Caballero         break;
1661ed193bceSDiego Caballero       }
1662ed193bceSDiego Caballero       // Add match to the bucket if the match root encloses the bucket root. The
1663ed193bceSDiego Caballero       // match root becomes the new bucket root.
1664ed193bceSDiego Caballero       if (matchRoot->isAncestor(bucketRoot)) {
1665ed193bceSDiego Caballero         bucketRoots[i] = matchRoot;
1666ed193bceSDiego Caballero         intersectionBuckets[i].push_back(match);
1667ed193bceSDiego Caballero         intersects = true;
1668ed193bceSDiego Caballero         break;
1669ed193bceSDiego Caballero       }
1670ed193bceSDiego Caballero     }
1671ed193bceSDiego Caballero 
1672ed193bceSDiego Caballero     // Match doesn't intersect with any existing bucket. Create a new bucket for
1673ed193bceSDiego Caballero     // it.
1674ed193bceSDiego Caballero     if (!intersects) {
1675ed193bceSDiego Caballero       bucketRoots.push_back(matchRoot);
1676e5639b3fSMehdi Amini       intersectionBuckets.emplace_back();
1677ed193bceSDiego Caballero       intersectionBuckets.back().push_back(match);
1678ed193bceSDiego Caballero     }
1679ed193bceSDiego Caballero   }
1680ed193bceSDiego Caballero }
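
// For illustration (hypothetical loops): given matches rooted at %i0, at %i2
// (nested inside %i0), and at %j0 (a sibling nest of %i0), the first two land
// in one bucket rooted at %i0, while %j0 gets its own bucket since it neither
// encloses nor is enclosed by %i0.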
1681ed193bceSDiego Caballero 
168214d0735dSDiego Caballero /// Internal implementation to vectorize affine loops in 'loops' using the n-D
168314d0735dSDiego Caballero /// vectorization factors in 'vectorSizes'. By default, each vectorization
168414d0735dSDiego Caballero /// factor is applied inner-to-outer to the loops of each loop nest.
168514d0735dSDiego Caballero /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1686d80b04abSSergei Grechanik /// vectorization order. `reductionLoops` can be provided to specify loops which
1687d80b04abSSergei Grechanik /// can be vectorized along the reduction dimension.
168814d0735dSDiego Caballero static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
16893fff5acdSDiego Caballero                            ArrayRef<int64_t> vectorSizes,
1690d80b04abSSergei Grechanik                            ArrayRef<int64_t> fastestVaryingPattern,
1691d80b04abSSergei Grechanik                            const ReductionLoopMap &reductionLoops) {
1692d80b04abSSergei Grechanik   assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
1693d80b04abSSergei Grechanik          "Vectorizing reductions is supported only for 1-D vectors");
1694d80b04abSSergei Grechanik 
1695ed193bceSDiego Caballero   // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
16960a81ace0SKazu Hirata   std::optional<NestedPattern> pattern =
1697ed193bceSDiego Caballero       makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1698037f0995SKazu Hirata   if (!pattern) {
1699ed193bceSDiego Caballero     LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1700ed193bceSDiego Caballero     return;
1701ed193bceSDiego Caballero   }
1702ed193bceSDiego Caballero 
1703b8737614SUday Bondhugula   LLVM_DEBUG(dbgs() << "\n******************************************");
1704b8737614SUday Bondhugula   LLVM_DEBUG(dbgs() << "\n******************************************");
17053fff5acdSDiego Caballero   LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1706ed193bceSDiego Caballero   LLVM_DEBUG(dbgs() << *parentOp << "\n");
17073fff5acdSDiego Caballero 
1708ed193bceSDiego Caballero   unsigned patternDepth = pattern->getDepth();
1709b8737614SUday Bondhugula 
1710ed193bceSDiego Caballero   // Compute all the pattern matches and classify them into buckets of
1711ed193bceSDiego Caballero   // intersecting matches.
1712ed193bceSDiego Caballero   SmallVector<NestedMatch, 32> allMatches;
1713ed193bceSDiego Caballero   pattern->match(parentOp, &allMatches);
1714ed193bceSDiego Caballero   std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1715ed193bceSDiego Caballero   computeIntersectionBuckets(allMatches, intersectionBuckets);
1716ed193bceSDiego Caballero 
1717ed193bceSDiego Caballero   // Iterate over all buckets and vectorize the matches eagerly. We can only
1718ed193bceSDiego Caballero   // vectorize one match from each bucket since all the matches within a bucket
1719ed193bceSDiego Caballero   // intersect.
1720ed193bceSDiego Caballero   for (auto &intersectingMatches : intersectionBuckets) {
1721ed193bceSDiego Caballero     for (NestedMatch &match : intersectingMatches) {
1722b8737614SUday Bondhugula       VectorizationStrategy strategy;
17239db53a18SRiver Riddle       // TODO: depending on profitability, elect to reduce the vector size.
1724b8737614SUday Bondhugula       strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1725d80b04abSSergei Grechanik       strategy.reductionLoops = reductionLoops;
1726ed193bceSDiego Caballero       if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1727ed193bceSDiego Caballero                                       patternDepth, &strategy))) {
1728b8737614SUday Bondhugula         continue;
1729b8737614SUday Bondhugula       }
1730ed193bceSDiego Caballero       vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1731b8737614SUday Bondhugula                                 &strategy);
1732ed193bceSDiego Caballero       // Vectorize the match. If vectorization succeeds, skip the remaining
1733ed193bceSDiego Caballero       // intersecting matches in the bucket.
1734ed193bceSDiego Caballero       // TODO: if pattern does not apply, report it; alter the cost/benefit.
17359db53a18SRiver Riddle       // TODO: some diagnostics if failure to vectorize occurs.
1736ed193bceSDiego Caballero       if (succeeded(vectorizeRootMatch(match, strategy)))
1737ed193bceSDiego Caballero         break;
1738b8737614SUday Bondhugula     }
1739b8737614SUday Bondhugula   }
1740ed193bceSDiego Caballero 
1741b8737614SUday Bondhugula   LLVM_DEBUG(dbgs() << "\n");
1742b8737614SUday Bondhugula }
1743b8737614SUday Bondhugula 
174414d0735dSDiego Caballero /// Applies vectorization to the current function by searching over a bunch of
174514d0735dSDiego Caballero /// predetermined patterns.
174641574554SRiver Riddle void Vectorize::runOnOperation() {
174758ceae95SRiver Riddle   func::FuncOp f = getOperation();
174814d0735dSDiego Caballero   if (!fastestVaryingPattern.empty() &&
174914d0735dSDiego Caballero       fastestVaryingPattern.size() != vectorSizes.size()) {
175014d0735dSDiego Caballero     f.emitRemark("Fastest varying pattern specified with a different size "
175114d0735dSDiego Caballero                  "than the vector size.");
175214d0735dSDiego Caballero     return signalPassFailure();
175314d0735dSDiego Caballero   }
175414d0735dSDiego Caballero 
1755d80b04abSSergei Grechanik   if (vectorizeReductions && vectorSizes.size() != 1) {
1756d80b04abSSergei Grechanik     f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
1757d80b04abSSergei Grechanik     return signalPassFailure();
1758d80b04abSSergei Grechanik   }
1759d80b04abSSergei Grechanik 
176097829935SKai Sasaki   if (llvm::any_of(vectorSizes, [](int64_t size) { return size <= 0; })) {
176197829935SKai Sasaki     f.emitError("Vectorization factor must be greater than zero.");
176297829935SKai Sasaki     return signalPassFailure();
176397829935SKai Sasaki   }
176497829935SKai Sasaki 
176514d0735dSDiego Caballero   DenseSet<Operation *> parallelLoops;
1766d80b04abSSergei Grechanik   ReductionLoopMap reductionLoops;
1767d80b04abSSergei Grechanik 
1768d80b04abSSergei Grechanik   // If 'vectorize-reductions=true' is provided, we also populate the
1769d80b04abSSergei Grechanik   // `reductionLoops` map.
1770d80b04abSSergei Grechanik   if (vectorizeReductions) {
1771d80b04abSSergei Grechanik     f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
1772d80b04abSSergei Grechanik       SmallVector<LoopReduction, 2> reductions;
1773d80b04abSSergei Grechanik       if (isLoopParallel(loop, &reductions)) {
1774d80b04abSSergei Grechanik         parallelLoops.insert(loop);
1775d80b04abSSergei Grechanik         // If it's not a reduction loop, adding it to the map is not necessary.
1776d80b04abSSergei Grechanik         if (!reductions.empty())
1777d80b04abSSergei Grechanik           reductionLoops[loop] = reductions;
1778d80b04abSSergei Grechanik       }
1779d80b04abSSergei Grechanik     });
1780d80b04abSSergei Grechanik   } else {
178114d0735dSDiego Caballero     f.walk([&parallelLoops](AffineForOp loop) {
178214d0735dSDiego Caballero       if (isLoopParallel(loop))
178314d0735dSDiego Caballero         parallelLoops.insert(loop);
178414d0735dSDiego Caballero     });
1785d80b04abSSergei Grechanik   }
178614d0735dSDiego Caballero 
178714d0735dSDiego Caballero   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
178814d0735dSDiego Caballero   NestedPatternContext mlContext;
1789d80b04abSSergei Grechanik   vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
1790d80b04abSSergei Grechanik                  reductionLoops);
179114d0735dSDiego Caballero }
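
// For reference, this pass is registered as 'affine-super-vectorize'; the
// option names below reflect this revision and the sizes are only examples:
//
//   mlir-opt --affine-super-vectorize="virtual-vector-size=128 \
//       test-fastest-varying=0 vectorize-reductions=true" input.mlir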
179214d0735dSDiego Caballero 
179314d0735dSDiego Caballero /// Verify that affine loops in 'loops' meet the nesting criteria expected by
179414d0735dSDiego Caballero /// SuperVectorizer:
179514d0735dSDiego Caballero ///   * There must be at least one loop.
179614d0735dSDiego Caballero ///   * There must be a single root loop (nesting level 0).
179714d0735dSDiego Caballero ///   * Each loop at a given nesting level must be nested in a loop from a
179814d0735dSDiego Caballero ///     previous nesting level.
1799f9f6b4f3SDiego Caballero static LogicalResult
180014d0735dSDiego Caballero verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1801f9f6b4f3SDiego Caballero   // Expected at least one loop.
1802f9f6b4f3SDiego Caballero   if (loops.empty())
1803f9f6b4f3SDiego Caballero     return failure();
1804f9f6b4f3SDiego Caballero 
1805f9f6b4f3SDiego Caballero   // Expected only one root loop.
1806f9f6b4f3SDiego Caballero   if (loops[0].size() != 1)
1807f9f6b4f3SDiego Caballero     return failure();
180814d0735dSDiego Caballero 
180914d0735dSDiego Caballero   // Traverse loops outer-to-inner to check some invariants.
181014d0735dSDiego Caballero   for (int i = 1, end = loops.size(); i < end; ++i) {
181114d0735dSDiego Caballero     for (AffineForOp loop : loops[i]) {
181214d0735dSDiego Caballero       // Check that each loop at this level is nested in one of the loops from
181314d0735dSDiego Caballero       // the previous level.
1814f9f6b4f3SDiego Caballero       if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1815f9f6b4f3SDiego Caballero             return maybeParent->isProperAncestor(loop);
1816f9f6b4f3SDiego Caballero           }))
1817f9f6b4f3SDiego Caballero         return failure();
181814d0735dSDiego Caballero 
181914d0735dSDiego Caballero       // Check that each loop at this level is not nested in another loop from
182014d0735dSDiego Caballero       // this level.
1821f9f6b4f3SDiego Caballero       for (AffineForOp sibling : loops[i]) {
1822f9f6b4f3SDiego Caballero         if (sibling->isProperAncestor(loop))
1823f9f6b4f3SDiego Caballero           return failure();
182414d0735dSDiego Caballero       }
182514d0735dSDiego Caballero     }
182614d0735dSDiego Caballero   }
182714d0735dSDiego Caballero 
1828f9f6b4f3SDiego Caballero   return success();
1829f9f6b4f3SDiego Caballero }
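
// Examples of inputs rejected by the checks above (hypothetical loop handles):
//   loops = {}              -> failure: no loops at all.
//   loops = {{i0, j0}}      -> failure: two root loops at level 0.
//   loops = {{i0}, {k0}}    -> failure if k0 is not properly nested in i0.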
1830f9f6b4f3SDiego Caballero 
183214d0735dSDiego Caballero /// External utility to vectorize affine loops in 'loops' using the n-D
183314d0735dSDiego Caballero /// vectorization factors in 'vectorSizes'. By default, each vectorization
183414d0735dSDiego Caballero /// factor is applied inner-to-outer to the loops of each loop nest.
183514d0735dSDiego Caballero /// 'fastestVaryingPattern' can be optionally used to provide a different loop
183614d0735dSDiego Caballero /// vectorization order.
1837d80b04abSSergei Grechanik /// If `reductionLoops` is not empty, the given reduction loops may be
1838d80b04abSSergei Grechanik /// vectorized along the reduction dimension.
1839d80b04abSSergei Grechanik /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
18404c48f016SMatthias Springer void mlir::affine::vectorizeAffineLoops(
18414c48f016SMatthias Springer     Operation *parentOp, DenseSet<Operation *> &loops,
18424c48f016SMatthias Springer     ArrayRef<int64_t> vectorSizes, ArrayRef<int64_t> fastestVaryingPattern,
1843d80b04abSSergei Grechanik     const ReductionLoopMap &reductionLoops) {
184414d0735dSDiego Caballero   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
184514d0735dSDiego Caballero   NestedPatternContext mlContext;
1846d80b04abSSergei Grechanik   vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
1847d80b04abSSergei Grechanik                  reductionLoops);
184814d0735dSDiego Caballero }
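
// A usage sketch for the utility above; the wrapper function is hypothetical
// and no reduction loops are supplied:
//
//   void vectorizeAllParallelLoops(func::FuncOp f) {
//     DenseSet<Operation *> loops;
//     f.walk([&](AffineForOp forOp) {
//       if (isLoopParallel(forOp))
//         loops.insert(forOp);
//     });
//     // 1-D vectorization by a factor of 128, default loop order.
//     vectorizeAffineLoops(f, loops, /*vectorSizes=*/{128},
//                          /*fastestVaryingPattern=*/{}, ReductionLoopMap());
//   }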
184914d0735dSDiego Caballero 
185014d0735dSDiego Caballero /// External utility to vectorize affine loops from a single loop nest using an
185114d0735dSDiego Caballero /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
185214d0735dSDiego Caballero /// Loops are provided in a 2D vector container. The first dimension represents
185314d0735dSDiego Caballero /// the nesting level relative to the loops to be vectorized. The second
185414d0735dSDiego Caballero /// dimension contains the loops. This means that:
185514d0735dSDiego Caballero ///   a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
185614d0735dSDiego Caballero ///   b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
185714d0735dSDiego Caballero ///
185814d0735dSDiego Caballero /// For example, for the following loop nest:
185914d0735dSDiego Caballero ///
186014d0735dSDiego Caballero ///   func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
186114d0735dSDiego Caballero ///               %out0: memref<64x128x512xf32>,
186214d0735dSDiego Caballero ///               %out1: memref<64x128x128xf32>) {
186314d0735dSDiego Caballero ///     affine.for %i0 = 0 to 64 {
186414d0735dSDiego Caballero ///       affine.for %i1 = 0 to 128 {
186514d0735dSDiego Caballero ///         affine.for %i2 = 0 to 512 {
186614d0735dSDiego Caballero ///           %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
186714d0735dSDiego Caballero ///           affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
186814d0735dSDiego Caballero ///         }
186914d0735dSDiego Caballero ///         affine.for %i3 = 0 to 128 {
187014d0735dSDiego Caballero ///           %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
187114d0735dSDiego Caballero ///           affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
187214d0735dSDiego Caballero ///         }
187314d0735dSDiego Caballero ///       }
187414d0735dSDiego Caballero ///     }
187514d0735dSDiego Caballero ///     return
187614d0735dSDiego Caballero ///   }
187714d0735dSDiego Caballero ///
187814d0735dSDiego Caballero /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
187914d0735dSDiego Caballero /// innermost loops;
188014d0735dSDiego Caballero /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
188114d0735dSDiego Caballero /// loops;
188214d0735dSDiego Caballero /// loops = {{%i2}}, to vectorize only the first innermost loop;
188314d0735dSDiego Caballero /// loops = {{%i3}}, to vectorize only the second innermost loop;
188414d0735dSDiego Caballero /// loops = {{%i1}}, to vectorize only the middle loop.
18854c48f016SMatthias Springer LogicalResult mlir::affine::vectorizeAffineLoopNest(
18864c48f016SMatthias Springer     std::vector<SmallVector<AffineForOp, 2>> &loops,
188714d0735dSDiego Caballero     const VectorizationStrategy &strategy) {
188814d0735dSDiego Caballero   // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
188914d0735dSDiego Caballero   NestedPatternContext mlContext;
1890f9f6b4f3SDiego Caballero   if (failed(verifyLoopNesting(loops)))
1891f9f6b4f3SDiego Caballero     return failure();
189214d0735dSDiego Caballero   return vectorizeLoopNest(loops, strategy);
189314d0735dSDiego Caballero }
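
// A usage sketch tying the example above together, assuming i0, i2 and i3 are
// AffineForOp handles to the corresponding loops:
//
//   std::vector<SmallVector<AffineForOp, 2>> loops = {{i0}, {i2, i3}};
//   VectorizationStrategy strategy;
//   strategy.vectorSizes.assign({4, 128});  // one factor per nesting level
//   if (failed(vectorizeAffineLoopNest(loops, strategy)))
//     llvm::errs() << "loop nest not vectorized\n";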
1894