1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass generates an entirely new schedule tree from the data dependences
10 // and iteration domains. The new schedule tree is computed in two steps:
11 //
12 // 1) The isl scheduling optimizer is run
13 //
14 // The isl scheduling optimizer creates a new schedule tree that maximizes
15 // parallelism and tileability and minimizes data-dependence distances. The
16 // algorithm used is a modified version of the ``Pluto'' algorithm:
17 //
18 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
19 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
20 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
21 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
22 //
23 // 2) A set of post-scheduling transformations is applied on the schedule tree.
24 //
25 // These optimizations include:
26 //
27 //  - Tiling of the innermost tilable bands
28 //  - Prevectorization - The choice of a possible outer loop that is strip-mined
29 //                       to the innermost level to enable inner-loop
30 //                       vectorization.
31 //  - Some optimizations for spatial locality are also planned.
32 //
33 // For a detailed description of the schedule tree itself please see section 6
34 // of:
35 //
36 // Polyhedral AST generation is more than scanning polyhedra
37 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
38 // ACM Transactions on Programming Languages and Systems (TOPLAS),
39 // 37(4), July 2015
40 // http://www.grosser.es/#pub-polyhedral-AST-generation
41 //
42 // This publication also contains a detailed discussion of the different options
43 // for polyhedral loop unrolling, full/partial tile separation and other uses
44 // of the schedule tree.
45 //
46 //===----------------------------------------------------------------------===//
47 
48 #include "polly/ScheduleOptimizer.h"
49 #include "polly/CodeGen/CodeGeneration.h"
50 #include "polly/DependenceInfo.h"
51 #include "polly/LinkAllPasses.h"
52 #include "polly/ManualOptimizer.h"
53 #include "polly/Options.h"
54 #include "polly/ScheduleTreeTransform.h"
55 #include "polly/ScopInfo.h"
56 #include "polly/ScopPass.h"
57 #include "polly/Simplify.h"
58 #include "polly/Support/ISLOStream.h"
59 #include "llvm/ADT/Sequence.h"
60 #include "llvm/ADT/Statistic.h"
61 #include "llvm/Analysis/TargetTransformInfo.h"
62 #include "llvm/IR/Function.h"
63 #include "llvm/InitializePasses.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Debug.h"
66 #include "llvm/Support/raw_ostream.h"
67 #include "isl/ctx.h"
68 #include "isl/options.h"
69 #include "isl/printer.h"
70 #include "isl/schedule.h"
71 #include "isl/schedule_node.h"
72 #include "isl/union_map.h"
73 #include "isl/union_set.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cmath>
77 #include <cstdint>
78 #include <cstdlib>
79 #include <string>
80 #include <vector>
81 
82 using namespace llvm;
83 using namespace polly;
84 
85 #define DEBUG_TYPE "polly-opt-isl"
86 
// Restrict which kind of dependences the isl scheduler optimizes for
// (all dependences, or only read-after-write).
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Whether the dependences are simplified before being handed to the
// scheduler.
static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Bound on the constant terms the scheduler may introduce; limits schedule
// complexity (-1 disables the limit).
static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Bound on the coefficients the scheduler may introduce (-1 disables the
// limit).
static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Loop fusion strategy passed to the isl scheduler.
static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Whether the scheduler should try to build bands with as many members as
// possible.
static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Whether the scheduler should prefer schedules whose outermost band member
// is parallel (coincident).
static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Strip-mine factor used by the prevectorization transformation; determines
// the trip count of the generated innermost (vector) loop.
static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));
129 
// Master switch for the (first-level) tiling of tileable bands.
static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// Target latency of a vector FMA, in cycles. Used by the pattern-matching
// optimization to model the target processor.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

// Target throughput of vector FMAs, in instructions per cycle. Used together
// with LatencyVectorFma to model the target processor.
static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
148 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified or not provided by the
// TargetTransformInfo.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback for the L1 associativity if neither the option above nor the
// TargetTransformInfo provides a value (8 matches SandyBridge).
static cl::opt<int> FirstCacheLevelDefaultAssociativity(
    "polly-target-1st-cache-level-default-associativity",
    cl::desc("The default associativity of the first cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

// L2 associativity; -1 means "not specified" (query TTI / use the default
// below).
static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback for the L2 associativity (8 matches SandyBridge).
static cl::opt<int> SecondCacheLevelDefaultAssociativity(
    "polly-target-2nd-cache-level-default-associativity",
    cl::desc("The default associativity of the second cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

// L1 size in bytes; -1 means "not specified".
static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback for the L1 size (32 KiB matches SandyBridge).
static cl::opt<int> FirstCacheLevelDefaultSize(
    "polly-target-1st-cache-level-default-size",
    cl::desc("The default size of the first cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

// L2 size in bytes; -1 means "not specified".
static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback for the L2 size (256 KiB matches SandyBridge).
static cl::opt<int> SecondCacheLevelDefaultSize(
    "polly-target-2nd-cache-level-default-size",
    cl::desc("The default size of the second cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));

// Vector register width in bits; -1 means "query LLVM's target info".
static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
205 
// Tile size used for every dimension that --polly-tile-sizes does not cover.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

// Per-dimension first-level tile sizes, outermost dimension first.
static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

// Enable a second round of tiling applied on top of the first-level tiles.
static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

// Tile size used for every dimension that --polly-2nd-level-tile-sizes does
// not cover.
static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));
229 
230 static cl::list<int>
231     SecondLevelTileSizes("polly-2nd-level-tile-sizes",
232                          cl::desc("A tile size for each loop dimension, filled "
233                                   "with --polly-default-tile-size"),
234                          cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
235                          cl::cat(PollyCategory));
236 
// Enable an additional tiling level whose point loops are fully unrolled
// ("register tiling", see applyRegisterTiling below).
static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

// Tile size used for every dimension that --polly-register-tile-sizes does
// not cover.
static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
247 
248 static cl::opt<int> PollyPatternMatchingNcQuotient(
249     "polly-pattern-matching-nc-quotient",
250     cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
251              "macro-kernel, by Nr, the parameter of the micro-kernel"),
252     cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
253 
254 static cl::list<int>
255     RegisterTileSizes("polly-register-tile-sizes",
256                       cl::desc("A tile size for each loop dimension, filled "
257                                "with --polly-register-tile-size"),
258                       cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
259                       cl::cat(PollyCategory));
260 
// Honor user-directed transformations attached as loop metadata (pragmas).
static cl::opt<bool> PragmaBasedOpts(
    "polly-pragma-based-opts",
    cl::desc("Apply user-directed transformation from metadata"),
    cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Enable the pattern-matching optimizations (e.g. the matrix-multiplication
// optimization below).
static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Debugging aid: dump the polyhedral description of each SCoP after
// optimization.
static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
277 
// Counters for how many SCoPs this pass saw, how many were rescheduled by
// the isl scheduler, and how many ended up with a changed schedule.
STATISTIC(ScopsProcessed, "Number of scops processed");
STATISTIC(ScopsRescheduled, "Number of scops rescheduled");
STATISTIC(ScopsOptimized, "Number of scops optimized");

STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized");
STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized");

// Declares an array of three statistics recording a quantity at three points
// in time: on the original schedule (index 0), after the isl scheduler
// (index 1), and after the post-scheduling optimizer (index 2).
#define THREE_STATISTICS(VARNAME, DESC)                                        \
  static Statistic VARNAME[3] = {                                              \
      {DEBUG_TYPE, #VARNAME "0", DESC " (original)"},                          \
      {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)"},                   \
      {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)"}}

THREE_STATISTICS(NumBands, "Number of bands");
THREE_STATISTICS(NumBandMembers, "Number of band members");
THREE_STATISTICS(NumCoincident, "Number of coincident band members");
THREE_STATISTICS(NumPermutable, "Number of permutable bands");
THREE_STATISTICS(NumFilters, "Number of filter nodes");
THREE_STATISTICS(NumExtension, "Number of extension nodes");

// Counters for the individual post-scheduling transformations applied.
STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied");
STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied");
STATISTIC(RegisterTileOpts, "Number of register tiling applied");
STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied");
STATISTIC(MatMulOpts,
          "Number of matrix multiplication patterns detected and optimized");
304 
305 namespace {
/// Parameters of the micro kernel.
///
/// Parameters, which determine sizes of rank-1 (i.e., outer product) update
/// used in the optimized matrix multiplication.
struct MicroKernelParamsTy {
  int Mr; // First dimension of the rank-1 update (register tile).
  int Nr; // Second dimension of the rank-1 update (register tile).
};
314 
/// Parameters of the macro kernel.
///
/// Parameters, which determine sizes of blocks of partitioned matrices
/// used in the optimized matrix multiplication.
struct MacroKernelParamsTy {
  int Mc; // Block size of the first matrix dimension.
  int Nc; // Block size of the second matrix dimension.
  int Kc; // Block size of the common (reduction) dimension.
};
324 
/// Additional parameters of the schedule optimizer.
///
/// Target Transform Info and the SCoP dependencies used by the schedule
/// optimizer.
struct OptimizerAdditionalInfoTy {
  const llvm::TargetTransformInfo *TTI; // Target information (non-owning).
  const Dependences *D;                 // SCoP dependences (non-owning).
};
333 
/// Parameters of the matrix multiplication operands.
///
/// Parameters, which describe access relations that represent operands of the
/// matrix multiplication.
struct MatMulInfoTy {
  MemoryAccess *A = nullptr;         // Read of the first operand.
  MemoryAccess *B = nullptr;         // Read of the second operand.
  MemoryAccess *ReadFromC = nullptr; // Read of the result matrix C.
  MemoryAccess *WriteToC = nullptr;  // Write of the result matrix C.
  // Schedule dimensions of the i/j/k loops; -1 while not yet identified by
  // isMatrMultPattern.
  int i = -1;
  int j = -1;
  int k = -1;
};
347 
class ScheduleTreeOptimizer {
public:
  /// Apply schedule tree transformations.
  ///
  /// This function takes a (possibly already optimized) schedule tree and
  /// applies a set of additional optimizations on the schedule tree. The
  /// transformations applied include:
  ///
  ///   - Tiling
  ///   - Prevectorization
  ///
  /// @param Schedule The schedule object the transformations will be applied
  ///                 to.
  /// @param OAI      Target Transform Info and the SCoP dependencies.
  /// @returns        The transformed schedule.
  static isl::schedule
  optimizeSchedule(isl::schedule Schedule,
                   const OptimizerAdditionalInfoTy *OAI = nullptr);

  /// Apply schedule tree transformations.
  ///
  /// This function takes a node in a (possibly already optimized) schedule
  /// tree and applies a set of additional optimizations on this schedule tree
  /// node and its descendants. The transformations applied include:
  ///
  ///   - Tiling
  ///   - Prevectorization
  ///
  /// @param Node The schedule object post-transformations will be applied to.
  /// @param OAI  Target Transform Info and the SCoP dependencies.
  /// @returns    The transformed schedule.
  static isl::schedule_node
  optimizeScheduleNode(isl::schedule_node Node,
                       const OptimizerAdditionalInfoTy *OAI = nullptr);

  /// Decide if the @p NewSchedule is profitable for @p S.
  ///
  /// @param S           The SCoP we optimize.
  /// @param NewSchedule The new schedule we computed.
  ///
  /// @return True, if we believe @p NewSchedule is an improvement for @p S.
  static bool isProfitableSchedule(polly::Scop &S, isl::schedule NewSchedule);

  /// Isolate a set of partial tile prefixes.
  ///
  /// This set should ensure that it contains only partial tile prefixes that
  /// have exactly VectorWidth iterations.
  ///
  /// @param Node A schedule node band, which is a parent of a band node,
  ///             that contains a vector loop.
  /// @return Modified isl_schedule_node.
  static isl::schedule_node isolateFullPartialTiles(isl::schedule_node Node,
                                                    int VectorWidth);

private:
  /// Tile a schedule node.
  ///
  /// @param Node            The node to tile.
  /// @param Identifier      A name that identifies this kind of tiling and
  ///                        that is used to mark the tiled loops in the
  ///                        generated AST.
  /// @param TileSizes       A vector of tile sizes that should be used for
  ///                        tiling.
  /// @param DefaultTileSize A default tile size that is used for dimensions
  ///                        that are not covered by the TileSizes vector.
  static isl::schedule_node tileNode(isl::schedule_node Node,
                                     const char *Identifier,
                                     llvm::ArrayRef<int> TileSizes,
                                     int DefaultTileSize);

  /// Tile a schedule node and unroll point loops.
  ///
  /// @param Node            The node to register tile.
  /// @param TileSizes       A vector of tile sizes that should be used for
  ///                        tiling.
  /// @param DefaultTileSize A default tile size that is used for dimensions
  ///                        that are not covered by the TileSizes vector.
  static isl::schedule_node applyRegisterTiling(isl::schedule_node Node,
                                                llvm::ArrayRef<int> TileSizes,
                                                int DefaultTileSize);

  /// Apply the BLIS matmul optimization pattern.
  ///
  /// Make the loops containing the matrix multiplication be the innermost
  /// loops and apply the BLIS matmul optimization pattern. BLIS implements
  /// gemm as three nested loops around a macro-kernel, plus two packing
  /// routines. The macro-kernel is implemented in terms of two additional
  /// loops around a micro-kernel. The micro-kernel is a loop around a rank-1
  /// (i.e., outer product) update.
  ///
  /// For a detailed description please see [1].
  ///
  /// The order of the loops defines the data reused in the BLIS implementation
  /// of gemm ([1]). In particular, elements of the matrix B, the second
  /// operand of matrix multiplication, are reused between iterations of the
  /// innermost loop. To keep the reused data in cache, only elements of matrix
  /// A, the first operand of matrix multiplication, should be evicted during
  /// an iteration of the innermost loop. To provide such a cache replacement
  /// policy, elements of the matrix A can, in particular, be loaded first and,
  /// consequently, be least-recently-used.
  ///
  /// In our case matrices are stored in row-major order instead of
  /// column-major order used in the BLIS implementation ([1]). It affects only
  /// on the form of the BLIS micro kernel and the computation of its
  /// parameters. In particular, reused elements of the matrix B are
  /// successively multiplied by specific elements of the matrix A.
  ///
  /// Refs.:
  /// [1] - Analytical Modeling is Enough for High Performance BLIS
  /// Tze Meng Low, Francisco D Igual, Tyler M Smith, Enrique S Quintana-Orti
  /// Technical Report, 2014
  /// http://www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf
  ///
  /// @see ScheduleTreeOptimizer::createMicroKernel
  /// @see ScheduleTreeOptimizer::createMacroKernel
  /// @see getMicroKernelParams
  /// @see getMacroKernelParams
  ///
  /// TODO: Implement the packing transformation.
  ///
  /// @param Node The node that contains a band to be optimized. The node
  ///             is required to successfully pass
  ///             ScheduleTreeOptimizer::isMatrMultPattern.
  /// @param TTI  Target Transform Info.
  /// @param MMI  Parameters of the matrix multiplication operands.
  /// @returns    The transformed schedule.
  static isl::schedule_node
  optimizeMatMulPattern(isl::schedule_node Node,
                        const llvm::TargetTransformInfo *TTI,
                        MatMulInfoTy &MMI);

  /// Check if this node is a band node we want to tile.
  ///
  /// We look for innermost band nodes where individual dimensions are marked as
  /// permutable.
  ///
  /// @param Node The node to check.
  static bool isTileableBandNode(isl::schedule_node Node);

  /// Pre-vectorizes one scheduling dimension of a schedule band.
  ///
  /// prevectSchedBand splits out the dimension DimToVectorize, tiles it and
  /// sinks the resulting point loop.
  ///
  /// Example (DimToVectorize=0, VectorWidth=4):
  ///
  /// | Before transformation:
  /// |
  /// | A[i,j] -> [i,j]
  /// |
  /// | for (i = 0; i < 128; i++)
  /// |    for (j = 0; j < 128; j++)
  /// |      A(i,j);
  ///
  /// | After transformation:
  /// |
  /// | for (it = 0; it < 32; it+=1)
  /// |    for (j = 0; j < 128; j++)
  /// |      for (ip = 0; ip <= 3; ip++)
  /// |        A(4 * it + ip,j);
  ///
  /// The goal of this transformation is to create a trivially vectorizable
  /// loop.  This means a parallel loop at the innermost level that has a
  /// constant number of iterations corresponding to the target vector width.
  ///
  /// This transformation creates a loop at the innermost level. The loop has
  /// a constant number of iterations, if the number of loop iterations at
  /// DimToVectorize can be divided by VectorWidth. The default VectorWidth is
  /// currently constant and not yet target specific. This function does not
  /// reason about parallelism.
  static isl::schedule_node prevectSchedBand(isl::schedule_node Node,
                                             unsigned DimToVectorize,
                                             int VectorWidth);

  /// Apply additional optimizations on the bands in the schedule tree.
  ///
  /// We are looking for an innermost band node and apply the following
  /// transformations:
  ///
  ///  - Tile the band
  ///      - if the band is tileable
  ///      - if the band has more than one loop dimension
  ///
  ///  - Prevectorize the schedule of the band (or the point loop in case of
  ///    tiling).
  ///      - if vectorization is enabled
  ///
  /// @param Node The schedule node to (possibly) optimize.
  /// @param User A pointer to forward some use information
  ///        (currently unused).
  static isl_schedule_node *optimizeBand(isl_schedule_node *Node, void *User);

  /// Apply additional optimizations on the bands in the schedule tree.
  ///
  /// We apply the following
  /// transformations:
  ///
  ///  - Tile the band
  ///  - Prevectorize the schedule of the band (or the point loop in case of
  ///    tiling).
  ///      - if vectorization is enabled
  ///
  /// @param Node The schedule node to (possibly) optimize.
  /// @param User A pointer to forward some use information
  ///        (currently unused).
  static isl::schedule_node standardBandOpts(isl::schedule_node Node,
                                             void *User);

  /// Check if this node contains a partial schedule that could
  ///        probably be optimized with analytical modeling.
  ///
  /// isMatrMultPattern tries to determine whether the following conditions
  /// are true:
  /// 1. the partial schedule contains only one statement.
  /// 2. there are exactly three input dimensions.
  /// 3. all memory accesses of the statement will have stride 0 or 1, if we
  ///    interchange loops (switch the variable used in the inner loop to
  ///    the outer loop).
  /// 4. all memory accesses of the statement except from the last one, are
  ///    read memory access and the last one is write memory access.
  /// 5. all subscripts of the last memory access of the statement don't
  ///    contain the variable used in the inner loop.
  /// If this is the case, we could try to use an approach that is similar to
  /// the one used to get close-to-peak performance of matrix multiplications.
  ///
  /// @param Node The node to check.
  /// @param D    The SCoP dependencies.
  /// @param MMI  Parameters of the matrix multiplication operands.
  static bool isMatrMultPattern(isl::schedule_node Node,
                                const polly::Dependences *D, MatMulInfoTy &MMI);

  /// Create the BLIS macro-kernel.
  ///
  /// We create the BLIS macro-kernel by applying a combination of tiling
  /// of dimensions of the band node and interchanging of two innermost
  /// modified dimensions. The values of MacroKernelParams's fields are used
  /// as tile sizes.
  ///
  /// @param Node The schedule node to be modified.
  /// @param MacroKernelParams Parameters of the macro kernel
  ///                          to be used as tile sizes.
  static isl::schedule_node
  createMacroKernel(isl::schedule_node Node,
                    MacroKernelParamsTy MacroKernelParams);

  /// Create the BLIS micro-kernel.
  ///
  /// We create the BLIS micro-kernel by applying a combination of tiling
  /// of dimensions of the band node and interchanging of two innermost
  /// modified dimensions. The values passed in MicroKernelParams are used
  /// as tile sizes.
  ///
  /// @param Node The schedule node to be modified.
  /// @param MicroKernelParams Parameters of the micro kernel
  ///                          to be used as tile sizes.
  /// @see MicroKernelParamsTy
  static isl::schedule_node
  createMicroKernel(isl::schedule_node Node,
                    MicroKernelParamsTy MicroKernelParams);
};
607 
608 /// Create an isl::union_set, which describes the isolate option based on
609 /// IsolateDomain.
610 ///
611 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
612 ///                      belong to the current band node.
613 /// @param OutDimsNum    A number of dimensions that should belong to
614 ///                      the current band node.
615 static isl::union_set getIsolateOptions(isl::set IsolateDomain,
616                                         isl_size OutDimsNum) {
617   isl_size Dims = IsolateDomain.dim(isl::dim::set);
618   assert(OutDimsNum <= Dims &&
619          "The isl::set IsolateDomain is used to describe the range of schedule "
620          "dimensions values, which should be isolated. Consequently, the "
621          "number of its dimensions should be greater than or equal to the "
622          "number of the schedule dimensions.");
623   isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
624   IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
625                                               Dims - OutDimsNum, OutDimsNum);
626   isl::set IsolateOption = IsolateRelation.wrap();
627   isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
628   IsolateOption = IsolateOption.set_tuple_id(Id);
629   return isl::union_set(IsolateOption);
630 }
631 
632 /// Create an isl::union_set, which describes the specified option for the
633 /// dimension of the current node.
634 ///
635 /// @param Ctx    An isl::ctx, which is used to create the isl::union_set.
636 /// @param Option The name of the option.
637 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) {
638   isl::space Space(Ctx, 0, 1);
639   auto DimOption = isl::set::universe(Space);
640   auto Id = isl::id::alloc(Ctx, Option, nullptr);
641   DimOption = DimOption.set_tuple_id(Id);
642   return isl::union_set(DimOption);
643 }
644 
645 /// Create an isl::union_set, which describes the option of the form
646 /// [isolate[] -> unroll[x]].
647 ///
648 /// @param Ctx An isl::ctx, which is used to create the isl::union_set.
649 static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
650   isl::space Space = isl::space(Ctx, 0, 0, 1);
651   isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
652   isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
653   isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
654   UnrollIsolatedSetOption =
655       UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
656   UnrollIsolatedSetOption =
657       UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
658   return UnrollIsolatedSetOption.wrap();
659 }
660 
661 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
662 ///
663 /// @param Set         A set, which should be modified.
664 /// @param VectorWidth A parameter, which determines the constraint.
665 static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
666   unsigned Dims = Set.dim(isl::dim::set);
667   isl::space Space = Set.get_space();
668   isl::local_space LocalSpace = isl::local_space(Space);
669   isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
670   ExtConstr = ExtConstr.set_constant_si(0);
671   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
672   Set = Set.add_constraint(ExtConstr);
673   ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
674   ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
675   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
676   return Set.add_constraint(ExtConstr);
677 }
678 } // namespace
679 
680 isl::set polly::getPartialTilePrefixes(isl::set ScheduleRange,
681                                        int VectorWidth) {
682   isl_size Dims = ScheduleRange.dim(isl::dim::set);
683   isl::set LoopPrefixes =
684       ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
685   auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
686   isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
687   BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
688   LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
689   return LoopPrefixes.subtract(BadPrefixes);
690 }
691 
692 namespace {
/// Isolate the full tiles of a vector-tiled band from the partial ones.
///
/// Computes, from the prefix schedule of the point band, the set of outer
/// iterations that form complete tiles of width @p VectorWidth and attaches
/// an "isolate" AST build option for them (plus an "atomic" option for the
/// current dimension) to the band node.
///
/// @param Node        The tiled band node (asserted below).
/// @param VectorWidth The width used when tiling the band.
isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Descend two levels to compute a prefix schedule that includes the tile
  // dimensions.
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::union_set ScheduleRangeUSet = SchedRelUMap.range();
  // The range is converted to a single isl::set here; assumes the prefix
  // schedule range lives in one space.
  isl::set ScheduleRange{ScheduleRangeUSet};
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  // Return to the original band before attaching the options.
  Node = Node.parent().parent();
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}
709 
/// Strip-mine band dimension @p DimToVectorize by @p VectorWidth to enable
/// inner-loop vectorization, isolate the full tiles, and mark the resulting
/// loop with a "SIMD" mark node.
///
/// @param Node           The band node containing the dimension.
/// @param DimToVectorize The index of the band dimension to vectorize.
/// @param VectorWidth    The strip-mine (vector) width.
isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  isl_size ScheduleDimensions = Space.dim(isl::dim::set);
  assert((isl_size)DimToVectorize < ScheduleDimensions);

  // Split off the dimensions before the one to vectorize, ...
  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  // ... and then any trailing dimensions, so the target dimension forms a
  // band of its own.
  if ((isl_size)DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  // Strip-mine the single-dimension band by VectorWidth.
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position of the subtree.
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  // Mark the loop so later passes/backends can identify it as SIMD.
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  PrevectOpts++;
  return Node.insert_mark(LoopMarker);
}
744 
/// Tile the band node @p Node and surround the result with mark nodes.
///
/// The tile band is preceded by a mark named "<Identifier> - Tiles" and the
/// point band by a mark named "<Identifier> - Points".
///
/// @param Node            The band node to tile.
/// @param Identifier      Prefix used for the inserted mark-node names.
/// @param TileSizes       Per-dimension tile sizes; dimensions beyond
///                        TileSizes.size() fall back to @p DefaultTileSize.
/// @param DefaultTileSize The fallback tile size.
/// @return The point band node (child of the "- Points" mark).
isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
                                                   const char *Identifier,
                                                   ArrayRef<int> TileSizes,
                                                   int DefaultTileSize) {
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);
  auto Sizes = isl::multi_val::zero(Space);
  std::string IdentifierString(Identifier);
  // Assemble the per-dimension tile sizes, padding with the default.
  for (auto i : seq<isl_size>(0, Dims)) {
    auto tileSize =
        i < (isl_size)TileSizes.size() ? TileSizes[i] : DefaultTileSize;
    Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
  }
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
  auto TileLoopMarker =
      isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
  Node = Node.insert_mark(TileLoopMarker);
  Node = Node.child(0);
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = Node.child(0);
  auto PointLoopMarkerStr = IdentifierString + " - Points";
  auto PointLoopMarker =
      isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
  Node = Node.insert_mark(PointLoopMarker);
  return Node.child(0);
}
772 
773 isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
774     isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
775   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
776   auto Ctx = Node.get_ctx();
777   return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
778 }
779 
780 static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
781   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
782   assert(isl_schedule_node_n_children(Node.get()) == 1);
783 
784   auto ChildType = isl_schedule_node_get_type(Node.child(0).get());
785 
786   if (ChildType == isl_schedule_node_leaf)
787     return true;
788 
789   if (ChildType != isl_schedule_node_sequence)
790     return false;
791 
792   auto Sequence = Node.child(0);
793 
794   for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc;
795        ++c) {
796     auto Child = Sequence.child(c);
797     if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter)
798       return false;
799     if (isl_schedule_node_get_type(Child.child(0).get()) !=
800         isl_schedule_node_leaf)
801       return false;
802   }
803   return true;
804 }
805 
806 bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
807   if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
808     return false;
809 
810   if (isl_schedule_node_n_children(Node.get()) != 1)
811     return false;
812 
813   if (!isl_schedule_node_band_get_permutable(Node.get()))
814     return false;
815 
816   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
817   auto Dims = Space.dim(isl::dim::set);
818 
819   if (Dims <= 1)
820     return false;
821 
822   return isSimpleInnermostBand(Node);
823 }
824 
/// Apply the standard band optimizations: cache-level tiling, register
/// tiling and prevectorization, each controlled by its command-line option.
///
/// @param Node The band node to optimize.
/// @param User Unused callback payload.
__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling) {
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);
    FirstLevelTileOpts++;
  }

  if (SecondLevelTiling) {
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);
    SecondLevelTileOpts++;
  }

  if (RegisterTiling) {
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
    RegisterTileOpts++;
  }

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // Prevectorize the innermost coincident (parallel) band dimension, if any.
  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}
859 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert((isl_size)DstPos < Map.dim(DimType) &&
         (isl_size)SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  // Remember the tuple ids so they can be restored after the moves below.
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  // Use the opposite tuple (in vs. out) as temporary storage for the two
  // dimensions being swapped.
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Move both dimensions out (larger index first, so the smaller index stays
  // valid) and reinsert each at the other's position.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  // Restore the saved tuple ids.
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}
895 
896 /// Check the form of the access relation.
897 ///
898 /// Check that the access relation @p AccMap has the form M[i][j], where i
899 /// is a @p FirstPos and j is a @p SecondPos.
900 ///
901 /// @param AccMap    The access relation to be checked.
902 /// @param FirstPos  The index of the input dimension that is mapped to
903 ///                  the first output dimension.
904 /// @param SecondPos The index of the input dimension that is mapped to the
905 ///                  second output dimension.
906 /// @return          True in case @p AccMap has the expected form and false,
907 ///                  otherwise.
908 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
909                                int &SecondPos) {
910   isl::space Space = AccMap.get_space();
911   isl::map Universe = isl::map::universe(Space);
912 
913   if (Space.dim(isl::dim::out) != 2)
914     return false;
915 
916   // MatMul has the form:
917   // for (i = 0; i < N; i++)
918   //   for (j = 0; j < M; j++)
919   //     for (k = 0; k < P; k++)
920   //       C[i, j] += A[i, k] * B[k, j]
921   //
922   // Permutation of three outer loops: 3! = 6 possibilities.
923   int FirstDims[] = {0, 0, 1, 1, 2, 2};
924   int SecondDims[] = {1, 2, 2, 0, 0, 1};
925   for (int i = 0; i < 6; i += 1) {
926     auto PossibleMatMul =
927         Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
928             .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
929 
930     AccMap = AccMap.intersect_domain(Domain);
931     PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
932 
933     // If AccMap spans entire domain (Non-partial write),
934     // compute FirstPos and SecondPos.
935     // If AccMap != PossibleMatMul here (the two maps have been gisted at
936     // this point), it means that the writes are not complete, or in other
937     // words, it is a Partial write and Partial writes must be rejected.
938     if (AccMap.is_equal(PossibleMatMul)) {
939       if (FirstPos != -1 && FirstPos != FirstDims[i])
940         continue;
941       FirstPos = FirstDims[i];
942       if (SecondPos != -1 && SecondPos != SecondDims[i])
943         continue;
944       SecondPos = SecondDims[i];
945       return true;
946     }
947   }
948 
949   return false;
950 }
951 
952 /// Does the memory access represent a non-scalar operand of the matrix
953 /// multiplication.
954 ///
955 /// Check that the memory access @p MemAccess is the read access to a non-scalar
956 /// operand of the matrix multiplication or its result.
957 ///
958 /// @param MemAccess The memory access to be checked.
959 /// @param MMI       Parameters of the matrix multiplication operands.
960 /// @return          True in case the memory access represents the read access
961 ///                  to a non-scalar operand of the matrix multiplication and
962 ///                  false, otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  // Only array reads can be matmul operands.
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
    return false;
  auto AccMap = MemAccess->getLatestAccessRelation();
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
  // Match against the three reads of C[i][j] += A[i][k] * B[k][j].
  // Each operand slot in MMI is filled at most once.
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
    MMI.ReadFromC = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
    MMI.A = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
    MMI.B = MemAccess;
    return true;
  }
  return false;
}
983 
984 /// Check accesses to operands of the matrix multiplication.
985 ///
986 /// Check that accesses of the SCoP statement, which corresponds to
987 /// the partial schedule @p PartialSchedule, are scalar in terms of loops
988 /// containing the matrix multiplication, in case they do not represent
989 /// accesses to the non-scalar operands of the matrix multiplication or
990 /// its result.
991 ///
992 /// @param  PartialSchedule The partial schedule of the SCoP statement.
993 /// @param  MMI             Parameters of the matrix multiplication operands.
994 /// @return                 True in case the corresponding SCoP statement
995 ///                         represents matrix multiplication and false,
996 ///                         otherwise.
997 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
998                                     MatMulInfoTy &MMI) {
999   auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
1000   auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
1001   isl_size OutDimNum = PartialSchedule.dim(isl::dim::out);
1002   assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
1003                           "and, consequently, the corresponding scheduling "
1004                           "functions have at least three dimensions.");
1005   auto MapI =
1006       permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
1007   auto MapJ =
1008       permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
1009   auto MapK =
1010       permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
1011 
1012   auto Accesses = getAccessesInOrder(*Stmt);
1013   for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
1014     auto *MemAccessPtr = *MemA;
1015     if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
1016         !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
1017         !(MemAccessPtr->isStrideZero(MapI)) &&
1018         MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK))
1019       return false;
1020   }
1021   return true;
1022 }
1023 
1024 /// Check for dependencies corresponding to the matrix multiplication.
1025 ///
1026 /// Check that there is only true dependence of the form
1027 /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
1028 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
1029 /// to the dependency produced by the matrix multiplication.
1030 ///
1031 /// @param  Schedule The schedule of the SCoP statement.
1032 /// @param  D The SCoP dependencies.
1033 /// @param  Pos The parameter to describe an acceptable true dependence.
1034 ///             In case it has a negative value, try to determine its
1035 ///             acceptable value.
1036 /// @return True in case dependencies correspond to the matrix multiplication
1037 ///         and false, otherwise.
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
                                  int &Pos) {
  isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
  isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
  // Consider reduction dependences together with the true dependences.
  if (Red)
    Dep = Dep.unite(Red);
  // Restrict to self-dependences of this statement and compute the
  // per-dimension dependence distances.
  auto DomainSpace = Schedule.get_space().domain();
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
  auto Deltas = Dep.extract_map(Space).deltas();
  isl_size DeltasDimNum = Deltas.dim(isl::dim::set);
  for (int i = 0; i < DeltasDimNum; i++) {
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
    // If Pos is still undetermined (< 0), the first dimension with a fixed
    // distance of one becomes the candidate position.
    Pos = Pos < 0 && Val.is_one() ? i : Pos;
    // Every other dimension must have a fixed distance of zero; a NaN means
    // the distance is not fixed at all.
    if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
      return false;
  }
  // Require at least one dimension and a determined carried position.
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}
1058 
1059 /// Check if the SCoP statement could probably be optimized with analytical
1060 /// modeling.
1061 ///
1062 /// containsMatrMult tries to determine whether the following conditions
1063 /// are true:
1064 /// 1. The last memory access modeling an array, MA1, represents writing to
1065 ///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
1066 ///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
1067 ///    under consideration.
1068 /// 2. There is only one loop-carried true dependency, and it has the
1069 ///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
1070 ///    loop-carried or anti dependencies.
1071 /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
1072 ///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
1073 ///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
1074 ///    and all memory accesses of the SCoP that are different from MA1, MA2,
1075 ///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
1076 ///    of loops i1, i2 and i3.
1077 ///
1078 /// @param PartialSchedule The PartialSchedule that contains a SCoP statement
1079 ///        to check.
1080 /// @D     The SCoP dependencies.
1081 /// @MMI   Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  // A matrix multiplication statement needs more than one memory access.
  if (Stmt->size() <= 1)
    return false;

  // Walk backwards to the last array access and require it to be the write
  // to C of the form S(..., i, ..., j, ...) -> M(i, j).
  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // The only loop-carried dependence must be the reduction over k.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // All other accesses must be the operand reads or scalar accesses.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three operand reads must have been found.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
1113 
1114 /// Permute two dimensions of the band node.
1115 ///
1116 /// Permute FirstDim and SecondDim dimensions of the Node.
1117 ///
1118 /// @param Node The band node to be modified.
1119 /// @param FirstDim The first dimension to be permuted.
1120 /// @param SecondDim The second dimension to be permuted.
1121 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
1122                                                     unsigned FirstDim,
1123                                                     unsigned SecondDim) {
1124   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
1125          (unsigned)isl_schedule_node_band_n_member(Node.get()) >
1126              std::max(FirstDim, SecondDim));
1127   auto PartialSchedule =
1128       isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
1129   auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
1130   auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
1131   PartialSchedule =
1132       PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
1133   PartialSchedule =
1134       PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
1135   Node = isl::manage(isl_schedule_node_delete(Node.release()));
1136   return Node.insert_partial_schedule(PartialSchedule);
1137 }
1138 
1139 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
1140     isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
1141   Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
1142                              1);
1143   Node = Node.parent().parent();
1144   return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
1145 }
1146 
/// Tile the three innermost band dimensions by Mc, Nc and Kc and reorder
/// them to form the BLIS-style macro kernel.
isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
    isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Tiling everything by one would be a no-op.
  if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
      MacroKernelParams.Kc == 1)
    return Node;
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  // Only the three innermost dimensions are tiled (by Mc, Nc, Kc); all
  // outer dimensions get a tile size of one.
  std::vector<int> TileSizes(DimOutNum, 1);
  TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
  TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
  TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
  Node = tileNode(Node, "1st level tiling", TileSizes, 1);
  Node = Node.parent().parent();
  // Reorder the tile dimensions into the loop order used by the macro
  // kernel.
  Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
  Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);

  // Mark the outermost loop as parallelizable.
  Node = Node.band_member_set_coincident(0, true);

  return Node.child(0).child(0);
}
1168 
1169 /// Get the size of the widest type of the matrix multiplication operands
1170 /// in bytes, including alignment padding.
1171 ///
1172 /// @param MMI Parameters of the matrix multiplication operands.
1173 /// @return The size of the widest type of the matrix multiplication operands
1174 ///         in bytes, including alignment padding.
1175 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
1176   auto *S = MMI.A->getStatement()->getParent();
1177   auto &DL = S->getFunction().getParent()->getDataLayout();
1178   auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
1179   auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
1180   auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
1181   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
1182 }
1183 
1184 /// Get the size of the widest type of the matrix multiplication operands
1185 /// in bits.
1186 ///
1187 /// @param MMI Parameters of the matrix multiplication operands.
1188 /// @return The size of the widest type of the matrix multiplication operands
1189 ///         in bits.
1190 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
1191   auto *S = MMI.A->getStatement()->getParent();
1192   auto &DL = S->getFunction().getParent()->getDataLayout();
1193   auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
1194   auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
1195   auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
1196   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
1197 }
1198 
1199 /// Get parameters of the BLIS micro kernel.
1200 ///
1201 /// We choose the Mr and Nr parameters of the micro kernel to be large enough
1202 /// such that no stalls caused by the combination of latencies and dependencies
1203 /// are introduced during the updates of the resulting matrix of the matrix
1204 /// multiplication. However, they should also be as small as possible to
1205 /// release more registers for entries of multiplied matrices.
1206 ///
1207 /// @param TTI Target Transform Info.
1208 /// @param MMI Parameters of the matrix multiplication operands.
1209 /// @return The structure of type MicroKernelParamsTy.
1210 /// @see MicroKernelParamsTy
1211 static struct MicroKernelParamsTy
1212 getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
1213   assert(TTI && "The target transform info should be provided.");
1214 
1215   // Nvec - Number of double-precision floating-point numbers that can be hold
1216   // by a vector register. Use 2 by default.
1217   long RegisterBitwidth = VectorRegisterBitwidth;
1218 
1219   if (RegisterBitwidth == -1)
1220     RegisterBitwidth = TTI->getRegisterBitWidth(true);
1221   auto ElementSize = getMatMulTypeSize(MMI);
1222   assert(ElementSize > 0 && "The element size of the matrix multiplication "
1223                             "operands should be greater than zero.");
1224   auto Nvec = RegisterBitwidth / ElementSize;
1225   if (Nvec == 0)
1226     Nvec = 2;
1227   int Nr = ceil(sqrt((double)(Nvec * LatencyVectorFma * ThroughputVectorFma)) /
1228                 Nvec) *
1229            Nvec;
1230   int Mr = ceil((double)(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr));
1231   return {Mr, Nr};
1232 }
1233 
1234 /// Determine parameters of the target cache.
1235 ///
1236 /// @param TTI Target Transform Info.
void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
  auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
  auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
  // Each parameter is only computed when it has not been set on the command
  // line (-1 means unset): prefer the value reported by TTI and fall back to
  // the compile-time default otherwise.
  if (FirstCacheLevelSize == -1) {
    if (TTI->getCacheSize(L1DCache).hasValue())
      FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
    else
      FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
  }
  if (SecondCacheLevelSize == -1) {
    if (TTI->getCacheSize(L2DCache).hasValue())
      SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
    else
      SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
  }
  if (FirstCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L1DCache).hasValue())
      FirstCacheLevelAssociativity =
          TTI->getCacheAssociativity(L1DCache).getValue();
    else
      FirstCacheLevelAssociativity =
          static_cast<int>(FirstCacheLevelDefaultAssociativity);
  }
  if (SecondCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L2DCache).hasValue())
      SecondCacheLevelAssociativity =
          TTI->getCacheAssociativity(L2DCache).getValue();
    else
      SecondCacheLevelAssociativity =
          static_cast<int>(SecondCacheLevelDefaultAssociativity);
  }
}
1269 
1270 /// Get parameters of the BLIS macro kernel.
1271 ///
1272 /// During the computation of matrix multiplication, blocks of partitioned
1273 /// matrices are mapped to different layers of the memory hierarchy.
1274 /// To optimize data reuse, blocks should be ideally kept in cache between
1275 /// iterations. Since parameters of the macro kernel determine sizes of these
1276 /// blocks, there are upper and lower bounds on these parameters.
1277 ///
1278 /// @param TTI Target Transform Info.
1279 /// @param MicroKernelParams Parameters of the micro-kernel
1280 ///                          to be taken into account.
1281 /// @param MMI Parameters of the matrix multiplication operands.
1282 /// @return The structure of type MacroKernelParamsTy.
1283 /// @see MacroKernelParamsTy
1284 /// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car: number of first-level cache ways available for one operand
  // (derivation follows the analytical model referenced above).
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent division by Cac==0 by early returning
  // if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Kc, Mc and Nc are the macro-kernel tile sizes; Cac is the fraction of
  // second-level cache ways consumed per Kc-wide operand panel.
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be  greater than zero");
  return {Mc, Nc, Kc};
}
1328 
1329 /// Create an access relation that is specific to
1330 ///        the matrix multiplication pattern.
1331 ///
1332 /// Create an access relation of the following form:
1333 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1334 /// where I is @p FirstDim, J is @p SecondDim.
1335 ///
1336 /// It can be used, for example, to create relations that helps to consequently
1337 /// access elements of operands of a matrix multiplication after creation of
1338 /// the BLIS micro and macro kernels.
1339 ///
1340 /// @see ScheduleTreeOptimizer::createMicroKernel
1341 /// @see ScheduleTreeOptimizer::createMacroKernel
1342 ///
1343 /// Subsequently, the described access relation is applied to the range of
1344 /// @p MapOldIndVar, that is used to map original induction variables to
1345 /// the ones, which are produced by schedule transformations. It helps to
1346 /// define relations using a new space and, at the same time, keep them
1347 /// in the original one.
1348 ///
1349 /// @param MapOldIndVar The relation, which maps original induction variables
1350 ///                     to the ones, which are produced by schedule
1351 ///                     transformations.
1352 /// @param FirstDim, SecondDim The input dimensions that are used to define
1353 ///        the specified access relation.
1354 /// @return The specified access relation.
1355 isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
1356                          unsigned SecondDim) {
1357   auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
1358   auto AccessRel = isl::map::universe(AccessRelSpace);
1359   AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
1360   AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
1361   AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
1362   return MapOldIndVar.apply_range(AccessRel);
1363 }
1364 
1365 isl::schedule_node createExtensionNode(isl::schedule_node Node,
1366                                        isl::map ExtensionMap) {
1367   auto Extension = isl::union_map(ExtensionMap);
1368   auto NewNode = isl::schedule_node::from_extension(Extension);
1369   return Node.graft_before(NewNode);
1370 }
1371 
/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires to introduce a new array, copy data
/// to the array, and change memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read in-stride
/// access, aligned to cache lines boundaries, and preloaded into certain cache
/// levels.
///
/// As an example let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  // The input tuple of MapOldIndVar carries the id of the SCoP statement that
  // contains the matrix multiplication.
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Ascend six levels in the schedule tree and split off the two outermost
  // band members, so the copy statements can be grafted at the right depth.
  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
  auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7);
  // Packed_B gets the shape [Nc/Nr][Kc][Nr].
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  auto OldAcc = MMI.B->getLatestAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Keep only the two outermost schedule dimensions and reverse the mapping
  // to obtain the iteration domain of the copy statement.
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
                                         MapOldIndVar.dim(isl::dim::out) - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  auto Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto DomainId = Domain.get_tuple_id();
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = Node.child(0);
  AccRel = getMatMulAccRel(MapOldIndVar, 4, 6);
  // Packed_A gets the shape [Mc/Mr][Kc][Mr]; SecondDimSize (Kc) is reused.
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  OldAcc = MMI.A->getLatestAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // This time keep the three outermost schedule dimensions.
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
                                    MapOldIndVar.dim(isl::dim::out) - 3);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position of the original (micro-kernel) band.
  return Node.child(0).child(0).child(0).child(0).child(0);
}
1469 
1470 /// Get a relation mapping induction variables produced by schedule
1471 /// transformations to the original ones.
1472 ///
1473 /// @param Node The schedule node produced as the result of creation
1474 ///        of the BLIS kernels.
1475 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1476 ///                                             to be taken into account.
1477 /// @return  The relation mapping original induction variables to the ones
1478 ///          produced by schedule transformation.
1479 /// @see ScheduleTreeOptimizer::createMicroKernel
1480 /// @see ScheduleTreeOptimizer::createMacroKernel
1481 /// @see getMacroKernelParams
1482 isl::map
1483 getInductionVariablesSubstitution(isl::schedule_node Node,
1484                                   MicroKernelParamsTy MicroKernelParams,
1485                                   MacroKernelParamsTy MacroKernelParams) {
1486   auto Child = Node.child(0);
1487   auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1488   auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1489   if (MapOldIndVar.dim(isl::dim::out) > 9)
1490     return MapOldIndVar.project_out(isl::dim::out, 0,
1491                                     MapOldIndVar.dim(isl::dim::out) - 9);
1492   return MapOldIndVar;
1493 }
1494 
/// Isolate a set of partial tile prefixes and unroll the isolated part.
///
/// The set should ensure that it contains only partial tile prefixes that have
/// exactly Mr x Nr iterations of the two innermost loops produced by
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
/// the micro-kernel.
///
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
/// innermost loops, using the SLP vectorizer.
///
/// @param Node              The schedule node to be modified.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  isl::schedule_node Child = Node.get_child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  isl_size Dims = Prefix.dim(isl::dim::set);
  // Drop the innermost dimension, then restrict to prefixes that correspond
  // to full Nr x Mr micro-kernel tiles.
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // On the current band: isolate the full tiles and mark their dimensions
  // for unrolling.
  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.get_ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.band_set_ast_build_options(Options);
  // Three levels up: isolate the same prefixes and request "separate" code
  // generation for the remaining dimensions.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.band_set_ast_build_options(Options);
  // Return to the original position in the tree.
  Node = Node.child(0).child(0).child(0);
  return Node;
}
1533 
1534 /// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1535 ///
1536 /// @param Node The child of the mark node to be inserted.
1537 /// @param BasePtr The pointer to be marked.
1538 /// @return The modified isl_schedule_node.
1539 static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1540                                                       Value *BasePtr) {
1541   if (!BasePtr)
1542     return Node;
1543 
1544   auto Id =
1545       isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1546   return Node.insert_mark(Id).child(0);
1547 }
1548 
1549 /// Insert "Loop Vectorizer Disabled" mark node.
1550 ///
1551 /// @param Node The child of the mark node to be inserted.
1552 /// @return The modified isl_schedule_node.
1553 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1554   auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
1555   return Node.insert_mark(Id).child(0);
1556 }
1557 
/// Restore the initial ordering of dimensions of the band node
///
/// In case the band node represents all the dimensions of the iteration
/// domain, recreate the band node to restore the initial ordering of the
/// dimensions.
///
/// @param Node The band node to be modified.
/// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Only rewrite innermost bands (whose only child is a leaf).
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // Only rewrite outermost bands that cover every dimension of the single
  // iteration domain.
  if (Node.get_schedule_depth() != 0 ||
      (isl::set(Domain).dim(isl::dim::set) !=
       isl_schedule_node_band_n_member(Node.get())))
    return Node;
  // Remove the current band and insert an identity partial schedule over the
  // domain, which yields the original dimension order.
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  // Drop the tuple id so the partial schedule is anonymous.
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}
1585 
/// Optimize the band node @p Node that matches the matrix multiplication
/// pattern.
///
/// Permute the band so that the i, j and k dimensions of the multiplication
/// become the three innermost band members, create the BLIS macro- and
/// micro-kernels, and — unless a macro-kernel parameter degenerates to 1 —
/// isolate/unroll the innermost loops and apply the packing transformation.
///
/// @param Node The band node to optimize.
/// @param TTI  Target information used to compute the kernel parameters.
/// @param MMI  Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
isl::schedule_node
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
                                             const TargetTransformInfo *TTI,
                                             MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move i, j, k to the last three band positions. Each permutation may move
  // a not-yet-placed dimension, so track the new positions of j and k.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // A macro-kernel parameter of 1 means no blocking in that dimension; the
  // packing transformation is not applied in that case.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1620 
1621 bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1622                                               const Dependences *D,
1623                                               MatMulInfoTy &MMI) {
1624   auto PartialSchedule = isl::manage(
1625       isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1626   Node = Node.child(0);
1627   auto LeafType = isl_schedule_node_get_type(Node.get());
1628   Node = Node.parent();
1629   if (LeafType != isl_schedule_node_leaf ||
1630       isl_schedule_node_band_n_member(Node.get()) < 3 ||
1631       Node.get_schedule_depth() != 0 ||
1632       isl_union_map_n_map(PartialSchedule.get()) != 1)
1633     return false;
1634   auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1635   if (containsMatrMult(NewPartialSchedule, D, MMI))
1636     return true;
1637   return false;
1638 }
1639 
/// Optimize a single band node.
///
/// Callback for isl_schedule_node_map_descendant_bottom_up. Applies either
/// the matrix-multiplication optimization or the standard band optimizations
/// to tileable bands; other nodes are returned unmodified.
///
/// @param Node The schedule node (ownership is taken and returned).
/// @param User A const OptimizerAdditionalInfoTy* carrying dependences and
///             target information.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
                                    void *User) {
  // manage_copy: the check borrows the node; ownership stays with Node.
  if (!isTileableBandNode(isl::manage_copy(Node)))
    return Node;

  const OptimizerAdditionalInfoTy *OAI =
      static_cast<const OptimizerAdditionalInfoTy *>(User);

  MatMulInfoTy MMI;
  if (PMBasedOpts && User &&
      isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) {
    LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
    MatMulOpts++;
    // manage: ownership of Node transfers into the optimization, release
    // hands it back to the raw-pointer interface.
    return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
  }

  return standardBandOpts(isl::manage(Node), User).release();
}
1659 
1660 isl::schedule
1661 ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1662                                         const OptimizerAdditionalInfoTy *OAI) {
1663   auto Root = Schedule.get_root();
1664   Root = optimizeScheduleNode(Root, OAI);
1665   return Root.get_schedule();
1666 }
1667 
1668 isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1669     isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1670   Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1671       Node.release(), optimizeBand,
1672       const_cast<void *>(static_cast<const void *>(OAI))));
1673   return Node;
1674 }
1675 
1676 bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1677                                                  isl::schedule NewSchedule) {
1678   // To understand if the schedule has been optimized we check if the schedule
1679   // has changed at all.
1680   // TODO: We can improve this by tracking if any necessarily beneficial
1681   // transformations have been performed. This can e.g. be tiling, loop
1682   // interchange, or ...) We can track this either at the place where the
1683   // transformation has been performed or, in case of automatic ILP based
1684   // optimizations, by comparing (yet to be defined) performance metrics
1685   // before/after the scheduling optimizer
1686   // (e.g., #stride-one accesses)
1687   auto NewScheduleMap = NewSchedule.get_map();
1688   auto OldSchedule = S.getSchedule();
1689   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1690                         "that make Scop::getSchedule() return nullptr.");
1691   bool changed = !OldSchedule.is_equal(NewScheduleMap);
1692   return changed;
1693 }
1694 
/// Legacy pass manager wrapper around the isl schedule optimizer.
class IslScheduleOptimizerWrapperPass : public ScopPass {
public:
  static char ID;

  explicit IslScheduleOptimizerWrapperPass() : ScopPass(ID) {}

  ~IslScheduleOptimizerWrapperPass() override { releaseMemory(); }

  /// Optimize the schedule of the SCoP @p S.
  bool runOnScop(Scop &S) override;

  /// Print the new schedule for the SCoP @p S.
  void printScop(raw_ostream &OS, Scop &S) const override;

  /// Register all analyses and transformation required.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Release the internal memory.
  void releaseMemory() override {
    // Drop the schedule before the context so LastSchedule never outlives
    // the isl_ctx its objects belong to.
    LastSchedule = nullptr;
    IslCtx.reset();
  }

private:
  // Shared ownership of the isl context, kept alive for LastSchedule.
  std::shared_ptr<isl_ctx> IslCtx;
  // Schedule computed by the last runOnScop invocation; printed by printScop.
  isl::schedule LastSchedule;
};
1722 
// Pass identifier; LLVM's pass infrastructure uses its address as a unique id.
char IslScheduleOptimizerWrapperPass::ID = 0;
1724 
1725 static void printSchedule(llvm::raw_ostream &OS, const isl::schedule &Schedule,
1726                           StringRef Desc) {
1727   isl::ctx Ctx = Schedule.get_ctx();
1728   isl_printer *P = isl_printer_to_str(Ctx.get());
1729   P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
1730   P = isl_printer_print_schedule(P, Schedule.get());
1731   char *Str = isl_printer_get_str(P);
1732   OS << Desc << ": \n" << Str << "\n";
1733   free(Str);
1734   isl_printer_free(P);
1735 }
1736 
/// Collect statistics for the schedule tree.
///
/// @param Schedule The schedule tree to analyze. If not a schedule tree it is
/// ignored.
/// @param Version  The version of the schedule tree that is analyzed.
///                 0 for the original schedule tree before any transformation.
///                 1 for the schedule tree after isl's rescheduling.
///                 2 for the schedule tree after optimizations are applied
///                 (tiling, pattern matching)
static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
  auto Root = Schedule.get_root();
  if (!Root)
    return;

  isl_schedule_node_foreach_descendant_top_down(
      Root.get(),
      [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
        isl::schedule_node Node = isl::manage_copy(nodeptr);
        int Version = *static_cast<int *>(user);

        switch (isl_schedule_node_get_type(Node.get())) {
        case isl_schedule_node_band: {
          // Count bands, permutable bands, band members and coincident
          // members, each bucketed by schedule-tree version.
          NumBands[Version]++;
          if (isl_schedule_node_band_get_permutable(Node.get()) ==
              isl_bool_true)
            NumPermutable[Version]++;

          int CountMembers = isl_schedule_node_band_n_member(Node.get());
          NumBandMembers[Version] += CountMembers;
          for (int i = 0; i < CountMembers; i += 1) {
            if (Node.band_member_get_coincident(i))
              NumCoincident[Version]++;
          }
          break;
        }

        case isl_schedule_node_filter:
          NumFilters[Version]++;
          break;

        case isl_schedule_node_extension:
          NumExtension[Version]++;
          break;

        default:
          break;
        }

        // Returning true continues the traversal into this node's children.
        return isl_bool_true;
      },
      &Version);
}
1789 
/// Run the isl schedule optimizer (and the post-scheduling transformations)
/// on the SCoP @p S.
///
/// @param S            The SCoP to optimize.
/// @param GetDeps      Callback that returns the dependence information for
///                     the requested analysis level.
/// @param TTI          Target information used by pattern-based optimization.
/// @param LastSchedule [out] The final schedule, kept for later printing.
/// @returns Always false: only the SCoP's schedule tree is modified, not
///          the IR.
static bool runIslScheduleOptimizer(
    Scop &S,
    function_ref<const Dependences &(Dependences::AnalysisLevel)> GetDeps,
    TargetTransformInfo *TTI, isl::schedule &LastSchedule) {
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
  if (S.isToBeSkipped())
    return false;

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  ScopsProcessed++;

  // Schedule without optimizations.
  isl::schedule Schedule = S.getScheduleTree();
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);
  LLVM_DEBUG(printSchedule(dbgs(), Schedule, "Original schedule tree"));

  bool HasUserTransformation = false;
  if (PragmaBasedOpts) {
    isl::schedule ManuallyTransformed =
        applyManualTransformations(&S, Schedule);
    if (!ManuallyTransformed) {
      LLVM_DEBUG(dbgs() << "Error during manual optimization\n");
      return false;
    }

    if (ManuallyTransformed.get() != Schedule.get()) {
      // User transformations have precedence over other transformations.
      HasUserTransformation = true;
      Schedule = std::move(ManuallyTransformed);
      LLVM_DEBUG(
          printSchedule(dbgs(), Schedule, "After manual transformations"));
    }
  }

  // Only continue if either manual transformations have been applied or we are
  // allowed to apply heuristics.
  // TODO: Detect disabled heuristics and no user-directed transformation
  // metadata earlier in ScopDetection.
  if (!HasUserTransformation && S.hasDisableHeuristicsHint()) {
    LLVM_DEBUG(dbgs() << "Heuristic optimizations disabled by metadata\n");
    return false;
  }

  // Get dependency analysis.
  const Dependences &D = GetDeps(Dependences::AL_Statement);
  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
    LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
    return false;
  }
  if (!D.hasValidDependences()) {
    LLVM_DEBUG(dbgs() << "Dependency information not available\n");
    return false;
  }

  // Apply ISL's algorithm only if not overridden by the user. Note that
  // post-rescheduling optimizations (tiling, pattern-based, prevectorization)
  // rely on the coincidence/permutable annotations on schedule tree bands that
  // are added by the rescheduling analyzer. Therefore, disabling the
  // rescheduler implicitly also disables these optimizations.
  if (HasUserTransformation) {
    LLVM_DEBUG(
        dbgs() << "Skipping rescheduling due to manual transformation\n");
  } else {
    // Build input data.
    int ValidityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
    int ProximityKinds;

    if (OptimizeDeps == "all")
      ProximityKinds =
          Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
    else if (OptimizeDeps == "raw")
      ProximityKinds = Dependences::TYPE_RAW;
    else {
      errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
             << " Falling back to optimizing all dependences.\n";
      ProximityKinds =
          Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
    }

    isl::union_set Domain = S.getDomains();

    if (!Domain)
      return false;

    isl::union_map Validity = D.getDependences(ValidityKinds);
    isl::union_map Proximity = D.getDependences(ProximityKinds);

    // Simplify the dependences by removing the constraints introduced by the
    // domains. This can speed up the scheduling time significantly, as large
    // constant coefficients will be removed from the dependences. The
    // introduction of some additional dependences reduces the possible
    // transformations, but in most cases, such transformation do not seem to be
    // interesting anyway. In some cases this option may stop the scheduler to
    // find any schedule.
    if (SimplifyDeps == "yes") {
      Validity = Validity.gist_domain(Domain);
      Validity = Validity.gist_range(Domain);
      Proximity = Proximity.gist_domain(Domain);
      Proximity = Proximity.gist_range(Domain);
    } else if (SimplifyDeps != "no") {
      errs()
          << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
             "or 'no'. Falling back to default: 'yes'\n";
    }

    LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
    LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
    LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
    LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");

    // Translate the string-valued command-line options into the integer
    // values the isl scheduler options expect.
    unsigned IslSerializeSCCs;

    if (FusionStrategy == "max") {
      IslSerializeSCCs = 0;
    } else if (FusionStrategy == "min") {
      IslSerializeSCCs = 1;
    } else {
      errs() << "warning: Unknown fusion strategy. Falling back to maximal "
                "fusion.\n";
      IslSerializeSCCs = 0;
    }

    int IslMaximizeBands;

    if (MaximizeBandDepth == "yes") {
      IslMaximizeBands = 1;
    } else if (MaximizeBandDepth == "no") {
      IslMaximizeBands = 0;
    } else {
      errs()
          << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
             " or 'no'. Falling back to default: 'yes'\n";
      IslMaximizeBands = 1;
    }

    int IslOuterCoincidence;

    if (OuterCoincidence == "yes") {
      IslOuterCoincidence = 1;
    } else if (OuterCoincidence == "no") {
      IslOuterCoincidence = 0;
    } else {
      errs() << "warning: Option -polly-opt-outer-coincidence should either be "
                "'yes' or 'no'. Falling back to default: 'no'\n";
      IslOuterCoincidence = 0;
    }

    isl_ctx *Ctx = S.getIslCtx().get();

    isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
    isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
    isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
    isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
    isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
    isl_options_set_tile_scale_tile_loops(Ctx, 0);

    // Let a failing compute_schedule produce a null schedule instead of
    // aborting; restore the previous error behavior afterwards.
    auto OnErrorStatus = isl_options_get_on_error(Ctx);
    isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

    auto SC = isl::schedule_constraints::on_domain(Domain);
    SC = SC.set_proximity(Proximity);
    SC = SC.set_validity(Validity);
    SC = SC.set_coincidence(Validity);
    Schedule = SC.compute_schedule();
    isl_options_set_on_error(Ctx, OnErrorStatus);

    ScopsRescheduled++;
    LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After rescheduling"));
  }

  walkScheduleTreeForStatistics(Schedule, 1);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
  if (!Schedule)
    return false;

  // Apply post-rescheduling optimizations.
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
  Schedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
  Schedule = hoistExtensionNodes(Schedule);
  LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After post-optimizations"));
  walkScheduleTreeForStatistics(Schedule, 2);

  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, Schedule))
    return false;

  auto ScopStats = S.getStatistics();
  ScopsOptimized++;
  NumAffineLoopsOptimized += ScopStats.NumAffineLoops;
  NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops;
  LastSchedule = Schedule;

  S.setScheduleTree(Schedule);
  S.markAsOptimized();

  if (OptimizedScops)
    errs() << S;

  return false;
}
1998 
1999 bool IslScheduleOptimizerWrapperPass::runOnScop(Scop &S) {
2000   releaseMemory();
2001 
2002   Function &F = S.getFunction();
2003   IslCtx = S.getSharedIslCtx();
2004 
2005   auto getDependences =
2006       [this](Dependences::AnalysisLevel) -> const Dependences & {
2007     return getAnalysis<DependenceInfo>().getDependences(
2008         Dependences::AL_Statement);
2009   };
2010   // auto &Deps  = getAnalysis<DependenceInfo>();
2011   TargetTransformInfo *TTI =
2012       &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2013   return runIslScheduleOptimizer(S, getDependences, TTI, LastSchedule);
2014 }
2015 
2016 static void runScheduleOptimizerPrinter(raw_ostream &OS,
2017                                         isl::schedule LastSchedule) {
2018   isl_printer *p;
2019   char *ScheduleStr;
2020 
2021   OS << "Calculated schedule:\n";
2022 
2023   if (!LastSchedule) {
2024     OS << "n/a\n";
2025     return;
2026   }
2027 
2028   p = isl_printer_to_str(LastSchedule.get_ctx().get());
2029   p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK);
2030   p = isl_printer_print_schedule(p, LastSchedule.get());
2031   ScheduleStr = isl_printer_get_str(p);
2032   isl_printer_free(p);
2033 
2034   OS << ScheduleStr << "\n";
2035 
2036   free(ScheduleStr);
2037 }
2038 
// Print the schedule computed by the last runOnScop invocation.
void IslScheduleOptimizerWrapperPass::printScop(raw_ostream &OS, Scop &) const {
  runScheduleOptimizerPrinter(OS, LastSchedule);
}
2042 
2043 void IslScheduleOptimizerWrapperPass::getAnalysisUsage(
2044     AnalysisUsage &AU) const {
2045   ScopPass::getAnalysisUsage(AU);
2046   AU.addRequired<DependenceInfo>();
2047   AU.addRequired<TargetTransformInfoWrapperPass>();
2048 
2049   AU.addPreserved<DependenceInfo>();
2050 }
2051 
2052 } // namespace
2053 
// Factory for the legacy pass manager; the caller takes ownership.
Pass *polly::createIslScheduleOptimizerWrapperPass() {
  return new IslScheduleOptimizerWrapperPass();
}
2057 
// Register the legacy pass and its analysis dependencies with the legacy
// pass manager under the command-line name "polly-opt-isl".
INITIALIZE_PASS_BEGIN(IslScheduleOptimizerWrapperPass, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizerWrapperPass, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
2065 
2066 static llvm::PreservedAnalyses
2067 runIslScheduleOptimizerUsingNPM(Scop &S, ScopAnalysisManager &SAM,
2068                                 ScopStandardAnalysisResults &SAR, SPMUpdater &U,
2069                                 raw_ostream *OS) {
2070   DependenceAnalysis::Result &Deps = SAM.getResult<DependenceAnalysis>(S, SAR);
2071   auto GetDeps = [&Deps](Dependences::AnalysisLevel) -> const Dependences & {
2072     return Deps.getDependences(Dependences::AL_Statement);
2073   };
2074   TargetTransformInfo *TTI = &SAR.TTI;
2075   isl::schedule LastSchedule;
2076   bool Modified = runIslScheduleOptimizer(S, GetDeps, TTI, LastSchedule);
2077   if (OS) {
2078     *OS << "Printing analysis 'Polly - Optimize schedule of SCoP' for region: '"
2079         << S.getName() << "' in function '" << S.getFunction().getName()
2080         << "':\n";
2081     runScheduleOptimizerPrinter(*OS, LastSchedule);
2082   }
2083 
2084   if (!Modified)
2085     return PreservedAnalyses::all();
2086 
2087   PreservedAnalyses PA;
2088   PA.preserveSet<AllAnalysesOn<Module>>();
2089   PA.preserveSet<AllAnalysesOn<Function>>();
2090   PA.preserveSet<AllAnalysesOn<Loop>>();
2091   return PA;
2092 }
2093 
// New-pass-manager entry point without schedule printing.
llvm::PreservedAnalyses
IslScheduleOptimizerPass::run(Scop &S, ScopAnalysisManager &SAM,
                              ScopStandardAnalysisResults &SAR, SPMUpdater &U) {
  return runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, nullptr);
}
2099 
// New-pass-manager entry point that additionally prints the schedule to OS.
llvm::PreservedAnalyses
IslScheduleOptimizerPrinterPass::run(Scop &S, ScopAnalysisManager &SAM,
                                     ScopStandardAnalysisResults &SAR,
                                     SPMUpdater &U) {
  return runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, &OS);
}
2106